author	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-05 17:54:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-05 17:54:29 -0400
commit	cc998ff8811530be521f6b316f37ab7676a07938 (patch)
tree	a054b3bf4b2ef406bf756a6cfc9be2f9115f17ae /drivers/net/ethernet
parent	57d730924d5cc2c3e280af16a9306587c3a511db (diff)
parent	0d40f75bdab241868c0eb6f97aef9f8b3a66f7b3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:
 "Noteworthy changes this time around:

  1) Multicast rejoin support for team driver, from Jiri Pirko.

  2) Centralize and simplify TCP RTT measurement handling in order to
     reduce the impact of bad RTO seeding from SYN/ACKs.  Also, when
     both timestamps and local RTT measurements are available prefer
     the latter because there are broken middleware devices which
     scramble the timestamp.  From Yuchung Cheng.

  3) Add TCP_NOTSENT_LOWAT socket option to limit the amount of kernel
     memory consumed to queue up unsent user data.  From Eric Dumazet.

  4) Add a "physical port ID" abstraction for network devices, from
     Jiri Pirko.

  5) Add a "suppress" operation to influence fib_rules lookups, from
     Stefan Tomanek.

  6) Add a networking development FAQ, from Paul Gortmaker.

  7) Extend the information provided by tcp_probe and add ipv6 support,
     from Daniel Borkmann.

  8) Use RCU locking more extensively in openvswitch data paths, from
     Pravin B Shelar.

  9) Add SCTP support to openvswitch, from Joe Stringer.

 10) Add EF10 chip support to SFC driver, from Ben Hutchings.

 11) Add new SYNPROXY netfilter target, from Patrick McHardy.

 12) Compute a rate approximation for sending in TCP sockets, and use
     this to more intelligently coalesce TSO frames.  Furthermore, add
     a new packet scheduler which takes advantage of this estimate when
     available.  From Eric Dumazet.

 13) Allow AF_PACKET fanouts with random selection, from Daniel
     Borkmann.

 14) Add ipv6 support to vxlan driver, from Cong Wang"

Resolved conflicts as per discussion.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1218 commits)
  openvswitch: Fix alignment of struct sw_flow_key.
  netfilter: Fix build errors with xt_socket.c
  tcp: Add missing braces to do_tcp_setsockopt
  caif: Add missing braces to multiline if in cfctrl_linkup_request
  bnx2x: Add missing braces in bnx2x:bnx2x_link_initialize
  vxlan: Fix kernel panic on device delete.
  net: mvneta: implement ->ndo_do_ioctl() to support PHY ioctls
  net: mvneta: properly disable HW PHY polling and ensure adjust_link() works
  icplus: Use netif_running to determine device state
  ethernet/arc/arc_emac: Fix huge delays in large file copies
  tuntap: orphan frags before trying to set tx timestamp
  tuntap: purge socket error queue on detach
  qlcnic: use standard NAPI weights
  ipv6: introduce function to find route for redirect
  bnx2x: VF RSS support - VF side
  bnx2x: VF RSS support - PF side
  vxlan: Notify drivers for listening UDP port changes
  net: usbnet: update addr_assign_type if appropriate
  driver/net: enic: update enic maintainers and driver
  driver/net: enic: Exposing symbols for Cisco's low latency driver
  ...
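Two of the features above are plain socket options and are easy to exercise from
user space. The two sketches below are illustrative only and are not part of the
merged patches; the option values match the uapi headers added in this cycle, while
the fallback defines, the 128KB low-water mark, and the fanout group id 42 are
arbitrary example choices.

First, item 3: capping how much unsent data the kernel will queue on a TCP socket
with TCP_NOTSENT_LOWAT, a minimal sketch assuming a Linux toolchain (older libc
headers may not define the option yet, hence the fallback define):

/* Sketch: limit unsent data queued in the kernel for one TCP socket. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_NOTSENT_LOWAT
#define TCP_NOTSENT_LOWAT 25	/* value from include/uapi/linux/tcp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int lowat = 128 * 1024;	/* example: wake the writer below 128KB unsent */

	if (fd < 0)
		return 1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
		       &lowat, sizeof(lowat)) < 0)
		perror("setsockopt(TCP_NOTSENT_LOWAT)");
	/* ... connect(), then poll() for POLLOUT before each write() ... */
	close(fd);
	return 0;
}

Second, item 13: attaching an AF_PACKET socket to a fanout group using the new
random selection mode, again with a fallback define for pre-3.12 headers. Opening
an AF_PACKET socket requires CAP_NET_RAW:

/* Sketch: join fanout group 42 (example id) with random packet selection. */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef PACKET_FANOUT_RND
#define PACKET_FANOUT_RND 4	/* value from include/uapi/linux/if_packet.h */
#endif

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int group = 42;				/* example group id */
	int arg = group | (PACKET_FANOUT_RND << 16);	/* id | (mode << 16) */

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg)) < 0)
		perror("setsockopt(PACKET_FANOUT)");
	close(fd);
	return 0;
}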
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/8390/Kconfig  2
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c  6
-rw-r--r--  drivers/net/ethernet/Kconfig  1
-rw-r--r--  drivers/net/ethernet/Makefile  1
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c  6
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c  12
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c  2
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c  6
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c  4
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig  2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c  23
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c  317
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.h  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h  39
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  106
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h  11
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c  3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  93
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h  1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c  309
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h  18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c  396
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h  39
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c  221
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h  41
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c  189
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h  69
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_defs.h  6
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h  12
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  146
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h  12
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c  10
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna.h  4
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c  2
-rw-r--r--  drivers/net/ethernet/cadence/macb.c  53
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c  195
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  3
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c  2
-rw-r--r--  drivers/net/ethernet/cisco/enic/Makefile  3
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h  55
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.c  48
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.h  30
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h  1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c  257
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  329
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.h  9
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c  10
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h  1
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h  176
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c  5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.h  5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.c  3
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.h  14
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c  2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c  2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c  8
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c  14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h  76
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c  503
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h  97
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c  25
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c  777
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c  8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h  4
-rw-r--r--  drivers/net/ethernet/ethoc.c  4
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c  7
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c  8
-rw-r--r--  drivers/net/ethernet/freescale/fec.h  3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  245
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx_phy.c  4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c  21
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c  4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c  168
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h  16
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c  74
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c  4
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.h  4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c  12
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c  4
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c  4
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h  19
-rw-r--r--  drivers/net/ethernet/icplus/ipg.c  2
-rw-r--r--  drivers/net/ethernet/intel/e100.c  15
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c  8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c  6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h  3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c  107
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h  11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c  140
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h  6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  130
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c  198
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h  42
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h  3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c  155
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h  5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c  11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c  80
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h  1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c  31
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h  8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  132
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  148
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c  8
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c  4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h  7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c  34
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c  321
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c  157
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h  8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  133
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  180
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c  542
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h  46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c  12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  105
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h  14
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c  4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  90
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c  177
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c  104
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c  35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h  13
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c  12
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c  4
-rw-r--r--  drivers/net/ethernet/moxa/Kconfig  30
-rw-r--r--  drivers/net/ethernet/moxa/Makefile  5
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c  559
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.h  330
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c  213
-rw-r--r--  drivers/net/ethernet/netx-eth.c  2
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c  15
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h  15
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c  1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c  67
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c  98
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h  1
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c  20
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.h  2
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig  11
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c  4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c  8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/Makefile  2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h  304
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c  743
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h  50
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c  292
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c  40
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c  237
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c  1179
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h  41
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c  223
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c  18
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h  11
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c  12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c  179
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c  454
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c  13
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c  165
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c  21
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c  19
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h  2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c  9
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig  2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  71
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h  10
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c  2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig  9
-rw-r--r--  drivers/net/ethernet/sfc/Makefile  7
-rw-r--r--  drivers/net/ethernet/sfc/bitfield.h  8
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c  3043
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h  415
-rw-r--r--  drivers/net/ethernet/sfc/efx.c  500
-rw-r--r--  drivers/net/ethernet/sfc/efx.h  129
-rw-r--r--  drivers/net/ethernet/sfc/enum.h  10
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c  399
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c  1171
-rw-r--r--  drivers/net/ethernet/sfc/falcon_boards.c  4
-rw-r--r--  drivers/net/ethernet/sfc/falcon_xmac.c  362
-rw-r--r--  drivers/net/ethernet/sfc/farch.c  2942
-rw-r--r--  drivers/net/ethernet/sfc/farch_regs.h (renamed from drivers/net/ethernet/sfc/regs.h)  272
-rw-r--r--  drivers/net/ethernet/sfc/filter.c  1274
-rw-r--r--  drivers/net/ethernet/sfc/filter.h  238
-rw-r--r--  drivers/net/ethernet/sfc/io.h  50
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c  1262
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h  313
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mac.c  130
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mon.c  274
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h  5540
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c (renamed from drivers/net/ethernet/sfc/mcdi_phy.c)  345
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.c  2
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.h  2
-rw-r--r--  drivers/net/ethernet/sfc/mtd.c  634
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h  408
-rw-r--r--  drivers/net/ethernet/sfc/nic.c  1902
-rw-r--r--  drivers/net/ethernet/sfc/nic.h  539
-rw-r--r--  drivers/net/ethernet/sfc/phy.h  19
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c  95
-rw-r--r--  drivers/net/ethernet/sfc/qt202x_phy.c  4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c  176
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c  15
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h  4
-rw-r--r--  drivers/net/ethernet/sfc/siena.c  711
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c  102
-rw-r--r--  drivers/net/ethernet/sfc/spi.h  99
-rw-r--r--  drivers/net/ethernet/sfc/tenxpress.c  2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c  35
-rw-r--r--  drivers/net/ethernet/sfc/txc43128_phy.c  2
-rw-r--r--  drivers/net/ethernet/sfc/vfdi.h  2
-rw-r--r--  drivers/net/ethernet/sfc/workarounds.h  22
-rw-r--r--  drivers/net/ethernet/sgi/meth.c  5
-rw-r--r--  drivers/net/ethernet/sis/sis190.c  3
-rw-r--r--  drivers/net/ethernet/sis/sis900.c  28
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c  2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c  2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h  1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c  4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c  9
-rw-r--r--  drivers/net/ethernet/sun/niu.c  8
-rw-r--r--  drivers/net/ethernet/sun/sunbmac.c  4
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c  12
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c  6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c  263
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h  42
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c  1
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c  2
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c  7
-rw-r--r--  drivers/net/ethernet/tile/Kconfig  11
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c  1116
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c  241
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c  10
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c  2
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c  20
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c  2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c  2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c  12
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c  14
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c  4
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c  2
249 files changed, 28620 insertions, 10940 deletions
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a5f91e1e8fe3..becef25fa194 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -148,7 +148,7 @@ config PCMCIA_PCNET
 
 config NE_H8300
 	tristate "NE2000 compatible support for H8/300"
-	depends on H8300
+	depends on H8300H_AKI3068NET || H8300H_H8MAX
 	---help---
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e1d26433d619..f92f001551da 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev)
 
 #ifdef CONFIG_AX88796_93CX6
 	if (ax->plat->flags & AXFLG_HAS_93CX6) {
-		unsigned char mac_addr[6];
+		unsigned char mac_addr[ETH_ALEN];
 		struct eeprom_93cx6 eeprom;
 
 		eeprom.data = ei_local;
@@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev)
 			       (__le16 __force *)mac_addr,
 			       sizeof(mac_addr) >> 1);
 
-		memcpy(dev->dev_addr, mac_addr, 6);
+		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 	}
 #endif
 	if (ax->plat->wordlength == 2) {
@@ -840,7 +840,7 @@ static int ax_probe(struct platform_device *pdev)
 	ei_local = netdev_priv(dev);
 	ax = to_ax_dev(dev);
 
-	ax->plat = pdev->dev.platform_data;
+	ax->plat = dev_get_platdata(&pdev->dev);
 	platform_set_drvdata(pdev, dev);
 
 	ei_local->rxcr_base = ax->plat->rcr_val;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 2037080c504d..506b0248c400 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -90,6 +90,7 @@ source "drivers/net/ethernet/marvell/Kconfig"
 source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 390bd0bfaa27..c0b8789952e7 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e904b3838dcc..e66684a438f5 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1647,12 +1647,12 @@ static int bfin_mac_probe(struct platform_device *pdev)
 
 	setup_mac_addr(ndev->dev_addr);
 
-	if (!pdev->dev.platform_data) {
+	if (!dev_get_platdata(&pdev->dev)) {
 		dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
 		rc = -ENODEV;
 		goto out_err_probe_mac;
 	}
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	lp->mii_bus = platform_get_drvdata(pd);
 	if (!lp->mii_bus) {
 		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
@@ -1660,7 +1660,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
 		goto out_err_probe_mac;
 	}
 	lp->mii_bus->priv = ndev;
-	mii_bus_data = pd->dev.platform_data;
+	mii_bus_data = dev_get_platdata(&pd->dev);
 
 	rc = mii_probe(ndev, mii_bus_data->phy_mode);
 	if (rc) {
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7ff4b30d55ea..e06694555144 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,18 +1464,18 @@ static int greth_of_probe(struct platform_device *ofdev)
 	}
 
 	/* Allocate TX descriptor ring in coherent memory */
-	greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
-					       &greth->tx_bd_base_phys,
-					       GFP_KERNEL | __GFP_ZERO);
+	greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+						&greth->tx_bd_base_phys,
+						GFP_KERNEL);
 	if (!greth->tx_bd_base) {
 		err = -ENOMEM;
 		goto error3;
 	}
 
 	/* Allocate RX descriptor ring in coherent memory */
-	greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
-					       &greth->rx_bd_base_phys,
-					       GFP_KERNEL | __GFP_ZERO);
+	greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+						&greth->rx_bd_base_phys,
+						GFP_KERNEL);
 	if (!greth->rx_bd_base) {
 		err = -ENOMEM;
 		goto error4;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index ceb45bc963a9..91d52b495848 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1131,7 +1131,7 @@ static int au1000_probe(struct platform_device *pdev)
 	writel(0, aup->enable);
 	aup->mac_enabled = 0;
 
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	if (!pd) {
 		dev_info(&pdev->dev, "no platform_data passed,"
 			 " PHY search on MAC0\n");
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index ed2130727643..2d8e28819779 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	char *chipname;
 	struct net_device *dev;
 	const struct pcnet32_access *a = NULL;
-	u8 promaddr[6];
+	u8 promaddr[ETH_ALEN];
 	int ret = -ENODEV;
 
 	/* reset the chip */
@@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	}
 
 	/* read PROM address and compare with CSR address */
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		promaddr[i] = inb(ioaddr + i);
 
-	if (memcmp(promaddr, dev->dev_addr, 6) ||
+	if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
 	    !is_valid_ether_addr(dev->dev_addr)) {
 		if (is_valid_ether_addr(promaddr)) {
 			if (pcnet32_debug & NETIF_MSG_PROBE) {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 55d79cb53a79..9e1601487263 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		struct sk_buff *skb = tx_buff->skb;
 		unsigned int info = le32_to_cpu(txbd->info);
 
-		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
 		if ((info & FOR_EMAC) || !txbd->data)
 			break;
 
@@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		txbd->data = 0;
 		txbd->info = 0;
 
+		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
+
 		if (netif_queue_stopped(ndev))
 			netif_wake_queue(ndev);
 	}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 52c96036dcc4..2fa5b86f139d 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -130,7 +130,7 @@ config BNX2X_SRIOV
 
 config BGMAC
 	tristate "BCMA bus GBit core support"
-	depends on BCMA_HOST_SOC && HAS_DMA
+	depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
 	select PHYLIB
 	---help---
 	  This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b1bcd4ba4744..8ac48fbf8a66 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -948,8 +948,7 @@ static int bcm_enet_open(struct net_device *dev)
 
 	/* allocate rx dma ring */
 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
-			       GFP_KERNEL | __GFP_ZERO);
+	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 	if (!p) {
 		ret = -ENOMEM;
 		goto out_freeirq_tx;
@@ -960,8 +959,7 @@ static int bcm_enet_open(struct net_device *dev)
 
 	/* allocate tx dma ring */
 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
-			       GFP_KERNEL | __GFP_ZERO);
+	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 	if (!p) {
 		ret = -ENOMEM;
 		goto out_free_rx_ring;
@@ -1747,11 +1745,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	if (!bcm_enet_shared_base[0])
 		return -ENODEV;
 
-	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
 	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
-	if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+	if (!res_irq || !res_irq_rx || !res_irq_tx)
 		return -ENODEV;
 
 	ret = 0;
@@ -1767,9 +1764,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	if (ret)
 		goto out;
 
-	priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
-	if (priv->base == NULL) {
-		ret = -ENOMEM;
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+	if (IS_ERR(priv->base)) {
+		ret = PTR_ERR(priv->base);
 		goto out;
 	}
 
@@ -1800,7 +1798,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
 
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	if (pd) {
 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
 		priv->has_phy = pd->has_phy;
@@ -1964,7 +1962,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
 	} else {
 		struct bcm63xx_enet_platform_data *pd;
 
-		pd = pdev->dev.platform_data;
+		pd = dev_get_platdata(&pdev->dev);
 		if (pd && pd->mii_config)
 			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
 				       bcm_enet_mdio_write_mii);
@@ -2742,7 +2740,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
 	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
 
-	pd = pdev->dev.platform_data;
+	pd = dev_get_platdata(&pdev->dev);
 	if (pd) {
 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
 		memcpy(priv->used_ports, pd->used_ports,
@@ -2836,7 +2834,6 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, resource_size(res));
 
-	platform_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6a2de1d79ff6..e838a3f74b69 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1,6 +1,6 @@
 /* bnx2.c: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define DRV_MODULE_VERSION	"2.2.3"
-#define DRV_MODULE_RELDATE	"June 27, 2012"
+#define DRV_MODULE_VERSION	"2.2.4"
+#define DRV_MODULE_RELDATE	"Aug 05, 2013"
 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -853,9 +853,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
 	bp->status_stats_size = status_blk_size +
 				sizeof(struct statistics_block);
 
-	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
-					&bp->status_blk_mapping,
-					GFP_KERNEL | __GFP_ZERO);
+	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+					 &bp->status_blk_mapping, GFP_KERNEL);
 	if (status_blk == NULL)
 		goto alloc_mem_err;
 
@@ -3908,136 +3907,121 @@ init_cpu_err:
 	return rc;
 }
 
-static int
-bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+static void
+bnx2_setup_wol(struct bnx2 *bp)
 {
-	u16 pmcsr;
+	int i;
+	u32 val, wol_msg;
 
-	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+	if (bp->wol) {
+		u32 advertising;
+		u8 autoneg;
 
-	switch (state) {
-	case PCI_D0: {
-		u32 val;
+		autoneg = bp->autoneg;
+		advertising = bp->advertising;
 
-		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
-			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
-			PCI_PM_CTRL_PME_STATUS);
+		if (bp->phy_port == PORT_TP) {
+			bp->autoneg = AUTONEG_SPEED;
+			bp->advertising = ADVERTISED_10baseT_Half |
+					  ADVERTISED_10baseT_Full |
+					  ADVERTISED_100baseT_Half |
+					  ADVERTISED_100baseT_Full |
+					  ADVERTISED_Autoneg;
+		}
 
-		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
-			/* delay required during transition out of D3hot */
-			msleep(20);
+		spin_lock_bh(&bp->phy_lock);
+		bnx2_setup_phy(bp, bp->phy_port);
+		spin_unlock_bh(&bp->phy_lock);
 
-		val = BNX2_RD(bp, BNX2_EMAC_MODE);
-		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
-		val &= ~BNX2_EMAC_MODE_MPKT;
-		BNX2_WR(bp, BNX2_EMAC_MODE, val);
+		bp->autoneg = autoneg;
+		bp->advertising = advertising;
 
-		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
-		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
-		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
-		break;
-	}
-	case PCI_D3hot: {
-		int i;
-		u32 val, wol_msg;
-
-		if (bp->wol) {
-			u32 advertising;
-			u8 autoneg;
-
-			autoneg = bp->autoneg;
-			advertising = bp->advertising;
-
-			if (bp->phy_port == PORT_TP) {
-				bp->autoneg = AUTONEG_SPEED;
-				bp->advertising = ADVERTISED_10baseT_Half |
-					ADVERTISED_10baseT_Full |
-					ADVERTISED_100baseT_Half |
-					ADVERTISED_100baseT_Full |
-					ADVERTISED_Autoneg;
-			}
+		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
 
-			spin_lock_bh(&bp->phy_lock);
-			bnx2_setup_phy(bp, bp->phy_port);
-			spin_unlock_bh(&bp->phy_lock);
+		val = BNX2_RD(bp, BNX2_EMAC_MODE);
 
-			bp->autoneg = autoneg;
-			bp->advertising = advertising;
+		/* Enable port mode. */
+		val &= ~BNX2_EMAC_MODE_PORT;
+		val |= BNX2_EMAC_MODE_MPKT_RCVD |
+		       BNX2_EMAC_MODE_ACPI_RCVD |
+		       BNX2_EMAC_MODE_MPKT;
+		if (bp->phy_port == PORT_TP) {
+			val |= BNX2_EMAC_MODE_PORT_MII;
+		} else {
+			val |= BNX2_EMAC_MODE_PORT_GMII;
+			if (bp->line_speed == SPEED_2500)
+				val |= BNX2_EMAC_MODE_25G_MODE;
+		}
 
-			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+		BNX2_WR(bp, BNX2_EMAC_MODE, val);
 
-			val = BNX2_RD(bp, BNX2_EMAC_MODE);
+		/* receive all multicast */
+		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+				0xffffffff);
+		}
+		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
 
-			/* Enable port mode. */
-			val &= ~BNX2_EMAC_MODE_PORT;
-			val |= BNX2_EMAC_MODE_MPKT_RCVD |
-			       BNX2_EMAC_MODE_ACPI_RCVD |
-			       BNX2_EMAC_MODE_MPKT;
-			if (bp->phy_port == PORT_TP)
-				val |= BNX2_EMAC_MODE_PORT_MII;
-			else {
-				val |= BNX2_EMAC_MODE_PORT_GMII;
-				if (bp->line_speed == SPEED_2500)
-					val |= BNX2_EMAC_MODE_25G_MODE;
-			}
+		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
+		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
+		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
 
-			BNX2_WR(bp, BNX2_EMAC_MODE, val);
+		/* Need to enable EMAC and RPM for WOL. */
+		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
 
-			/* receive all multicast */
-			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
-				BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
-					0xffffffff);
-			}
-			BNX2_WR(bp, BNX2_EMAC_RX_MODE,
-				BNX2_EMAC_RX_MODE_SORT_MODE);
+		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
 
-			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
-			      BNX2_RPM_SORT_USER0_MC_EN;
-			BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
-			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
-			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
-				BNX2_RPM_SORT_USER0_ENA);
+		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+	} else {
+		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+	}
 
-			/* Need to enable EMAC and RPM for WOL. */
-			BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
-				BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
-				BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
-				BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+	if (!(bp->flags & BNX2_FLAG_NO_WOL))
+		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
 
-			val = BNX2_RD(bp, BNX2_RPM_CONFIG);
-			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
-			BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+}
 
-			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
-		}
-		else {
-			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
-		}
+static int
+bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+{
+	switch (state) {
+	case PCI_D0: {
+		u32 val;
+
+		pci_enable_wake(bp->pdev, PCI_D0, false);
+		pci_set_power_state(bp->pdev, PCI_D0);
 
-		if (!(bp->flags & BNX2_FLAG_NO_WOL))
-			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
-				     1, 0);
+		val = BNX2_RD(bp, BNX2_EMAC_MODE);
+		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
+		val &= ~BNX2_EMAC_MODE_MPKT;
+		BNX2_WR(bp, BNX2_EMAC_MODE, val);
 
-		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+		break;
+	}
+	case PCI_D3hot: {
+		bnx2_setup_wol(bp);
+		pci_wake_from_d3(bp->pdev, bp->wol);
 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
 
 			if (bp->wol)
-				pmcsr |= 3;
-		}
-		else {
-			pmcsr |= 3;
-		}
-		if (bp->wol) {
-			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+				pci_set_power_state(bp->pdev, PCI_D3hot);
+		} else {
+			pci_set_power_state(bp->pdev, PCI_D3hot);
 		}
-		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
-				      pmcsr);
 
 		/* No more memory access after this point until
 		 * device is brought back to D0.
 		 */
-		udelay(50);
 		break;
 	}
 	default:
@@ -6317,7 +6301,6 @@ bnx2_open(struct net_device *dev)
 
 	netif_carrier_off(dev);
 
-	bnx2_set_power_state(bp, PCI_D0);
 	bnx2_disable_int(bp);
 
 	rc = bnx2_setup_int_mode(bp, disable_msi);
@@ -6724,7 +6707,6 @@ bnx2_close(struct net_device *dev)
 	bnx2_del_napi(bp);
 	bp->link_up = 0;
 	netif_carrier_off(bp->dev);
-	bnx2_set_power_state(bp, PCI_D3hot);
 	return 0;
 }
 
@@ -7081,6 +7063,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	else {
 		bp->wol = 0;
 	}
+
+	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
 	return 0;
 }
 
@@ -7156,9 +7141,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	struct bnx2 *bp = netdev_priv(dev);
 	int rc;
 
-	if (!netif_running(dev))
-		return -EAGAIN;
-
 	/* parameters already validated in ethtool_get_eeprom */
 
 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7173,9 +7155,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	struct bnx2 *bp = netdev_priv(dev);
 	int rc;
 
-	if (!netif_running(dev))
-		return -EAGAIN;
-
 	/* parameters already validated in ethtool_set_eeprom */
 
 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7535,8 +7514,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
-	bnx2_set_power_state(bp, PCI_D0);
-
 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
 		int i;
@@ -7585,8 +7562,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
 			etest->flags |= ETH_TEST_FL_FAILED;
 
 	}
-	if (!netif_running(bp->dev))
-		bnx2_set_power_state(bp, PCI_D3hot);
 }
 
 static void
@@ -7658,8 +7633,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
 
 	switch (state) {
 	case ETHTOOL_ID_ACTIVE:
-		bnx2_set_power_state(bp, PCI_D0);
-
 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
 		return 1;	/* cycle on/off once per second */
@@ -7680,9 +7653,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
 	case ETHTOOL_ID_INACTIVE:
 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
-
-		if (!netif_running(dev))
-			bnx2_set_power_state(bp, PCI_D3hot);
 		break;
 	}
 
@@ -8130,8 +8100,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		goto err_out_release;
 	}
 
-	bnx2_set_power_state(bp, PCI_D0);
-
 	/* Configure byte swap and enable write to the reg_window registers.
 	 * Rely on CPU to do target byte swapping on big endian systems
 	 * The chip's target access swapping will not swap all accesses
@@ -8170,13 +8138,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
-		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+		if (pdev->msix_cap)
 			bp->flags |= BNX2_FLAG_MSIX_CAP;
 	}
 
 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
-		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
+		if (pdev->msi_cap)
 			bp->flags |= BNX2_FLAG_MSI_CAP;
 	}
 
@@ -8369,6 +8337,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		bp->wol = 0;
 	}
 
+	if (bp->flags & BNX2_FLAG_NO_WOL)
+		device_set_wakeup_capable(&bp->pdev->dev, false);
+	else
+		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
 		bp->tx_quick_cons_trip_int =
 			bp->tx_quick_cons_trip;
@@ -8609,46 +8582,52 @@ bnx2_remove_one(struct pci_dev *pdev)
 }
 
 static int
-bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
+bnx2_suspend(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2 *bp = netdev_priv(dev);
 
-	/* PCI register 4 needs to be saved whether netif_running() or not.
-	 * MSI address and data need to be saved if using MSI and
-	 * netif_running().
-	 */
-	pci_save_state(pdev);
-	if (!netif_running(dev))
-		return 0;
-
-	cancel_work_sync(&bp->reset_task);
-	bnx2_netif_stop(bp, true);
-	netif_device_detach(dev);
-	del_timer_sync(&bp->timer);
-	bnx2_shutdown_chip(bp);
-	bnx2_free_skbs(bp);
-	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
+	if (netif_running(dev)) {
+		cancel_work_sync(&bp->reset_task);
+		bnx2_netif_stop(bp, true);
+		netif_device_detach(dev);
+		del_timer_sync(&bp->timer);
+		bnx2_shutdown_chip(bp);
+		__bnx2_free_irq(bp);
+		bnx2_free_skbs(bp);
+	}
+	bnx2_setup_wol(bp);
 	return 0;
 }
 
 static int
-bnx2_resume(struct pci_dev *pdev)
+bnx2_resume(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2 *bp = netdev_priv(dev);
 
-	pci_restore_state(pdev);
 	if (!netif_running(dev))
 		return 0;
 
 	bnx2_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
+	bnx2_request_irq(bp);
 	bnx2_init_nic(bp, 1);
 	bnx2_netif_start(bp, true);
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
+#define BNX2_PM_OPS (&bnx2_pm_ops)
+
+#else
+
+#define BNX2_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
 /**
  * bnx2_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -8694,24 +8673,28 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2 *bp = netdev_priv(dev);
-	pci_ers_result_t result;
-	int err;
+	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+	int err = 0;
 
 	rtnl_lock();
 	if (pci_enable_device(pdev)) {
 		dev_err(&pdev->dev,
 			"Cannot re-enable PCI device after reset\n");
-		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_set_master(pdev);
 		pci_restore_state(pdev);
 		pci_save_state(pdev);
 
-		if (netif_running(dev)) {
-			bnx2_set_power_state(bp, PCI_D0);
-			bnx2_init_nic(bp, 1);
-		}
-		result = PCI_ERS_RESULT_RECOVERED;
+		if (netif_running(dev))
+			err = bnx2_init_nic(bp, 1);
+
+		if (!err)
+			result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
+		bnx2_napi_enable(bp);
+		dev_close(dev);
 	}
 	rtnl_unlock();
 
@@ -8748,6 +8731,28 @@ static void bnx2_io_resume(struct pci_dev *pdev)
 	rtnl_unlock();
 }
 
+static void bnx2_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp;
+
+	if (!dev)
+		return;
+
+	bp = netdev_priv(dev);
+	if (!bp)
+		return;
+
+	rtnl_lock();
+	if (netif_running(dev))
+		dev_close(bp->dev);
+
+	if (system_state == SYSTEM_POWER_OFF)
+		bnx2_set_power_state(bp, PCI_D3hot);
+
+	rtnl_unlock();
+}
+
 static const struct pci_error_handlers bnx2_err_handler = {
 	.error_detected	= bnx2_io_error_detected,
 	.slot_reset	= bnx2_io_slot_reset,
@@ -8759,9 +8764,9 @@ static struct pci_driver bnx2_pci_driver = {
 	.id_table	= bnx2_pci_tbl,
 	.probe		= bnx2_init_one,
 	.remove		= bnx2_remove_one,
-	.suspend	= bnx2_suspend,
-	.resume		= bnx2_resume,
+	.driver.pm	= BNX2_PM_OPS,
 	.err_handler	= &bnx2_err_handler,
+	.shutdown	= bnx2_shutdown,
 };
 
 module_pci_driver(bnx2_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 172efbecfea2..18cb2d23e56b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -1,6 +1,6 @@
 /* bnx2.h: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 00b88cbfde25..0c338026ce01 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 #define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
 
 #define BNX2X_DB_MIN_SHIFT		3	/* 8 bytes */
-#define BNX2X_DB_SHIFT			7	/* 128 bytes*/
+#define BNX2X_DB_SHIFT			3	/* 8 bytes*/
 #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
 #error "Min DB doorbell stride is 8"
 #endif
-#define DPM_TRIGER_TYPE			0x40
 #define DOORBELL(bp, cid, val) \
 	do { \
-		writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
-		       DPM_TRIGER_TYPE); \
+		writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
 	} while (0)
 
 /* TX CSUM helpers */
@@ -1100,13 +1098,27 @@ struct bnx2x_port {
 extern struct workqueue_struct *bnx2x_wq;
 
 #define BNX2X_MAX_NUM_OF_VFS	64
-#define BNX2X_VF_CID_WND	0
+#define BNX2X_VF_CID_WND	4 /* log num of queues per VF. HW config. */
 #define BNX2X_CIDS_PER_VF	(1 << BNX2X_VF_CID_WND)
-#define BNX2X_CLIENTS_PER_VF	1
-#define BNX2X_FIRST_VF_CID	256
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
 #define BNX2X_VF_CIDS		(BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID	BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES	64
 #define BNX2X_VF_ID_INVALID	0xFF
 
+/* the number of VF CIDS multiplied by the amount of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE	512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
 /*
  * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
  * control by the number of fast-path status blocks supported by the
@@ -1331,7 +1343,7 @@ enum {
 	BNX2X_SP_RTNL_ENABLE_SRIOV,
 	BNX2X_SP_RTNL_VFPF_MCAST,
 	BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
-	BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+	BNX2X_SP_RTNL_RX_MODE,
 	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 	BNX2X_SP_RTNL_TX_STOP,
 	BNX2X_SP_RTNL_TX_RESUME,
@@ -1650,10 +1662,10 @@ struct bnx2x {
 	dma_addr_t		fw_stats_data_mapping;
 	int			fw_stats_data_sz;
 
-	/* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+	/* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
 	 * context size we need 8 ILT entries.
 	 */
-#define ILT_MAX_L2_LINES	8
+#define ILT_MAX_L2_LINES	32
 	struct hw_context	context[ILT_MAX_L2_LINES];
 
 	struct bnx2x_ilt	*ilt;
@@ -1869,7 +1881,7 @@ extern int num_queues;
 #define FUNC_FLG_TPA		0x0008
 #define FUNC_FLG_SPQ		0x0010
 #define FUNC_FLG_LEADING	0x0020	/* PF only */
-
+#define FUNC_FLG_LEADING_STATS	0x0040
 struct bnx2x_func_init_params {
 	/* dma */
 	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
@@ -2069,9 +2081,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 			    bool is_pf);
 
 #define BNX2X_ILT_ZALLOC(x, y, size) \
-	x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
-			       GFP_KERNEL | __GFP_ZERO)
+	x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
 
 #define BNX2X_ILT_FREE(x, y, size) \
 	do { \
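Both BNX2X_ILT_ZALLOC above and BNX2X_PCI_ALLOC below lean on the same identity: dma_zalloc_coherent() is shorthand for dma_alloc_coherent() with __GFP_ZERO folded in. A condensed illustration (the wrapper function is hypothetical):

    /* Illustration only: both forms return zeroed coherent DMA memory */
    static void *ilt_line_alloc(struct bnx2x *bp, size_t size, dma_addr_t *map)
    {
    	return dma_zalloc_coherent(&bp->pdev->dev, size, map, GFP_KERNEL);
    	/* equivalent to:
    	 * dma_alloc_coherent(&bp->pdev->dev, size, map,
    	 *		      GFP_KERNEL | __GFP_ZERO);
    	 */
    }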
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0cc26110868d..2361bf236ce3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1948,7 +1948,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 	}
 }
 
-static int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss(struct bnx2x *bp)
 {
 	int i;
 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1972,8 +1972,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
 	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
 }
 
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
-			bool config_hash)
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+	      bool config_hash, bool enable)
 {
 	struct bnx2x_config_rss_params params = {NULL};
 
@@ -1988,17 +1988,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
 
 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 
-	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-
-	/* RSS configuration */
-	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
-	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
-	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
-	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
-	if (rss_obj->udp_rss_v4)
-		__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
-	if (rss_obj->udp_rss_v6)
-		__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+	if (enable) {
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+
+		/* RSS configuration */
+		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+		if (rss_obj->udp_rss_v4)
+			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+		if (rss_obj->udp_rss_v6)
+			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+	} else {
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+	}
 
 	/* Hash bits */
 	params.rss_result_mask = MULTI_MASK;
@@ -2007,11 +2011,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
 
 	if (config_hash) {
 		/* RSS keys */
-		prandom_bytes(params.rss_key, sizeof(params.rss_key));
+		prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
 		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
 	}
 
-	return bnx2x_config_rss(bp, &params);
+	if (IS_PF(bp))
+		return bnx2x_config_rss(bp, &params);
+	else
+		return bnx2x_vfpf_config_rss(bp, &params);
 }
 
 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
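With the new enable flag, the same helper can now either install the regular RSS mode or push BNX2X_RSS_MODE_DISABLED, and it dispatches to the PF ramrod or the VF-PF channel internally. A hypothetical call site:

    /* Hypothetical caller toggling RSS via the new parameter */
    static int rss_toggle_example(struct bnx2x *bp, bool on)
    {
    	/* config_hash=true regenerates the hash keys; enable=false pushes
    	 * BNX2X_RSS_MODE_DISABLED instead of the regular mode.
    	 */
    	return bnx2x_rss(bp, &bp->rss_conf_obj, on, on);
    }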
@@ -2066,7 +2073,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
 	rparam.mcast_obj = &bp->mcast_obj;
 	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
 
-	/* Add a DEL command... */
+	/* Add a DEL command... - Since we're doing a driver cleanup only,
+	 * we take a lock surrounding both the initial send and the CONTs,
+	 * as we don't want a true completion to disrupt us in the middle.
+	 */
+	netif_addr_lock_bh(bp->dev);
 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 	if (rc < 0)
 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2078,11 +2089,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
 		if (rc < 0) {
 			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
 				  rc);
+			netif_addr_unlock_bh(bp->dev);
 			return;
 		}
 
 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
 	}
+	netif_addr_unlock_bh(bp->dev);
 }
 
 #ifndef BNX2X_STOP_ON_ERROR
@@ -2438,9 +2451,7 @@ int bnx2x_load_cnic(struct bnx2x *bp)
 	}
 
 	/* Initialize Rx filter. */
-	netif_addr_lock_bh(bp->dev);
-	bnx2x_set_rx_mode(bp->dev);
-	netif_addr_unlock_bh(bp->dev);
+	bnx2x_set_rx_mode_inner(bp);
 
 	/* re-read iscsi info */
 	bnx2x_get_iscsi_info(bp);
@@ -2647,38 +2658,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 		/* initialize FW coalescing state machines in RAM */
 		bnx2x_update_coalesce(bp);
+	}
 
 	/* setup the leading queue */
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
 		LOAD_ERROR_EXIT(bp, load_error3);
 	}
-
-	/* set up the rest of the queues */
-	for_each_nondefault_eth_queue(bp, i) {
-		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
-		if (rc) {
-			BNX2X_ERR("Queue setup failed\n");
-			LOAD_ERROR_EXIT(bp, load_error3);
-		}
-	}
 
-	/* setup rss */
-	rc = bnx2x_init_rss_pf(bp);
+	/* set up the rest of the queues */
+	for_each_nondefault_eth_queue(bp, i) {
+		if (IS_PF(bp))
+			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
+		else /* VF */
+			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
 		if (rc) {
-			BNX2X_ERR("PF RSS init failed\n");
+			BNX2X_ERR("Queue %d setup failed\n", i);
 			LOAD_ERROR_EXIT(bp, load_error3);
 		}
+	}
 
-	} else { /* vf */
-		for_each_eth_queue(bp, i) {
-			rc = bnx2x_vfpf_setup_q(bp, i);
-			if (rc) {
-				BNX2X_ERR("Queue setup failed\n");
-				LOAD_ERROR_EXIT(bp, load_error3);
-			}
-		}
-	}
+	/* setup rss */
+	rc = bnx2x_init_rss(bp);
+	if (rc) {
+		BNX2X_ERR("PF RSS init failed\n");
+		LOAD_ERROR_EXIT(bp, load_error3);
+	}
 
 	/* Now when Clients are configured we are ready to work */
@@ -2710,9 +2715,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* Start fast path */
 
 	/* Initialize Rx filter. */
-	netif_addr_lock_bh(bp->dev);
-	bnx2x_set_rx_mode(bp->dev);
-	netif_addr_unlock_bh(bp->dev);
+	bnx2x_set_rx_mode_inner(bp);
 
 	/* Start the Tx */
 	switch (load_mode) {
@@ -4789,6 +4792,11 @@ int bnx2x_resume(struct pci_dev *pdev)
 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
 			      u32 cid)
 {
+	if (!cxt) {
+		BNX2X_ERR("bad context pointer %p\n", cxt);
+		return;
+	}
+
 	/* ustorm cxt validation */
 	cxt->ustorm_ag_context.cdu_usage =
 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c07a6d054cfe..da8fcaa74495 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -51,8 +51,7 @@ extern int int_mode;
 
 #define BNX2X_PCI_ALLOC(x, y, size) \
 	do { \
-		x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
-				       GFP_KERNEL | __GFP_ZERO); \
+		x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 		if (x == NULL) \
 			goto alloc_mem_err; \
 		DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
@@ -106,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
  * @rss_obj:	RSS object to use
  * @ind_table:	indirection table to configure
  * @config_hash:	re-configure RSS hash keys configuration
+ * @enable:	enabled or disabled configuration
  */
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
-			bool config_hash);
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+	      bool config_hash, bool enable);
 
 /**
  * bnx2x__init_func_obj - init function object
@@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
  * netif_addr_lock_bh()
  */
 void bnx2x_set_rx_mode(struct net_device *dev);
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
 
 /**
  * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
@@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
 
 static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
 {
-	return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
+	return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
 }
 
 /**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c5f225101684..2612e3c715d4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
 			DP(BNX2X_MSG_ETHTOOL,
 			   "rss re-configured, UDP 4-tupple %s\n",
 			   udp_rss_requested ? "enabled" : "disabled");
-			return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
 		} else if ((info->flow_type == UDP_V6_FLOW) &&
 			   (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
 			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
 			DP(BNX2X_MSG_ETHTOOL,
 			   "rss re-configured, UDP 4-tupple %s\n",
 			   udp_rss_requested ? "enabled" : "disabled");
-			return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
 		}
 		return 0;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9d64b988ab34..664568420c9b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		if (vars->line_speed == SPEED_AUTO_NEG &&
 		    (CHIP_IS_E1x(bp) ||
-		    CHIP_IS_E2(bp)))
+		    CHIP_IS_E2(bp))) {
 			bnx2x_set_parallel_detection(phy, params);
 			if (params->phy[INT_PHY].config_init)
 				params->phy[INT_PHY].config_init(phy,
 								 params,
 								 vars);
+		}
 	}
 
 	/* Init external phy*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 815f2dea6337..634a793c1c46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
 
 	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
-	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
 	if (!CHIP_REV_IS_SLOW(bp))
 		/* enable hw interrupt from doorbell Q */
 		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -8063,7 +8063,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 
 int bnx2x_setup_leading(struct bnx2x *bp)
 {
-	return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+	if (IS_PF(bp))
+		return bnx2x_setup_queue(bp, &bp->fp[0], true);
+	else /* VF */
+		return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
 }
 
 /**
@@ -8077,8 +8080,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
 {
 	int rc = 0;
 
-	if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+	if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
+		BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
 		return -EINVAL;
+	}
 
 	switch (int_mode) {
 	case BNX2X_INT_MODE_MSIX:
@@ -9647,11 +9652,9 @@ sp_rtnl_not_reset:
 		}
 	}
 
-	if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
-			       &bp->sp_rtnl_state)) {
-		DP(BNX2X_MSG_SP,
-		   "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
-		bnx2x_vfpf_storm_rx_mode(bp);
+	if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+		DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+		bnx2x_set_rx_mode_inner(bp);
 	}
 
 	if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
@@ -11649,9 +11652,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 	 * second status block for the L2 queue, and a third status block for
 	 * CNIC if supported.
 	 */
-	if (CNIC_SUPPORT(bp))
+	if (IS_VF(bp))
+		bp->min_msix_vec_cnt = 1;
+	else if (CNIC_SUPPORT(bp))
 		bp->min_msix_vec_cnt = 3;
-	else
+	else /* PF w/o cnic */
 		bp->min_msix_vec_cnt = 2;
 	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
 
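For reference, the minimum vector counts this change selects (summary, not part of the patch):

    /* Minimum MSI-X vectors after this change:
     *   VF:           1 - fastpath SB only, there is no default SB
     *   PF w/ CNIC:   3 - default SB + one L2 queue SB + CNIC SB
     *   PF w/o CNIC:  2 - default SB + one L2 queue SB
     */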
@@ -11868,34 +11873,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
 void bnx2x_set_rx_mode(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
 
 	if (bp->state != BNX2X_STATE_OPEN) {
 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
 		return;
+	} else {
+		/* Schedule an SP task to handle rest of change */
+		DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
+		smp_mb__before_clear_bit();
+		set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
+		smp_mb__after_clear_bit();
+		schedule_delayed_work(&bp->sp_rtnl_task, 0);
 	}
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
 
 	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
 
-	if (dev->flags & IFF_PROMISC)
+	netif_addr_lock_bh(bp->dev);
+
+	if (bp->dev->flags & IFF_PROMISC) {
 		rx_mode = BNX2X_RX_MODE_PROMISC;
-	else if ((dev->flags & IFF_ALLMULTI) ||
-		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
-		  CHIP_IS_E1(bp)))
+	} else if ((bp->dev->flags & IFF_ALLMULTI) ||
+		   ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+		    CHIP_IS_E1(bp))) {
 		rx_mode = BNX2X_RX_MODE_ALLMULTI;
-	else {
+	} else {
 		if (IS_PF(bp)) {
 			/* some multicasts */
 			if (bnx2x_set_mc_list(bp) < 0)
 				rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
+			/* release bh lock, as bnx2x_set_uc_list might sleep */
+			netif_addr_unlock_bh(bp->dev);
 			if (bnx2x_set_uc_list(bp) < 0)
 				rx_mode = BNX2X_RX_MODE_PROMISC;
+			netif_addr_lock_bh(bp->dev);
 		} else {
 			/* configuring mcast to a vf involves sleeping (when we
-			 * wait for the pf's response). Since this function is
-			 * called from non sleepable context we must schedule
-			 * a work item for this purpose
+			 * wait for the pf's response).
 			 */
 			smp_mb__before_clear_bit();
 			set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
@@ -11913,22 +11932,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
 	/* Schedule the rx_mode command */
 	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
 		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+		netif_addr_unlock_bh(bp->dev);
 		return;
 	}
 
 	if (IS_PF(bp)) {
 		bnx2x_set_storm_rx_mode(bp);
+		netif_addr_unlock_bh(bp->dev);
 	} else {
-		/* configuring rx mode to storms in a vf involves sleeping (when
-		 * we wait for the pf's response). Since this function is
-		 * called from non sleepable context we must schedule
-		 * a work item for this purpose
+		/* VF will need to request the PF to make this change, and so
+		 * the VF needs to release the bottom-half lock prior to the
+		 * request (as it will likely require sleep on the VF side)
 		 */
-		smp_mb__before_clear_bit();
-		set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
-			&bp->sp_rtnl_state);
-		smp_mb__after_clear_bit();
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+		netif_addr_unlock_bh(bp->dev);
+		bnx2x_vfpf_storm_rx_mode(bp);
 	}
 }
 
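The ndo_set_rx_mode callback runs in atomic context with the address list lock held and may not sleep, which is why the patch splits it: the callback only flags the request, and the sp_rtnl worker later calls the sleep-capable inner function. A condensed model of that hand-off (driver names, flow simplified):

    /* ndo_set_rx_mode context: atomic, cannot sleep */
    static void rx_mode_request(struct bnx2x *bp)
    {
    	smp_mb__before_clear_bit();
    	set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
    	smp_mb__after_clear_bit();
    	schedule_delayed_work(&bp->sp_rtnl_task, 0);	/* run ASAP */
    }

    /* sp_rtnl worker: process context, free to sleep and take locks */
    static void rx_mode_work(struct bnx2x *bp)
    {
    	if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state))
    		bnx2x_set_rx_mode_inner(bp);
    }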
@@ -12550,19 +12567,16 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
  * @dev: pci device
  *
  */
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
-				     int cnic_cnt, bool is_vf)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
 {
-	int pos, index;
+	int index;
 	u16 control = 0;
 
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-
 	/*
 	 * If MSI-X is not supported - return number of SBs needed to support
 	 * one fast path queue: one FP queue + SB for CNIC
 	 */
-	if (!pos) {
+	if (!pdev->msix_cap) {
 		dev_info(&pdev->dev, "no msix capability found\n");
 		return 1 + cnic_cnt;
 	}
@@ -12575,11 +12589,11 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
 	 * without the default SB.
 	 * For VFs there is no default SB, then we return (index+1).
 	 */
-	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
 
 	index = control & PCI_MSIX_FLAGS_QSIZE;
 
-	return is_vf ? index + 1 : index;
+	return index;
 }
 
 static int set_max_cos_est(int chip_id)
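pdev->msix_cap caches what pci_find_capability(pdev, PCI_CAP_ID_MSIX) used to return, so the explicit lookup can be dropped. Reading the vector count then reduces to the sketch below; note that the QSIZE field encodes the table size minus one, which is why the VF path above adds one back for its missing default SB:

    /* Sketch: MSI-X table size straight from config space */
    static int msix_table_size(struct pci_dev *pdev)
    {
    	u16 control;

    	if (!pdev->msix_cap)	/* cached MSI-X capability offset */
    		return 0;
    	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
    	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;	/* field is N-1 */
    }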
@@ -12659,10 +12673,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	is_vf = set_is_vf(ent->driver_data);
 	cnic_cnt = is_vf ? 0 : 1;
 
-	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
+	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+
+	/* add another SB for VF as it has no default SB */
+	max_non_def_sbs += is_vf ? 1 : 0;
 
 	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
-	rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+	rss_count = max_non_def_sbs - cnic_cnt;
 
 	if (rss_count < 1)
 		return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 8e627b886d7b..5ecf267dc4cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6335,6 +6335,7 @@
 #define PCI_ID_VAL2					0x438
 #define PCI_ID_VAL3					0x43c
 
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL			0x61C
 #define GRC_CONFIG_REG_PF_INIT_VF			0x624
 #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK		0xf
 /* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8f03c984550f..9fbeee522d2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
 	}
 }
 
-static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
-						 struct bnx2x_exe_queue_obj *o)
-{
-	spin_lock_bh(&o->lock);
-
-	__bnx2x_exe_queue_reset_pending(bp, o);
-
-	spin_unlock_bh(&o->lock);
-}
-
 /**
  * bnx2x_exe_queue_step - execute one execution chunk atomically
  *
@@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
  * @o:			queue
  * @ramrod_flags:	flags
  *
- * (Atomicity is ensured using the exe_queue->lock).
+ * (Should be called while holding the exe_queue->lock).
 */
 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 				       struct bnx2x_exe_queue_obj *o,
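The reworded comment reflects the kernel's usual double-underscore convention, which this series applies across bnx2x_sp.c: the `__`-prefixed variant assumes the caller already holds the lock, while the plain variant claims and releases it. A generic sketch of the pattern (types illustrative):

    #include <linux/spinlock.h>

    struct counted {
    	spinlock_t lock;
    	int count;
    };

    /* double-underscore: caller already holds c->lock */
    static void __counted_bump(struct counted *c)
    {
    	c->count++;
    }

    /* plain variant: claims and releases the lock around the __ helper */
    static void counted_bump(struct counted *c)
    {
    	spin_lock_bh(&c->lock);
    	__counted_bump(c);
    	spin_unlock_bh(&c->lock);
    }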
@@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 
 	memset(&spacer, 0, sizeof(spacer));
 
-	spin_lock_bh(&o->lock);
-
 	/* Next step should not be performed until the current is finished,
 	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
 	 * properly clear object internals without sending any command to the FW
@@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
 			__bnx2x_exe_queue_reset_pending(bp, o);
 		} else {
-			spin_unlock_bh(&o->lock);
 			return 1;
 		}
 	}
@@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 	}
 
 	/* Sanity check */
-	if (!cur_len) {
-		spin_unlock_bh(&o->lock);
+	if (!cur_len)
 		return 0;
-	}
 
 	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
 	if (rc < 0)
@@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 	 */
 	__bnx2x_exe_queue_reset_pending(bp, o);
 
-	spin_unlock_bh(&o->lock);
 	return rc;
 }
 
@@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
 	return true;
 }
 
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details: Non-blocking implementation; should be called under execution
+ *           queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+					    struct bnx2x_vlan_mac_obj *o)
+{
+	if (o->head_reader) {
+		DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+		return -EBUSY;
+	}
+
+	DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+	return 0;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details Should be called under execution queue lock; notice it might release
+ *          and reclaim it during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+					    struct bnx2x_vlan_mac_obj *o)
+{
+	int rc;
+	unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+	DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+	   ramrod_flags);
+	o->head_exe_request = false;
+	o->saved_ramrod_flags = 0;
+	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+	if (rc != 0) {
+		BNX2X_ERR("execution of pending commands failed with rc %d\n",
+			  rc);
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#endif
+	}
+}
+
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ * @ramrod_flags:	ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+				    struct bnx2x_vlan_mac_obj *o,
+				    unsigned long ramrod_flags)
+{
+	o->head_exe_request = true;
+	o->saved_ramrod_flags = ramrod_flags;
+	DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+	   ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ *          execution exists, it would perform it - possibly releasing and
+ *          reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+					    struct bnx2x_vlan_mac_obj *o)
+{
+	/* It's possible a new pending execution was added since this writer
+	 * executed. If so, execute again. [Ad infinitum]
+	 */
+	while (o->head_exe_request) {
+		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+		__bnx2x_vlan_mac_h_exec_pending(bp, o);
+	}
+}
+
+/**
+ * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would perform it -
+ *          possibly releasing and reclaiming the execution queue lock.
+ */
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+				   struct bnx2x_vlan_mac_obj *o)
+{
+	spin_lock_bh(&o->exe_queue.lock);
+	__bnx2x_vlan_mac_h_write_unlock(bp, o);
+	spin_unlock_bh(&o->exe_queue.lock);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ *          release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+					struct bnx2x_vlan_mac_obj *o)
+{
+	/* If we got here, we're holding lock --> no WRITER exists */
+	o->head_reader++;
+	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+	   o->head_reader);
+
+	return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o)
+{
+	int rc;
+
+	spin_lock_bh(&o->exe_queue.lock);
+	rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+	spin_unlock_bh(&o->exe_queue.lock);
+
+	return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ *          execution exists, it would be performed if this was the last
+ *          reader. possibly releasing and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+					   struct bnx2x_vlan_mac_obj *o)
+{
+	if (!o->head_reader) {
+		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#endif
+	} else {
+		o->head_reader--;
+		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+		   o->head_reader);
+	}
+
+	/* It's possible a new pending execution was added, and that this reader
+	 * was last - if so we need to execute the command.
+	 */
+	if (!o->head_reader && o->head_exe_request) {
+		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+		/* Writer release will do the trick */
+		__bnx2x_vlan_mac_h_write_unlock(bp, o);
+	}
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would be performed if this
+ *          was the last reader. Claims and releases the execution queue lock
+ *          during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o)
+{
+	spin_lock_bh(&o->exe_queue.lock);
+	__bnx2x_vlan_mac_h_read_unlock(bp, o);
+	spin_unlock_bh(&o->exe_queue.lock);
+}
+
 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
 				int n, u8 *base, u8 stride, u8 size)
 {
 	struct bnx2x_vlan_mac_registry_elem *pos;
 	u8 *next = base;
 	int counter = 0;
+	int read_lock;
+
+	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+	if (read_lock != 0)
+		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
 
 	/* traverse list */
 	list_for_each_entry(pos, &o->head, link) {
@@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
 			next += stride + size;
 		}
 	}
+
+	if (read_lock == 0) {
+		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+		bnx2x_vlan_mac_h_read_unlock(bp, o);
+	}
+
 	return counter * ETH_ALEN;
 }
 
@@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
 	return -EBUSY;
 }
 
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+					 struct bnx2x_vlan_mac_obj *o,
+					 unsigned long *ramrod_flags)
+{
+	int rc = 0;
+
+	spin_lock_bh(&o->exe_queue.lock);
+
+	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+	rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+	if (rc != 0) {
+		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+		/* Calling function should not differentiate between this case
+		 * and the case in which there is already a pending ramrod
+		 */
+		rc = 1;
+	} else {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+	}
+	spin_unlock_bh(&o->exe_queue.lock);
+
+	return rc;
+}
+
 /**
  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
  *
@@ -1414,19 +1637,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
 	struct bnx2x_raw_obj *r = &o->raw;
 	int rc;
 
+	/* Clearing the pending list & raw state should be made
+	 * atomically (as execution flow assumes they represent the same).
+	 */
+	spin_lock_bh(&o->exe_queue.lock);
+
 	/* Reset pending list */
-	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+	__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
 
 	/* Clear pending */
 	r->clear_pending(r);
 
+	spin_unlock_bh(&o->exe_queue.lock);
+
 	/* If ramrod failed this is most likely a SW bug */
 	if (cqe->message.error)
 		return -EINVAL;
 
 	/* Run the next bulk of pending commands if requested */
 	if (test_bit(RAMROD_CONT, ramrod_flags)) {
-		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
 		if (rc < 0)
 			return rc;
 	}
@@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
  * @p:
  *
  */
-int bnx2x_config_vlan_mac(
-	struct bnx2x *bp,
-	struct bnx2x_vlan_mac_ramrod_params *p)
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_ramrod_params *p)
 {
 	int rc = 0;
 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac(struct bnx2x *bp,
 	/* Execute commands if required */
 	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
 	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
-		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+						   &p->ramrod_flags);
 		if (rc < 0)
 			return rc;
 	}
@@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac(struct bnx2x *bp,
 			return rc;
 
 			/* Make a next step */
-			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
-						  ramrod_flags);
+			rc = __bnx2x_vlan_mac_execute_step(bp,
+							   p->vlan_mac_obj,
+							   &p->ramrod_flags);
 			if (rc < 0)
 				return rc;
 		}
@@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 					  unsigned long *ramrod_flags)
 {
 	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
-	int rc = 0;
 	struct bnx2x_vlan_mac_ramrod_params p;
 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+	int read_lock;
+	int rc = 0;
 
 	/* Clear pending commands first */
 
@@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
 	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
 
+	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+	if (read_lock != 0)
+		return read_lock;
+
 	list_for_each_entry(pos, &o->head, link) {
 		if (pos->vlan_mac_flags == *vlan_mac_flags) {
 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
@@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 			rc = bnx2x_config_vlan_mac(bp, &p);
 			if (rc < 0) {
 				BNX2X_ERR("Failed to add a new DEL command\n");
+				bnx2x_vlan_mac_h_read_unlock(bp, o);
 				return rc;
 			}
 		}
 	}
 
+	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+	bnx2x_vlan_mac_h_read_unlock(bp, o);
+
 	p.ramrod_flags = *ramrod_flags;
 	__set_bit(RAMROD_CONT, &p.ramrod_flags);
 
@@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
 					  struct bnx2x_credit_pool_obj *vlans_pool)
 {
 	INIT_LIST_HEAD(&o->head);
+	o->head_reader = 0;
+	o->head_exe_request = false;
+	o->saved_ramrod_flags = 0;
 
 	o->macs_pool = macs_pool;
 	o->vlans_pool = vlans_pool;
@@ -4171,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
 	rss_obj->config_rss = bnx2x_setup_rss;
 }
 
+int validate_vlan_mac(struct bnx2x *bp,
+		      struct bnx2x_vlan_mac_obj *vlan_mac)
+{
+	if (!vlan_mac->get_n_elements) {
+		BNX2X_ERR("vlan mac object was not initialized\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /********************** Queue state object ***********************************/
 
 /**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 798dfe996733..658f4e33abf9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,12 @@ struct bnx2x_vlan_mac_obj {
 	 * entries.
 	 */
 	struct list_head		head;
+	/* Implement a simple reader/writer lock on the head list.
+	 * all these fields should only be accessed under the exe_queue lock
+	 */
+	u8		head_reader; /* Num. of readers accessing head list */
+	bool		head_exe_request; /* Pending execution request. */
+	unsigned long	saved_ramrod_flags; /* Ramrods of pending execution */
 
 	/* TODO: Add it's initialization in the init functions */
 	struct bnx2x_exe_queue_obj	exe_queue;
@@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
 			     struct bnx2x_credit_pool_obj *macs_pool,
 			     struct bnx2x_credit_pool_obj *vlans_pool);
 
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+				struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+				   struct bnx2x_vlan_mac_obj *o);
 int bnx2x_config_vlan_mac(struct bnx2x *bp,
 			  struct bnx2x_vlan_mac_ramrod_params *p);
 
 int bnx2x_vlan_mac_move(struct bnx2x *bp,
 			struct bnx2x_vlan_mac_ramrod_params *p,
@@ -1393,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
 			     u8 *ind_table);
 
+int validate_vlan_mac(struct bnx2x *bp,
+		      struct bnx2x_vlan_mac_obj *vlan_mac);
 #endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e8706e19f96f..b26eb83069b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state {
 	BNX2X_VFOP_QTEARDOWN_DONE
 };
 
+enum bnx2x_vfop_rss_state {
+	BNX2X_VFOP_RSS_CONFIG,
+	BNX2X_VFOP_RSS_DONE
+};
+
 #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
 
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
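The handler these two states drive is added elsewhere in this series; a hypothetical skeleton of its shape, following the driver's vfop convention (field names are assumptions):

    static void bnx2x_vfop_rss_sketch(struct bnx2x *bp, struct bnx2x_virtf *vf)
    {
    	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
    	struct bnx2x_config_rss_params *rss = &vfop->op_p->rss; /* assumed */

    	switch (vfop->state) {
    	case BNX2X_VFOP_RSS_CONFIG:
    		/* issue the config; re-enter in DONE once it completes */
    		vfop->state = BNX2X_VFOP_RSS_DONE;
    		vfop->rc = bnx2x_config_rss(bp, rss);
    		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
    		return;
    	case BNX2X_VFOP_RSS_DONE:
    		bnx2x_vfop_end(bp, vf, vfop);
    		return;
    	default:
    		bnx2x_vfop_default(vfop->state);
    	}
    }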
@@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 
-	if (vfq_is_leading(q)) {
-		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
-		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
-	}
-
 	/* Setup-op rx parameters */
 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
@@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
 		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
 			DP(BNX2X_MSG_IOV,
 			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
-			goto op_done;
+
+			/* next state */
+			vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 		}
 
 		/* next state */
@@ -432,8 +436,10 @@ op_err:
 op_done:
 	case BNX2X_VFOP_QDTOR_DONE:
 		/* invalidate the context */
-		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
-		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+		if (qdtor->cxt) {
+			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+		}
 		bnx2x_vfop_end(bp, vf, vfop);
 		return;
 	default:
@@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
 					     cmd->block);
 	}
-	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
+	   vf->abs_vfid, vfop->rc);
 	return -ENOMEM;
 }
 
@@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 {
 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 	if (vf) {
+		/* the first igu entry belonging to VFs of this PF */
+		if (!BP_VFDB(bp)->first_vf_igu_entry)
+			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+		/* the first igu entry belonging to this VF */
 		if (!vf_sb_count(vf))
 			vf->igu_base_id = igu_sb_id;
+
 		++vf_sb_count(vf);
+		++vf->sb_count;
 	}
+	BP_VFDB(bp)->vf_sbs_pool++;
 }
 
 /* VFOP MAC/VLAN helpers */
@@ -491,12 +506,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp,
 	 * and a valid credit counter
 	 */
 	if (!vfop->rc && args->credit) {
-		int cnt = 0;
 		struct list_head *pos;
+		int read_lock;
+		int cnt = 0;
+
+		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+		if (read_lock)
+			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
 
 		list_for_each(pos, &obj->head)
 			cnt++;
 
+		if (!read_lock)
+			bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
 		atomic_set(args->credit, cnt);
 	}
 }
@@ -692,6 +715,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
 				     int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -711,6 +735,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
 		/* set extra args */
@@ -731,6 +758,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 			    int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -753,6 +781,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
 		/* set extra args */
@@ -773,6 +804,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
 			    int qid, u16 vid, bool add)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -793,6 +825,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
 		ramrod->user_req.u.vlan.vlan = vid;
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -812,6 +847,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
 				      int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -831,6 +867,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -851,6 +890,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 			     int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -870,6 +910,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -980,21 +1023,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	case BNX2X_VFOP_QFLR_CLR_VLAN:
 		/* vlan-clear-all: driver-only, don't consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
+			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
+							      true);
 		if (vfop->rc)
 			goto op_err;
-		return;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_QFLR_CLR_MAC:
 		/* mac-clear-all: driver only consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
-		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
+			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
+							     true);
 		DP(BNX2X_MSG_IOV,
 		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
 		   vf->abs_vfid, vfop->rc);
 		if (vfop->rc)
 			goto op_err;
-		return;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_QFLR_TERMINATE:
 		qstate = &vfop->op_p->qctor.qstate;
@@ -1291,10 +1338,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
+	/* for non-leading queues skip directly to the qdown state */
 	if (vfop) {
 		vfop->args.qx.qid = qid;
-		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
-				 bnx2x_vfop_qdown, cmd->done);
+		bnx2x_vfop_opset(qid == LEADING_IDX ?
+				 BNX2X_VFOP_QTEARDOWN_RXMODE :
+				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
+				 cmd->done);
 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
 					     cmd->block);
 	}
@@ -1447,15 +1497,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
  * both known
  */
 static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
+	struct vf_pf_resc_request *resc = &vf->alloc_resc;
 	u16 vlan_count = 0;
 
 	/* will be set only during VF-ACQUIRE */
 	resc->num_rxqs = 0;
 	resc->num_txqs = 0;
 
-	/* no credit calculcis for macs (just yet) */
+	/* no credit calculations for macs (just yet) */
 	resc->num_mac_filters = 1;
 
 	/* divvy up vlan rules */
@@ -1467,13 +1518,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
 	resc->num_mc_filters = 0;
 
 	/* num_sbs already set */
+	resc->num_sbs = vf->sb_count;
 }
 
 /* FLR routines: */
 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
 	/* reset the state variables */
-	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+	bnx2x_iov_static_resc(bp, vf);
 	vf->state = VF_FREE;
 }
 
@@ -1693,8 +1745,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
1693 /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 1745 /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
 1694 * the PF doorbell size although the two are independent. 1746
1695 */ 1747 */
1696 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 1748 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1697 BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
1698 1749
1699 /* No security checks for now - 1750 /* No security checks for now -
1700 * configure single rule (out of 16) mask = 0x1, value = 0x0, 1751 * configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1761,7 +1812,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1761{ 1812{
1762 int sb_id; 1813 int sb_id;
1763 u32 val; 1814 u32 val;
1764 u8 fid; 1815 u8 fid, current_pf = 0;
1765 1816
1766 /* IGU in normal mode - read CAM */ 1817 /* IGU in normal mode - read CAM */
1767 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 1818 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
@@ -1769,16 +1820,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1769 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 1820 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1770 continue; 1821 continue;
1771 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1822 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1772 if (!(fid & IGU_FID_ENCODE_IS_PF)) 1823 if (fid & IGU_FID_ENCODE_IS_PF)
1824 current_pf = fid & IGU_FID_PF_NUM_MASK;
1825 else if (current_pf == BP_ABS_FUNC(bp))
1773 bnx2x_vf_set_igu_info(bp, sb_id, 1826 bnx2x_vf_set_igu_info(bp, sb_id,
1774 (fid & IGU_FID_VF_NUM_MASK)); 1827 (fid & IGU_FID_VF_NUM_MASK));
1775
1776 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1828 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1777 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 1829 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1778 ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) : 1830 ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1779 (fid & IGU_FID_VF_NUM_MASK)), sb_id, 1831 (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1780 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 1832 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1781 } 1833 }
1834 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1782} 1835}
1783 1836
1784static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 1837static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
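Note on the CAM walk above: IGU CAM entries for a PF's VFs follow that PF's own entry, so the loop remembers the most recent PF entry and only claims VF entries inside its own block (the grouping is inferred from the current_pf tracking). A minimal decode sketch for one entry, using the accessors named above and the IGU_ENTRY_SIZE constant defined later in this patch:

        /* sketch: decode one IGU CAM entry as the loop above does */
        u32 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * IGU_ENTRY_SIZE);

        if (val & IGU_REG_MAPPING_MEMORY_VALID) {
                u8 fid = GET_FIELD(val, IGU_REG_MAPPING_MEMORY_FID);

                if (fid & IGU_FID_ENCODE_IS_PF)
                        /* a PF entry opens that PF's block of SBs */
                        current_pf = fid & IGU_FID_PF_NUM_MASK;
                else if (current_pf == BP_ABS_FUNC(bp))
                        /* a VF entry inside our block - record it */
                        bnx2x_vf_set_igu_info(bp, sb_id,
                                              fid & IGU_FID_VF_NUM_MASK);
        }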
@@ -1844,23 +1897,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1844 return 0; 1897 return 0;
1845} 1898}
1846 1899
1847static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
1848{
1849 int i;
1850 u8 queue_count = 0;
1851
1852 if (IS_SRIOV(bp))
1853 for_each_vf(bp, i)
1854 queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1855
1856 return queue_count;
1857}
1858
1859/* must be called after PF bars are mapped */ 1900/* must be called after PF bars are mapped */
1860int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 1901int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1861 int num_vfs_param) 1902 int num_vfs_param)
1862{ 1903{
1863 int err, i, qcount; 1904 int err, i;
1864 struct bnx2x_sriov *iov; 1905 struct bnx2x_sriov *iov;
1865 struct pci_dev *dev = bp->pdev; 1906 struct pci_dev *dev = bp->pdev;
1866 1907
@@ -1958,12 +1999,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1958 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1999 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1959 bnx2x_get_vf_igu_cam_info(bp); 2000 bnx2x_get_vf_igu_cam_info(bp);
1960 2001
1961 /* get the total queue count and allocate the global queue arrays */
1962 qcount = bnx2x_iov_get_max_queue_count(bp);
1963
1964 /* allocate the queue arrays for all VFs */ 2002 /* allocate the queue arrays for all VFs */
1965 bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), 2003 bp->vfdb->vfqs = kzalloc(
1966 GFP_KERNEL); 2004 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
2005 GFP_KERNEL);
2006
2007 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
2008
1967 if (!bp->vfdb->vfqs) { 2009 if (!bp->vfdb->vfqs) {
1968 BNX2X_ERR("failed to allocate vf queue array\n"); 2010 BNX2X_ERR("failed to allocate vf queue array\n");
1969 err = -ENOMEM; 2011 err = -ENOMEM;
@@ -2084,49 +2126,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2084 q_type); 2126 q_type);
2085 2127
2086 DP(BNX2X_MSG_IOV, 2128 DP(BNX2X_MSG_IOV,
2087 "initialized vf %d's queue object. func id set to %d\n", 2129 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
2088 vf->abs_vfid, q->sp_obj.func_id); 2130 vf->abs_vfid, q->sp_obj.func_id, q->cid);
2089
2090 /* mac/vlan objects are per queue, but only those
2091 * that belong to the leading queue are initialized
2092 */
2093 if (vfq_is_leading(q)) {
2094 /* mac */
2095 bnx2x_init_mac_obj(bp, &q->mac_obj,
2096 cl_id, q->cid, func_id,
2097 bnx2x_vf_sp(bp, vf, mac_rdata),
2098 bnx2x_vf_sp_map(bp, vf, mac_rdata),
2099 BNX2X_FILTER_MAC_PENDING,
2100 &vf->filter_state,
2101 BNX2X_OBJ_TYPE_RX_TX,
2102 &bp->macs_pool);
2103 /* vlan */
2104 bnx2x_init_vlan_obj(bp, &q->vlan_obj,
2105 cl_id, q->cid, func_id,
2106 bnx2x_vf_sp(bp, vf, vlan_rdata),
2107 bnx2x_vf_sp_map(bp, vf, vlan_rdata),
2108 BNX2X_FILTER_VLAN_PENDING,
2109 &vf->filter_state,
2110 BNX2X_OBJ_TYPE_RX_TX,
2111 &bp->vlans_pool);
2112
2113 /* mcast */
2114 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
2115 q->cid, func_id, func_id,
2116 bnx2x_vf_sp(bp, vf, mcast_rdata),
2117 bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2118 BNX2X_FILTER_MCAST_PENDING,
2119 &vf->filter_state,
2120 BNX2X_OBJ_TYPE_RX_TX);
2121
2122 vf->leading_rss = cl_id;
2123 }
2124} 2131}
2125 2132
2126/* called by bnx2x_nic_load */ 2133/* called by bnx2x_nic_load */
2127int bnx2x_iov_nic_init(struct bnx2x *bp) 2134int bnx2x_iov_nic_init(struct bnx2x *bp)
2128{ 2135{
2129 int vfid, qcount, i; 2136 int vfid;
2130 2137
2131 if (!IS_SRIOV(bp)) { 2138 if (!IS_SRIOV(bp)) {
2132 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2139 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
@@ -2155,7 +2162,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
2155 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 2162 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
2156 2163
2157 /* init statically provisioned resources */ 2164 /* init statically provisioned resources */
2158 bnx2x_iov_static_resc(bp, &vf->alloc_resc); 2165 bnx2x_iov_static_resc(bp, vf);
2159 2166
2160 /* queues are initialized during VF-ACQUIRE */ 2167 /* queues are initialized during VF-ACQUIRE */
2161 2168
@@ -2191,13 +2198,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
2191 } 2198 }
2192 2199
2193 /* Final VF init */ 2200 /* Final VF init */
2194 qcount = 0; 2201 for_each_vf(bp, vfid) {
2195 for_each_vf(bp, i) { 2202 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2196 struct bnx2x_virtf *vf = BP_VF(bp, i);
2197 2203
2198 /* fill in the BDF and bars */ 2204 /* fill in the BDF and bars */
2199 vf->bus = bnx2x_vf_bus(bp, i); 2205 vf->bus = bnx2x_vf_bus(bp, vfid);
2200 vf->devfn = bnx2x_vf_devfn(bp, i); 2206 vf->devfn = bnx2x_vf_devfn(bp, vfid);
2201 bnx2x_vf_set_bars(bp, vf); 2207 bnx2x_vf_set_bars(bp, vf);
2202 2208
2203 DP(BNX2X_MSG_IOV, 2209 DP(BNX2X_MSG_IOV,
@@ -2206,10 +2212,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
2206 (unsigned)vf->bars[0].bar, vf->bars[0].size, 2212 (unsigned)vf->bars[0].bar, vf->bars[0].size,
2207 (unsigned)vf->bars[1].bar, vf->bars[1].size, 2213 (unsigned)vf->bars[1].bar, vf->bars[1].size,
2208 (unsigned)vf->bars[2].bar, vf->bars[2].size); 2214 (unsigned)vf->bars[2].bar, vf->bars[2].size);
2209
2210 /* set local queue arrays */
2211 vf->vfqs = &bp->vfdb->vfqs[qcount];
2212 qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
2213 } 2215 }
2214 2216
2215 return 0; 2217 return 0;
@@ -2515,6 +2517,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2515 for_each_vfq(vf, j) { 2517 for_each_vfq(vf, j) {
2516 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 2518 struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2517 2519
2520 dma_addr_t q_stats_addr =
2521 vf->fw_stat_map + j * vf->stats_stride;
2522
 2518 /* collect stats from active queues only */ 2523
2519 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 2524 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2520 BNX2X_Q_LOGICAL_STATE_STOPPED) 2525 BNX2X_Q_LOGICAL_STATE_STOPPED)
@@ -2522,13 +2527,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2522 2527
2523 /* create stats query entry for this queue */ 2528 /* create stats query entry for this queue */
2524 cur_query_entry->kind = STATS_TYPE_QUEUE; 2529 cur_query_entry->kind = STATS_TYPE_QUEUE;
2525 cur_query_entry->index = vfq_cl_id(vf, rxq); 2530 cur_query_entry->index = vfq_stat_id(vf, rxq);
2526 cur_query_entry->funcID = 2531 cur_query_entry->funcID =
2527 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 2532 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2528 cur_query_entry->address.hi = 2533 cur_query_entry->address.hi =
2529 cpu_to_le32(U64_HI(vf->fw_stat_map)); 2534 cpu_to_le32(U64_HI(q_stats_addr));
2530 cur_query_entry->address.lo = 2535 cur_query_entry->address.lo =
2531 cpu_to_le32(U64_LO(vf->fw_stat_map)); 2536 cpu_to_le32(U64_LO(q_stats_addr));
2532 DP(BNX2X_MSG_IOV, 2537 DP(BNX2X_MSG_IOV,
2533 "added address %x %x for vf %d queue %d client %d\n", 2538 "added address %x %x for vf %d queue %d client %d\n",
2534 cur_query_entry->address.hi, 2539 cur_query_entry->address.hi,
@@ -2537,6 +2542,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2537 cur_query_entry++; 2542 cur_query_entry++;
2538 cur_data_offset += sizeof(struct per_queue_stats); 2543 cur_data_offset += sizeof(struct per_queue_stats);
2539 stats_count++; 2544 stats_count++;
2545
2546 /* all stats are coalesced to the leading queue */
2547 if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
2548 break;
2540 } 2549 }
2541 } 2550 }
2542 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 2551 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
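Worked example for the stats hunk above: with stride s = sizeof(struct per_queue_stats) (the value the in-tree VF reports in its INIT tlv, see the bnx2x_vfpf.c hunk below), queue j's entry points at fw_stat_map + j * s, so three active queues report into base, base + s and base + 2s. A VF that set VF_CFG_STATS_COALESCE gets a single entry (the loop breaks after j == 0), with all of its queues mapped onto the leading queue's stats client via vfq_stat_id().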
@@ -2555,6 +2564,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
2555 for_each_vf(bp, i) { 2564 for_each_vf(bp, i) {
2556 struct bnx2x_virtf *vf = BP_VF(bp, i); 2565 struct bnx2x_virtf *vf = BP_VF(bp, i);
2557 2566
2567 if (!vf) {
2568 BNX2X_ERR("VF was null! skipping...\n");
2569 continue;
2570 }
2571
2558 if (!list_empty(&vf->op_list_head) && 2572 if (!list_empty(&vf->op_list_head) &&
2559 atomic_read(&vf->op_in_progress)) { 2573 atomic_read(&vf->op_in_progress)) {
2560 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); 2574 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
@@ -2702,7 +2716,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2702 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2716 struct bnx2x_vf_queue *q = vfq_get(vf, i);
2703 2717
2704 if (!q) { 2718 if (!q) {
2705 DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); 2719 BNX2X_ERR("q number %d was not allocated\n", i);
2706 return -EINVAL; 2720 return -EINVAL;
2707 } 2721 }
2708 2722
@@ -2930,6 +2944,43 @@ op_done:
2930 bnx2x_vfop_end(bp, vf, vfop); 2944 bnx2x_vfop_end(bp, vf, vfop);
2931} 2945}
2932 2946
2947static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
2948{
2949 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2950 enum bnx2x_vfop_rss_state state;
2951
2952 if (!vfop) {
2953 BNX2X_ERR("vfop was null\n");
2954 return;
2955 }
2956
2957 state = vfop->state;
2958 bnx2x_vfop_reset_wq(vf);
2959
2960 if (vfop->rc < 0)
2961 goto op_err;
2962
2963 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2964
2965 switch (state) {
2966 case BNX2X_VFOP_RSS_CONFIG:
2967 /* next state */
2968 vfop->state = BNX2X_VFOP_RSS_DONE;
2969 bnx2x_config_rss(bp, &vfop->op_p->rss);
2970 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
2971op_err:
2972 BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
2973op_done:
2974 case BNX2X_VFOP_RSS_DONE:
2975 bnx2x_vfop_end(bp, vf, vfop);
2976 return;
2977 default:
2978 bnx2x_vfop_default(state);
2979 }
2980op_pending:
2981 return;
2982}
2983
2933int bnx2x_vfop_release_cmd(struct bnx2x *bp, 2984int bnx2x_vfop_release_cmd(struct bnx2x *bp,
2934 struct bnx2x_virtf *vf, 2985 struct bnx2x_virtf *vf,
2935 struct bnx2x_vfop_cmd *cmd) 2986 struct bnx2x_vfop_cmd *cmd)
@@ -2944,6 +2995,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
2944 return -ENOMEM; 2995 return -ENOMEM;
2945} 2996}
2946 2997
2998int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
2999 struct bnx2x_virtf *vf,
3000 struct bnx2x_vfop_cmd *cmd)
3001{
3002 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3003
3004 if (vfop) {
3005 bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
3006 cmd->done);
3007 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
3008 cmd->block);
3009 }
3010 return -ENOMEM;
3011}
3012
2947/* VF release ~ VF close + VF release-resources 3013/* VF release ~ VF close + VF release-resources
2948 * Release is the ultimate SW shutdown and is called whenever an 3014 * Release is the ultimate SW shutdown and is called whenever an
2949 * irrecoverable error is encountered. 3015 * irrecoverable error is encountered.
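The new RSS vfop is driven from the VF-PF mailbox; a usage sketch matching the bnx2x_vf_mbx_update_rss() handler added in bnx2x_vfpf.c later in this patch:

        struct bnx2x_vfop_cmd cmd = {
                .done   = bnx2x_vf_mbx_resp,    /* answer the VF on completion */
                .block  = false,                /* run asynchronously */
        };

        vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);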
@@ -2955,6 +3021,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
2955 .block = block, 3021 .block = block,
2956 }; 3022 };
2957 int rc; 3023 int rc;
3024
3025 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2958 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 3026 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2959 3027
2960 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); 3028 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
@@ -2983,6 +3051,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
2983void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3051void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2984 enum channel_tlvs tlv) 3052 enum channel_tlvs tlv)
2985{ 3053{
3054 /* we don't lock the channel for unsupported tlvs */
3055 if (!bnx2x_tlv_supported(tlv)) {
3056 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
3057 return;
3058 }
3059
2986 /* lock the channel */ 3060 /* lock the channel */
2987 mutex_lock(&vf->op_mutex); 3061 mutex_lock(&vf->op_mutex);
2988 3062
@@ -2997,19 +3071,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2997void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3071void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2998 enum channel_tlvs expected_tlv) 3072 enum channel_tlvs expected_tlv)
2999{ 3073{
3074 enum channel_tlvs current_tlv;
3075
3076 if (!vf) {
3077 BNX2X_ERR("VF was %p\n", vf);
3078 return;
3079 }
3080
3081 current_tlv = vf->op_current;
3082
3083 /* we don't unlock the channel for unsupported tlvs */
3084 if (!bnx2x_tlv_supported(expected_tlv))
3085 return;
3086
3000 WARN(expected_tlv != vf->op_current, 3087 WARN(expected_tlv != vf->op_current,
3001 "lock mismatch: expected %d found %d", expected_tlv, 3088 "lock mismatch: expected %d found %d", expected_tlv,
3002 vf->op_current); 3089 vf->op_current);
3003 3090
3091 /* record the locking op */
3092 vf->op_current = CHANNEL_TLV_NONE;
3093
 3004 /* unlock the channel */ 3094 /* unlock the channel */
3005 mutex_unlock(&vf->op_mutex); 3095 mutex_unlock(&vf->op_mutex);
3006 3096
3007 /* log the unlock */ 3097 /* log the unlock */
3008 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 3098 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
3009 vf->abs_vfid, vf->op_current); 3099 vf->abs_vfid, vf->op_current);
3010
3011 /* record the locking op */
3012 vf->op_current = CHANNEL_TLV_NONE;
3013} 3100}
3014 3101
3015int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 3102int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
@@ -3040,11 +3127,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3040 return bnx2x_enable_sriov(bp); 3127 return bnx2x_enable_sriov(bp);
3041 } 3128 }
3042} 3129}
3130#define IGU_ENTRY_SIZE 4
3043 3131
3044int bnx2x_enable_sriov(struct bnx2x *bp) 3132int bnx2x_enable_sriov(struct bnx2x *bp)
3045{ 3133{
3046 int rc = 0, req_vfs = bp->requested_nr_virtfn; 3134 int rc = 0, req_vfs = bp->requested_nr_virtfn;
3135 int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
3136 u32 igu_entry, address;
3137 u16 num_vf_queues;
3138
3139 if (req_vfs == 0)
3140 return 0;
3141
3142 first_vf = bp->vfdb->sriov.first_vf_in_pf;
3143
3144 /* statically distribute vf sb pool between VFs */
3145 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
3146 BP_VFDB(bp)->vf_sbs_pool / req_vfs);
3147
3148 /* zero previous values learned from igu cam */
3149 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
3150 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3151
3152 vf->sb_count = 0;
3153 vf_sb_count(BP_VF(bp, vf_idx)) = 0;
3154 }
3155 bp->vfdb->vf_sbs_pool = 0;
3156
3157 /* prepare IGU cam */
3158 sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
3159 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
3160 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3161 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
3162 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
3163 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
3164 IGU_REG_MAPPING_MEMORY_VALID;
3165 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
3166 sb_idx, vf_idx);
3167 REG_WR(bp, address, igu_entry);
3168 sb_idx++;
3169 address += IGU_ENTRY_SIZE;
3170 }
3171 }
3172
3173 /* Reinitialize vf database according to igu cam */
3174 bnx2x_get_vf_igu_cam_info(bp);
3175
3176 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
3177 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
3178
3179 qcount = 0;
3180 for_each_vf(bp, vf_idx) {
3181 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3182
3183 /* set local queue arrays */
3184 vf->vfqs = &bp->vfdb->vfqs[qcount];
3185 qcount += vf_sb_count(vf);
3186 }
3047 3187
3188 /* prepare msix vectors in VF configuration space */
3189 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3190 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3191 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3192 num_vf_queues);
3193 }
3194 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3195
 3196 /* enable sriov. This will probe all the VFs, and consequently cause
3197 * the "acquire" messages to appear on the VF PF channel.
3198 */
3199 DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
3200 pci_disable_sriov(bp->pdev);
3048 rc = pci_enable_sriov(bp->pdev, req_vfs); 3201 rc = pci_enable_sriov(bp->pdev, req_vfs);
3049 if (rc) { 3202 if (rc) {
3050 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3203 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
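The static split in this hunk is plain integer arithmetic. Hypothetical numbers: a PF whose CAM re-read yields a 64-entry vf_sbs_pool, with 8 requested VFs and BNX2X_VF_MAX_QUEUES of at least 8, gives each VF min_t(u16, BNX2X_VF_MAX_QUEUES, 64 / 8) = 8 status blocks; the CAM receives 8 consecutive VALID entries per VF, and once bnx2x_get_vf_igu_cam_info() re-reads it, VF k's queue array starts at vfqs[8 * k].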
@@ -3072,9 +3225,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
3072 pci_disable_sriov(bp->pdev); 3225 pci_disable_sriov(bp->pdev);
3073} 3226}
3074 3227
3075static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 3228int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
3076 struct bnx2x_virtf **vf, 3229 struct pf_vf_bulletin_content **bulletin)
3077 struct pf_vf_bulletin_content **bulletin)
3078{ 3230{
3079 if (bp->state != BNX2X_STATE_OPEN) { 3231 if (bp->state != BNX2X_STATE_OPEN) {
3080 BNX2X_ERR("vf ndo called though PF is down\n"); 3232 BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3097,7 +3249,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
3097 *bulletin = BP_VF_BULLETIN(bp, vfidx); 3249 *bulletin = BP_VF_BULLETIN(bp, vfidx);
3098 3250
3099 if (!*vf) { 3251 if (!*vf) {
3100 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", 3252 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
3253 vfidx);
3254 return -EINVAL;
3255 }
3256
3257 if (!(*vf)->vfqs) {
3258 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
3101 vfidx); 3259 vfidx);
3102 return -EINVAL; 3260 return -EINVAL;
3103 } 3261 }
@@ -3125,8 +3283,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3125 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3283 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3126 if (rc) 3284 if (rc)
3127 return rc; 3285 return rc;
3128 mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3286 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3129 vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); 3287 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3130 if (!mac_obj || !vlan_obj) { 3288 if (!mac_obj || !vlan_obj) {
3131 BNX2X_ERR("VF partially initialized\n"); 3289 BNX2X_ERR("VF partially initialized\n");
3132 return -EINVAL; 3290 return -EINVAL;
@@ -3138,10 +3296,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3138 ivi->spoofchk = 1; /*always enabled */ 3296 ivi->spoofchk = 1; /*always enabled */
3139 if (vf->state == VF_ENABLED) { 3297 if (vf->state == VF_ENABLED) {
3140 /* mac and vlan are in vlan_mac objects */ 3298 /* mac and vlan are in vlan_mac objects */
3141 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3299 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
3142 0, ETH_ALEN); 3300 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3143 vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, 3301 0, ETH_ALEN);
3144 0, VLAN_HLEN); 3302 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
3303 vlan_obj->get_n_elements(bp, vlan_obj, 1,
3304 (u8 *)&ivi->vlan, 0,
3305 VLAN_HLEN);
3145 } else { 3306 } else {
3146 /* mac */ 3307 /* mac */
3147 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3308 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3209,14 +3370,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3209 return rc; 3370 return rc;
3210 } 3371 }
3211 3372
3212 /* is vf initialized and queue set up? */
3213 q_logical_state = 3373 q_logical_state =
3214 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); 3374 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
3215 if (vf->state == VF_ENABLED && 3375 if (vf->state == VF_ENABLED &&
3216 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3376 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3217 /* configure the mac in device on this vf's queue */ 3377 /* configure the mac in device on this vf's queue */
3218 unsigned long ramrod_flags = 0; 3378 unsigned long ramrod_flags = 0;
3219 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3379 struct bnx2x_vlan_mac_obj *mac_obj =
3380 &bnx2x_leading_vfq(vf, mac_obj);
3381
3382 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
3383 if (rc)
3384 return rc;
3220 3385
3221 /* must lock vfpf channel to protect against vf flows */ 3386 /* must lock vfpf channel to protect against vf flows */
3222 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3387 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
@@ -3276,18 +3441,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3276 3441
3277 /* is vf initialized and queue set up? */ 3442 /* is vf initialized and queue set up? */
3278 q_logical_state = 3443 q_logical_state =
3279 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); 3444 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
3280 if (vf->state == VF_ENABLED && 3445 if (vf->state == VF_ENABLED &&
3281 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3446 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3282 /* configure the vlan in device on this vf's queue */ 3447 /* configure the vlan in device on this vf's queue */
3283 unsigned long ramrod_flags = 0; 3448 unsigned long ramrod_flags = 0;
3284 unsigned long vlan_mac_flags = 0; 3449 unsigned long vlan_mac_flags = 0;
3285 struct bnx2x_vlan_mac_obj *vlan_obj = 3450 struct bnx2x_vlan_mac_obj *vlan_obj =
3286 &bnx2x_vfq(vf, 0, vlan_obj); 3451 &bnx2x_leading_vfq(vf, vlan_obj);
3287 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 3452 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3288 struct bnx2x_queue_state_params q_params = {NULL}; 3453 struct bnx2x_queue_state_params q_params = {NULL};
3289 struct bnx2x_queue_update_params *update_params; 3454 struct bnx2x_queue_update_params *update_params;
3290 3455
 3456 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj));
3457 if (rc)
3458 return rc;
3291 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3459 memset(&ramrod_param, 0, sizeof(ramrod_param));
3292 3460
3293 /* must lock vfpf channel to protect against vf flows */ 3461 /* must lock vfpf channel to protect against vf flows */
@@ -3307,7 +3475,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3307 */ 3475 */
3308 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3476 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3309 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3477 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3310 q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj); 3478 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3311 update_params = &q_params.params.update; 3479 update_params = &q_params.params.update;
3312 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3480 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3313 &update_params->update_flags); 3481 &update_params->update_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d143a7cdbbbe..2a8c1dc65d9c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -81,6 +81,7 @@ struct bnx2x_vf_queue {
81 u32 cid; 81 u32 cid;
82 u16 index; 82 u16 index;
83 u16 sb_idx; 83 u16 sb_idx;
84 bool is_leading;
84}; 85};
85 86
86/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: 87/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -194,6 +195,7 @@ struct bnx2x_virtf {
194#define VF_CFG_INT_SIMD 0x0008 195#define VF_CFG_INT_SIMD 0x0008
195#define VF_CACHE_LINE 0x0010 196#define VF_CACHE_LINE 0x0010
196#define VF_CFG_VLAN 0x0020 197#define VF_CFG_VLAN 0x0020
198#define VF_CFG_STATS_COALESCE 0x0040
197 199
198 u8 state; 200 u8 state;
199#define VF_FREE 0 /* VF ready to be acquired holds no resc */ 201#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -213,6 +215,7 @@ struct bnx2x_virtf {
213 215
214 /* dma */ 216 /* dma */
215 dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ 217 dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
218 u16 stats_stride;
216 dma_addr_t spq_map; 219 dma_addr_t spq_map;
217 dma_addr_t bulletin_map; 220 dma_addr_t bulletin_map;
218 221
@@ -239,7 +242,10 @@ struct bnx2x_virtf {
239 u8 igu_base_id; /* base igu status block id */ 242 u8 igu_base_id; /* base igu status block id */
240 243
241 struct bnx2x_vf_queue *vfqs; 244 struct bnx2x_vf_queue *vfqs;
242#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) 245#define LEADING_IDX 0
246#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
247#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
248#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
243 249
244 u8 index; /* index in the vf array */ 250 u8 index; /* index in the vf array */
245 u8 abs_vfid; 251 u8 abs_vfid;
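A usage sketch for the new accessors (cf. the ndo hunks in bnx2x_sriov.c above), where qid is any valid queue index for the VF:

        struct bnx2x_vlan_mac_obj *mac_obj =
                &bnx2x_leading_vfq(vf, mac_obj);        /* queue LEADING_IDX */
        struct bnx2x_vlan_mac_obj *vlan_obj =
                &bnx2x_vfq(vf, qid, vlan_obj);          /* queue nr qid */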
@@ -358,6 +364,10 @@ struct bnx2x_vf_sp {
358 struct client_init_ramrod_data init_data; 364 struct client_init_ramrod_data init_data;
359 struct client_update_ramrod_data update_data; 365 struct client_update_ramrod_data update_data;
360 } q_data; 366 } q_data;
367
368 union {
369 struct eth_rss_update_ramrod_data e2;
370 } rss_rdata;
361}; 371};
362 372
363struct hw_dma { 373struct hw_dma {
@@ -403,6 +413,10 @@ struct bnx2x_vfdb {
403 413
404#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) 414#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
405 u32 flrd_vfs[FLRD_VFS_DWORDS]; 415 u32 flrd_vfs[FLRD_VFS_DWORDS];
416
417 /* the number of msix vectors belonging to this PF designated for VFs */
418 u16 vf_sbs_pool;
419 u16 first_vf_igu_entry;
406}; 420};
407 421
408/* queue access */ 422/* queue access */
@@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
411 return &(vf->vfqs[index]); 425 return &(vf->vfqs[index]);
412} 426}
413 427
414static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
415{
416 return (vfq->index == 0);
417}
418
419/* FW ids */ 428/* FW ids */
420static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) 429static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
421{ 430{
@@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
434 443
435static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 444static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
436{ 445{
437 return vfq_cl_id(vf, q); 446 if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
447 return vf->leading_rss;
448 else
449 return vfq_cl_id(vf, q);
438} 450}
439 451
440static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 452static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
@@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
691 struct bnx2x_virtf *vf, 703 struct bnx2x_virtf *vf,
692 struct bnx2x_vfop_cmd *cmd); 704 struct bnx2x_vfop_cmd *cmd);
693 705
706int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
707 struct bnx2x_virtf *vf,
708 struct bnx2x_vfop_cmd *cmd);
709
694/* VF release ~ VF close + VF release-resources 710/* VF release ~ VF close + VF release-resources
695 * 711 *
696 * Release is the ultimate SW shutdown and is called whenever an 712 * Release is the ultimate SW shutdown and is called whenever an
@@ -730,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp);
730int bnx2x_vfpf_release(struct bnx2x *bp); 746int bnx2x_vfpf_release(struct bnx2x *bp);
731int bnx2x_vfpf_init(struct bnx2x *bp); 747int bnx2x_vfpf_init(struct bnx2x *bp);
732void bnx2x_vfpf_close_vf(struct bnx2x *bp); 748void bnx2x_vfpf_close_vf(struct bnx2x *bp);
733int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx); 749int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
750 bool is_leading);
734int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); 751int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
735int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); 752int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
753int bnx2x_vfpf_config_rss(struct bnx2x *bp,
754 struct bnx2x_config_rss_params *params);
736int bnx2x_vfpf_set_mcast(struct net_device *dev); 755int bnx2x_vfpf_set_mcast(struct net_device *dev);
737int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); 756int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
738 757
@@ -758,7 +777,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp);
758void bnx2x_disable_sriov(struct bnx2x *bp); 777void bnx2x_disable_sriov(struct bnx2x *bp);
759static inline int bnx2x_vf_headroom(struct bnx2x *bp) 778static inline int bnx2x_vf_headroom(struct bnx2x *bp)
760{ 779{
761 return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; 780 return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
762} 781}
763void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); 782void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
764int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); 783int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
@@ -793,7 +812,7 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
793static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } 812static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
794static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } 813static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
795static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} 814static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
796static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; } 815static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
797static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; } 816static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
798static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, 817static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
799 u8 vf_qid, bool set) {return 0; } 818 u8 vf_qid, bool set) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 2088063151d6..6cfb88732452 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
257 257
258 /* humble our request */ 258 /* humble our request */
259 req->resc_request.num_txqs = 259 req->resc_request.num_txqs =
260 bp->acquire_resp.resc.num_txqs; 260 min(req->resc_request.num_txqs,
261 bp->acquire_resp.resc.num_txqs);
261 req->resc_request.num_rxqs = 262 req->resc_request.num_rxqs =
262 bp->acquire_resp.resc.num_rxqs; 263 min(req->resc_request.num_rxqs,
264 bp->acquire_resp.resc.num_rxqs);
263 req->resc_request.num_sbs = 265 req->resc_request.num_sbs =
264 bp->acquire_resp.resc.num_sbs; 266 min(req->resc_request.num_sbs,
267 bp->acquire_resp.resc.num_sbs);
265 req->resc_request.num_mac_filters = 268 req->resc_request.num_mac_filters =
266 bp->acquire_resp.resc.num_mac_filters; 269 min(req->resc_request.num_mac_filters,
270 bp->acquire_resp.resc.num_mac_filters);
267 req->resc_request.num_vlan_filters = 271 req->resc_request.num_vlan_filters =
268 bp->acquire_resp.resc.num_vlan_filters; 272 min(req->resc_request.num_vlan_filters,
273 bp->acquire_resp.resc.num_vlan_filters);
269 req->resc_request.num_mc_filters = 274 req->resc_request.num_mc_filters =
270 bp->acquire_resp.resc.num_mc_filters; 275 min(req->resc_request.num_mc_filters,
276 bp->acquire_resp.resc.num_mc_filters);
271 277
272 /* Clear response buffer */ 278 /* Clear response buffer */
273 memset(&bp->vf2pf_mbox->resp, 0, 279 memset(&bp->vf2pf_mbox->resp, 0,
@@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
293 bp->common.flash_size = 0; 299 bp->common.flash_size = 0;
294 bp->flags |= 300 bp->flags |=
295 NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; 301 NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
296 bp->igu_sb_cnt = 1; 302 bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
297 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; 303 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
298 strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, 304 strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
299 sizeof(bp->fw_ver)); 305 sizeof(bp->fw_ver));
@@ -373,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
373 req->stats_addr = bp->fw_stats_data_mapping + 379 req->stats_addr = bp->fw_stats_data_mapping +
374 offsetof(struct bnx2x_fw_stats_data, queue_stats); 380 offsetof(struct bnx2x_fw_stats_data, queue_stats);
375 381
382 req->stats_stride = sizeof(struct per_queue_stats);
383
376 /* add list termination tlv */ 384 /* add list termination tlv */
377 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, 385 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
378 sizeof(struct channel_list_end_tlv)); 386 sizeof(struct channel_list_end_tlv));
@@ -452,12 +460,60 @@ free_irq:
452 bnx2x_free_irq(bp); 460 bnx2x_free_irq(bp);
453} 461}
454 462
463static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
464 struct bnx2x_vf_queue *q)
465{
466 u8 cl_id = vfq_cl_id(vf, q);
467 u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
468
469 /* mac */
470 bnx2x_init_mac_obj(bp, &q->mac_obj,
471 cl_id, q->cid, func_id,
472 bnx2x_vf_sp(bp, vf, mac_rdata),
473 bnx2x_vf_sp_map(bp, vf, mac_rdata),
474 BNX2X_FILTER_MAC_PENDING,
475 &vf->filter_state,
476 BNX2X_OBJ_TYPE_RX_TX,
477 &bp->macs_pool);
478 /* vlan */
479 bnx2x_init_vlan_obj(bp, &q->vlan_obj,
480 cl_id, q->cid, func_id,
481 bnx2x_vf_sp(bp, vf, vlan_rdata),
482 bnx2x_vf_sp_map(bp, vf, vlan_rdata),
483 BNX2X_FILTER_VLAN_PENDING,
484 &vf->filter_state,
485 BNX2X_OBJ_TYPE_RX_TX,
486 &bp->vlans_pool);
487
488 /* mcast */
489 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
490 q->cid, func_id, func_id,
491 bnx2x_vf_sp(bp, vf, mcast_rdata),
492 bnx2x_vf_sp_map(bp, vf, mcast_rdata),
493 BNX2X_FILTER_MCAST_PENDING,
494 &vf->filter_state,
495 BNX2X_OBJ_TYPE_RX_TX);
496
497 /* rss */
498 bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
499 func_id, func_id,
500 bnx2x_vf_sp(bp, vf, rss_rdata),
501 bnx2x_vf_sp_map(bp, vf, rss_rdata),
502 BNX2X_FILTER_RSS_CONF_PENDING,
503 &vf->filter_state,
504 BNX2X_OBJ_TYPE_RX_TX);
505
506 vf->leading_rss = cl_id;
507 q->is_leading = true;
508}
509
455/* ask the pf to open a queue for the vf */ 510/* ask the pf to open a queue for the vf */
456int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) 511int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
512 bool is_leading)
457{ 513{
458 struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; 514 struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
459 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; 515 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
460 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; 516 u8 fp_idx = fp->index;
461 u16 tpa_agg_size = 0, flags = 0; 517 u16 tpa_agg_size = 0, flags = 0;
462 int rc; 518 int rc;
463 519
@@ -473,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
473 tpa_agg_size = TPA_AGG_SIZE; 529 tpa_agg_size = TPA_AGG_SIZE;
474 } 530 }
475 531
532 if (is_leading)
533 flags |= VFPF_QUEUE_FLG_LEADING_RSS;
534
476 /* calculate queue flags */ 535 /* calculate queue flags */
477 flags |= VFPF_QUEUE_FLG_STATS; 536 flags |= VFPF_QUEUE_FLG_STATS;
478 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; 537 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
@@ -646,6 +705,71 @@ out:
646 return 0; 705 return 0;
647} 706}
648 707
 708/* request pf to config rss table for vf queues */
709int bnx2x_vfpf_config_rss(struct bnx2x *bp,
710 struct bnx2x_config_rss_params *params)
711{
712 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
713 struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
714 int rc = 0;
715
716 /* clear mailbox and prep first tlv */
717 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
718 sizeof(*req));
719
720 /* add list termination tlv */
721 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
722 sizeof(struct channel_list_end_tlv));
723
724 memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
725 memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
726 req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
727 req->rss_key_size = T_ETH_RSS_KEY;
728 req->rss_result_mask = params->rss_result_mask;
729
 730 /* flags handled individually for backward/forward compatibility */
731 if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
732 req->rss_flags |= VFPF_RSS_MODE_DISABLED;
733 if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
734 req->rss_flags |= VFPF_RSS_MODE_REGULAR;
735 if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
736 req->rss_flags |= VFPF_RSS_SET_SRCH;
737 if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
738 req->rss_flags |= VFPF_RSS_IPV4;
739 if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
740 req->rss_flags |= VFPF_RSS_IPV4_TCP;
741 if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
742 req->rss_flags |= VFPF_RSS_IPV4_UDP;
743 if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
744 req->rss_flags |= VFPF_RSS_IPV6;
745 if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
746 req->rss_flags |= VFPF_RSS_IPV6_TCP;
747 if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
748 req->rss_flags |= VFPF_RSS_IPV6_UDP;
749
750 DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
751
752 /* output tlvs list */
753 bnx2x_dp_tlv_list(bp, req);
754
755 /* send message to pf */
756 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
757 if (rc) {
758 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
759 goto out;
760 }
761
762 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
 763 BNX2X_ERR("failed to send rss message to PF over VF PF channel %d\n",
764 resp->hdr.status);
765 rc = -EINVAL;
766 }
767out:
768 bnx2x_vfpf_finalize(bp, &req->first_tlv);
769
 770 return rc;
771}
772
649int bnx2x_vfpf_set_mcast(struct net_device *dev) 773int bnx2x_vfpf_set_mcast(struct net_device *dev)
650{ 774{
651 struct bnx2x *bp = netdev_priv(dev); 775 struct bnx2x *bp = netdev_priv(dev);
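The per-flag tests in bnx2x_vfpf_config_rss() above are deliberate, so that old/new driver pairs only exchange bits both sides understand. A hypothetical table-driven equivalent (not in the driver, shown only to make the mapping explicit):

        /* pairs of (sp-layer bit position, channel flag) */
        static const struct {
                int sp_bit;
                u32 vfpf_bit;
        } rss_xlat[] = {
                { BNX2X_RSS_MODE_DISABLED, VFPF_RSS_MODE_DISABLED },
                { BNX2X_RSS_MODE_REGULAR,  VFPF_RSS_MODE_REGULAR },
                { BNX2X_RSS_SET_SRCH,      VFPF_RSS_SET_SRCH },
                { BNX2X_RSS_IPV4,          VFPF_RSS_IPV4 },
                { BNX2X_RSS_IPV4_TCP,      VFPF_RSS_IPV4_TCP },
                { BNX2X_RSS_IPV4_UDP,      VFPF_RSS_IPV4_UDP },
                { BNX2X_RSS_IPV6,          VFPF_RSS_IPV6 },
                { BNX2X_RSS_IPV6_TCP,      VFPF_RSS_IPV6_TCP },
                { BNX2X_RSS_IPV6_UDP,      VFPF_RSS_IPV6_UDP },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(rss_xlat); i++)
                if (params->rss_flags & (1 << rss_xlat[i].sp_bit))
                        req->rss_flags |= rss_xlat[i].vfpf_bit;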
@@ -948,7 +1072,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
948 1072
949 /* fill in pfdev info */ 1073 /* fill in pfdev info */
950 resp->pfdev_info.chip_num = bp->common.chip_id; 1074 resp->pfdev_info.chip_num = bp->common.chip_id;
951 resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT); 1075 resp->pfdev_info.db_size = bp->db_size;
952 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; 1076 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
953 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | 1077 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
954 /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); 1078 /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
@@ -1054,8 +1178,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1054 /* record ghost addresses from vf message */ 1178 /* record ghost addresses from vf message */
1055 vf->spq_map = init->spq_addr; 1179 vf->spq_map = init->spq_addr;
1056 vf->fw_stat_map = init->stats_addr; 1180 vf->fw_stat_map = init->stats_addr;
1181 vf->stats_stride = init->stats_stride;
1057 vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); 1182 vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
1058 1183
1184 /* set VF multiqueue statistics collection mode */
1185 if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
1186 vf->cfg_flags |= VF_CFG_STATS_COALESCE;
1187
1059 /* response */ 1188 /* response */
1060 bnx2x_vf_mbx_resp(bp, vf); 1189 bnx2x_vf_mbx_resp(bp, vf);
1061} 1190}
@@ -1080,6 +1209,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1080 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); 1209 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1081 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) 1210 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1082 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); 1211 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1212 if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
1213 __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
1083 1214
1084 /* outer vlan removal is set according to PF's multi function mode */ 1215 /* outer vlan removal is set according to PF's multi function mode */
1085 if (IS_MF_SD(bp)) 1216 if (IS_MF_SD(bp))
@@ -1113,6 +1244,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1113 struct bnx2x_queue_init_params *init_p; 1244 struct bnx2x_queue_init_params *init_p;
1114 struct bnx2x_queue_setup_params *setup_p; 1245 struct bnx2x_queue_setup_params *setup_p;
1115 1246
1247 if (bnx2x_vfq_is_leading(q))
1248 bnx2x_leading_vfq_init(bp, vf, q);
1249
1116 /* re-init the VF operation context */ 1250 /* re-init the VF operation context */
1117 memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); 1251 memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
1118 setup_p = &vf->op_params.qctor.prep_qsetup; 1252 setup_p = &vf->op_params.qctor.prep_qsetup;
@@ -1552,6 +1686,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1552 bnx2x_vf_mbx_resp(bp, vf); 1686 bnx2x_vf_mbx_resp(bp, vf);
1553} 1687}
1554 1688
1689static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1690 struct bnx2x_vf_mbx *mbx)
1691{
1692 struct bnx2x_vfop_cmd cmd = {
1693 .done = bnx2x_vf_mbx_resp,
1694 .block = false,
1695 };
1696 struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
1697 struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
1698
1699 if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
1700 rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
1701 BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
1702 vf->index);
1703 vf->op_rc = -EINVAL;
1704 goto mbx_resp;
1705 }
1706
1707 /* set vfop params according to rss tlv */
1708 memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
1709 T_ETH_INDIRECTION_TABLE_SIZE);
1710 memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
1711 sizeof(rss_tlv->rss_key));
1712 vf_op_params->rss_obj = &vf->rss_conf_obj;
1713 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
1714
 1715 /* flags handled individually for backward/forward compatibility */
1716 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
1717 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
1718 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
1719 __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
1720 if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
1721 __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
1722 if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
1723 __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
1724 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
1725 __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
1726 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
1727 __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
1728 if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
1729 __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
1730 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
1731 __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
1732 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
1733 __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
1734
1735 if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
1736 rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
1737 (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
1738 rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
1739 BNX2X_ERR("about to hit a FW assert. aborting...\n");
1740 vf->op_rc = -EINVAL;
1741 goto mbx_resp;
1742 }
1743
1744 vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
1745
1746mbx_resp:
1747 if (vf->op_rc)
1748 bnx2x_vf_mbx_resp(bp, vf);
1749}
1750
1555/* dispatch request */ 1751/* dispatch request */
1556static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, 1752static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1557 struct bnx2x_vf_mbx *mbx) 1753 struct bnx2x_vf_mbx *mbx)
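A concrete example of a request the size/flag checks in bnx2x_vf_mbx_update_rss() above refuse (flag names from the vfpf.h hunk below): requesting IPv4 UDP hashing without IPv4 TCP hashing would trip a firmware assert, so it is failed with -EINVAL instead of being forwarded to the chip:

        /* rejected: VFPF_RSS_IPV4_UDP present, VFPF_RSS_IPV4_TCP absent */
        rss_tlv->rss_flags = VFPF_RSS_MODE_REGULAR | VFPF_RSS_IPV4 |
                             VFPF_RSS_IPV4_UDP;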
@@ -1588,6 +1784,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1588 case CHANNEL_TLV_RELEASE: 1784 case CHANNEL_TLV_RELEASE:
1589 bnx2x_vf_mbx_release_vf(bp, vf, mbx); 1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx);
1590 break; 1786 break;
1787 case CHANNEL_TLV_UPDATE_RSS:
1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1789 break;
1591 } 1790 }
1592 1791
1593 } else { 1792 } else {
@@ -1607,7 +1806,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1607 /* test whether we can respond to the VF (do we have an address 1806 /* test whether we can respond to the VF (do we have an address
1608 * for it?) 1807 * for it?)
1609 */ 1808 */
1610 if (vf->state == VF_ACQUIRED) { 1809 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
1611 /* mbx_resp uses the op_rc of the VF */ 1810 /* mbx_resp uses the op_rc of the VF */
1612 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; 1811 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1613 1812
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index f3ad174a3a63..1179fe06d0c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -51,6 +51,7 @@ struct hw_sb_info {
51#define VFPF_QUEUE_FLG_COS 0x0080 51#define VFPF_QUEUE_FLG_COS 0x0080
52#define VFPF_QUEUE_FLG_HC 0x0100 52#define VFPF_QUEUE_FLG_HC 0x0100
53#define VFPF_QUEUE_FLG_DHC 0x0200 53#define VFPF_QUEUE_FLG_DHC 0x0200
54#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
54 55
55#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0) 56#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
56#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1) 57#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
131 u8 padding[3]; 132 u8 padding[3];
132}; 133};
133 134
135/* receive side scaling tlv */
136struct vfpf_rss_tlv {
137 struct vfpf_first_tlv first_tlv;
138 u32 rss_flags;
139#define VFPF_RSS_MODE_DISABLED (1 << 0)
140#define VFPF_RSS_MODE_REGULAR (1 << 1)
141#define VFPF_RSS_SET_SRCH (1 << 2)
142#define VFPF_RSS_IPV4 (1 << 3)
143#define VFPF_RSS_IPV4_TCP (1 << 4)
144#define VFPF_RSS_IPV4_UDP (1 << 5)
145#define VFPF_RSS_IPV6 (1 << 6)
146#define VFPF_RSS_IPV6_TCP (1 << 7)
147#define VFPF_RSS_IPV6_UDP (1 << 8)
148 u8 rss_result_mask;
149 u8 ind_table_size;
150 u8 rss_key_size;
151 u8 padding;
152 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
153 u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
154};
155
134/* acquire response tlv - carries the allocated resources */ 156/* acquire response tlv - carries the allocated resources */
135struct pfvf_acquire_resp_tlv { 157struct pfvf_acquire_resp_tlv {
136 struct pfvf_tlv hdr; 158 struct pfvf_tlv hdr;
@@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv {
166 } resc; 188 } resc;
167}; 189};
168 190
 191#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set, the VF's queue
 192 * stats will be coalesced on
193 * the leading RSS queue
194 */
195
169/* Init VF */ 196/* Init VF */
170struct vfpf_init_tlv { 197struct vfpf_init_tlv {
171 struct vfpf_first_tlv first_tlv; 198 struct vfpf_first_tlv first_tlv;
172 aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */ 199 aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
173 aligned_u64 spq_addr; 200 aligned_u64 spq_addr;
174 aligned_u64 stats_addr; 201 aligned_u64 stats_addr;
202 u16 stats_stride;
203 u32 flags;
204 u32 padding[2];
175}; 205};
176 206
177/* Setup Queue */ 207/* Setup Queue */
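A sketch of a VF filling the extended INIT tlv; the stride assignment is the one added to bnx2x_vfpf_init() above, while setting the coalescing flag is illustrative only (this patch does not make the in-tree VF request it):

        req->stats_stride = sizeof(struct per_queue_stats);
        /* opt in to coalesced per-VF statistics (illustrative) */
        req->flags |= VFPF_INIT_FLG_STATS_COALESCE;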
@@ -293,13 +323,14 @@ union vfpf_tlvs {
293 struct vfpf_q_op_tlv q_op; 323 struct vfpf_q_op_tlv q_op;
294 struct vfpf_setup_q_tlv setup_q; 324 struct vfpf_setup_q_tlv setup_q;
295 struct vfpf_set_q_filters_tlv set_q_filters; 325 struct vfpf_set_q_filters_tlv set_q_filters;
296 struct vfpf_release_tlv release; 326 struct vfpf_release_tlv release;
297 struct channel_list_end_tlv list_end; 327 struct vfpf_rss_tlv update_rss;
328 struct channel_list_end_tlv list_end;
298 struct tlv_buffer_size tlv_buf_size; 329 struct tlv_buffer_size tlv_buf_size;
299}; 330};
300 331
301union pfvf_tlvs { 332union pfvf_tlvs {
302 struct pfvf_general_resp_tlv general_resp; 333 struct pfvf_general_resp_tlv general_resp;
303 struct pfvf_acquire_resp_tlv acquire_resp; 334 struct pfvf_acquire_resp_tlv acquire_resp;
304 struct channel_list_end_tlv list_end; 335 struct channel_list_end_tlv list_end;
305 struct tlv_buffer_size tlv_buf_size; 336 struct tlv_buffer_size tlv_buf_size;
@@ -355,14 +386,18 @@ enum channel_tlvs {
355 CHANNEL_TLV_INIT, 386 CHANNEL_TLV_INIT,
356 CHANNEL_TLV_SETUP_Q, 387 CHANNEL_TLV_SETUP_Q,
357 CHANNEL_TLV_SET_Q_FILTERS, 388 CHANNEL_TLV_SET_Q_FILTERS,
389 CHANNEL_TLV_ACTIVATE_Q,
390 CHANNEL_TLV_DEACTIVATE_Q,
358 CHANNEL_TLV_TEARDOWN_Q, 391 CHANNEL_TLV_TEARDOWN_Q,
359 CHANNEL_TLV_CLOSE, 392 CHANNEL_TLV_CLOSE,
360 CHANNEL_TLV_RELEASE, 393 CHANNEL_TLV_RELEASE,
394 CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
361 CHANNEL_TLV_PF_RELEASE_VF, 395 CHANNEL_TLV_PF_RELEASE_VF,
362 CHANNEL_TLV_LIST_END, 396 CHANNEL_TLV_LIST_END,
363 CHANNEL_TLV_FLR, 397 CHANNEL_TLV_FLR,
364 CHANNEL_TLV_PF_SET_MAC, 398 CHANNEL_TLV_PF_SET_MAC,
365 CHANNEL_TLV_PF_SET_VLAN, 399 CHANNEL_TLV_PF_SET_VLAN,
400 CHANNEL_TLV_UPDATE_RSS,
366 CHANNEL_TLV_MAX 401 CHANNEL_TLV_MAX
367}; 402};
368 403
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d78d4cf140ed..8142480d9770 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
1/* cnic.c: Broadcom CNIC core network driver. 1/* cnic.c: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2012 Broadcom Corporation 3 * Copyright (c) 2006-2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -1184,6 +1184,7 @@ error:
1184static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) 1184static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1185{ 1185{
1186 struct cnic_local *cp = dev->cnic_priv; 1186 struct cnic_local *cp = dev->cnic_priv;
1187 struct bnx2x *bp = netdev_priv(dev->netdev);
1187 int ctx_blk_size = cp->ethdev->ctx_blk_size; 1188 int ctx_blk_size = cp->ethdev->ctx_blk_size;
1188 int total_mem, blks, i; 1189 int total_mem, blks, i;
1189 1190
@@ -1201,7 +1202,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1201 1202
1202 cp->ctx_blks = blks; 1203 cp->ctx_blks = blks;
1203 cp->ctx_blk_size = ctx_blk_size; 1204 cp->ctx_blk_size = ctx_blk_size;
1204 if (!BNX2X_CHIP_IS_57710(cp->chip_id)) 1205 if (!CHIP_IS_E1(bp))
1205 cp->ctx_align = 0; 1206 cp->ctx_align = 0;
1206 else 1207 else
1207 cp->ctx_align = ctx_blk_size; 1208 cp->ctx_align = ctx_blk_size;
@@ -1231,6 +1232,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1231static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) 1232static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1232{ 1233{
1233 struct cnic_local *cp = dev->cnic_priv; 1234 struct cnic_local *cp = dev->cnic_priv;
1235 struct bnx2x *bp = netdev_priv(dev->netdev);
1234 struct cnic_eth_dev *ethdev = cp->ethdev; 1236 struct cnic_eth_dev *ethdev = cp->ethdev;
1235 u32 start_cid = ethdev->starting_cid; 1237 u32 start_cid = ethdev->starting_cid;
1236 int i, j, n, ret, pages; 1238 int i, j, n, ret, pages;
@@ -1240,7 +1242,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1240 cp->iscsi_start_cid = start_cid; 1242 cp->iscsi_start_cid = start_cid;
1241 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; 1243 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1242 1244
1243 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 1245 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
1244 cp->max_cid_space += dev->max_fcoe_conn; 1246 cp->max_cid_space += dev->max_fcoe_conn;
1245 cp->fcoe_init_cid = ethdev->fcoe_init_cid; 1247 cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1246 if (!cp->fcoe_init_cid) 1248 if (!cp->fcoe_init_cid)
@@ -1288,7 +1290,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1288 if (ret) 1290 if (ret)
1289 goto error; 1291 goto error;
1290 1292
1291 if (CNIC_SUPPORTS_FCOE(cp)) { 1293 if (CNIC_SUPPORTS_FCOE(bp)) {
1292 ret = cnic_alloc_kcq(dev, &cp->kcq2, true); 1294 ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1293 if (ret) 1295 if (ret)
1294 goto error; 1296 goto error;
@@ -1382,6 +1384,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1382 u32 type, union l5cm_specific_data *l5_data) 1384 u32 type, union l5cm_specific_data *l5_data)
1383{ 1385{
1384 struct cnic_local *cp = dev->cnic_priv; 1386 struct cnic_local *cp = dev->cnic_priv;
1387 struct bnx2x *bp = netdev_priv(dev->netdev);
1385 struct l5cm_spe kwqe; 1388 struct l5cm_spe kwqe;
1386 struct kwqe_16 *kwq[1]; 1389 struct kwqe_16 *kwq[1];
1387 u16 type_16; 1390 u16 type_16;
@@ -1389,10 +1392,10 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1389 1392
1390 kwqe.hdr.conn_and_cmd_data = 1393 kwqe.hdr.conn_and_cmd_data =
1391 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | 1394 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1392 BNX2X_HW_CID(cp, cid))); 1395 BNX2X_HW_CID(bp, cid)));
1393 1396
1394 type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 1397 type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1395 type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & 1398 type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1396 SPE_HDR_FUNCTION_ID; 1399 SPE_HDR_FUNCTION_ID;
1397 1400
1398 kwqe.hdr.type = cpu_to_le16(type_16); 1401 kwqe.hdr.type = cpu_to_le16(type_16);
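The hunk above also shows the header-packing style used for 16-byte slow-path elements: command and CID go into one 32-bit word, connection type and function ID into a 16-bit word, each via shift-and-mask, before the cpu_to_le32()/cpu_to_le16() conversion. A self-contained sketch with invented field widths (the real SPE_HDR_* masks come from the bnx2x HSI headers; the endian conversion is left out here):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout only, not the real SPE_HDR_* values. */
    #define CMD_ID_SHIFT    24
    #define CONN_TYPE_MASK  0x00ff
    #define FUNC_ID_SHIFT   8
    #define FUNC_ID_MASK    0xff00

    static uint32_t pack_conn_and_cmd(uint32_t cmd, uint32_t hw_cid)
    {
        return (cmd << CMD_ID_SHIFT) | hw_cid;  /* CID fills the low bits */
    }

    static uint16_t pack_type(uint16_t type, uint16_t pfid)
    {
        uint16_t v = type & CONN_TYPE_MASK;

        v |= (uint16_t)(pfid << FUNC_ID_SHIFT) & FUNC_ID_MASK;
        return v;
    }

    int main(void)
    {
        printf("hdr=0x%08x type=0x%04x\n",
               pack_conn_and_cmd(0x11, 0x00800005), pack_type(2, 1));
        return 0;
    }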
@@ -1427,13 +1430,34 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1427 rcu_read_unlock(); 1430 rcu_read_unlock();
1428} 1431}
1429 1432
1433static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1434 int en_tcp_dack)
1435{
1436 struct bnx2x *bp = netdev_priv(dev->netdev);
1437 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1438 u16 tstorm_flags = 0;
1439
1440 if (time_stamps) {
1441 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1442 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1443 }
1444 if (en_tcp_dack)
1445 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
1446
1447 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1448 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
1449
1450 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1451 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
1452}
1453
1430static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) 1454static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1431{ 1455{
1432 struct cnic_local *cp = dev->cnic_priv; 1456 struct cnic_local *cp = dev->cnic_priv;
1433 struct bnx2x *bp = netdev_priv(dev->netdev); 1457 struct bnx2x *bp = netdev_priv(dev->netdev);
1434 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; 1458 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1435 int hq_bds, pages; 1459 int hq_bds, pages;
1436 u32 pfid = cp->pfid; 1460 u32 pfid = bp->pfid;
1437 1461
1438 cp->num_iscsi_tasks = req1->num_tasks_per_conn; 1462 cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1439 cp->num_ccells = req1->num_ccells_per_conn; 1463 cp->num_ccells = req1->num_ccells_per_conn;
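The new cnic_bnx2x_set_tcp_options() added above folds the old timestamp-only helper (deleted later in this patch) together with delayed-ACK control: window scaling is always enabled on the XSTORM side, the two arguments toggle the per-storm timestamp and delayed-ACK bits, and each storm gets a single write. The accumulate-then-write shape, sketched with prints in place of the CNIC_WR8/CNIC_WR16 register accessors and with made-up flag values:

    #include <stdint.h>
    #include <stdio.h>

    #define X_WND_SCL_EN  0x01    /* illustrative values, not the HSI bits */
    #define X_TS_ENABLED  0x02
    #define T_TS_ENABLED  0x2000
    #define T_DACK_EN     0x1000

    static void set_tcp_options_sketch(int time_stamps, int en_tcp_dack)
    {
        uint8_t  xflags = X_WND_SCL_EN;   /* window scaling: unconditional */
        uint16_t tflags = 0;

        if (time_stamps) {
            xflags |= X_TS_ENABLED;
            tflags |= T_TS_ENABLED;
        }
        if (en_tcp_dack)
            tflags |= T_DACK_EN;

        /* one write per storm, standing in for CNIC_WR8/CNIC_WR16 */
        printf("XSTORM=0x%02x TSTORM=0x%04x\n", xflags, tflags);
    }

    int main(void)
    {
        set_tcp_options_sketch(1, 0);     /* timestamps on, delayed ACK off */
        return 0;
    }

As the next hunk shows, the caller passes req1->flags masked with the relevant ISCSI_KWQE_INIT1_* bit, so any nonzero masked value counts as enabled.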
@@ -1506,15 +1530,18 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1506 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), 1530 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1507 hq_bds); 1531 hq_bds);
1508 1532
1533 cnic_bnx2x_set_tcp_options(dev,
1534 req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1535 req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1536
1509 return 0; 1537 return 0;
1510} 1538}
1511 1539
1512static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) 1540static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1513{ 1541{
1514 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; 1542 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1515 struct cnic_local *cp = dev->cnic_priv;
1516 struct bnx2x *bp = netdev_priv(dev->netdev); 1543 struct bnx2x *bp = netdev_priv(dev->netdev);
1517 u32 pfid = cp->pfid; 1544 u32 pfid = bp->pfid;
1518 struct iscsi_kcqe kcqe; 1545 struct iscsi_kcqe kcqe;
1519 struct kcqe *cqes[1]; 1546 struct kcqe *cqes[1];
1520 1547
@@ -1653,6 +1680,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1653 u32 num) 1680 u32 num)
1654{ 1681{
1655 struct cnic_local *cp = dev->cnic_priv; 1682 struct cnic_local *cp = dev->cnic_priv;
1683 struct bnx2x *bp = netdev_priv(dev->netdev);
1656 struct iscsi_kwqe_conn_offload1 *req1 = 1684 struct iscsi_kwqe_conn_offload1 *req1 =
1657 (struct iscsi_kwqe_conn_offload1 *) wqes[0]; 1685 (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1658 struct iscsi_kwqe_conn_offload2 *req2 = 1686 struct iscsi_kwqe_conn_offload2 *req2 =
@@ -1661,11 +1689,11 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1661 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; 1689 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1662 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1690 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1663 u32 cid = ctx->cid; 1691 u32 cid = ctx->cid;
1664 u32 hw_cid = BNX2X_HW_CID(cp, cid); 1692 u32 hw_cid = BNX2X_HW_CID(bp, cid);
1665 struct iscsi_context *ictx; 1693 struct iscsi_context *ictx;
1666 struct regpair context_addr; 1694 struct regpair context_addr;
1667 int i, j, n = 2, n_max; 1695 int i, j, n = 2, n_max;
1668 u8 port = CNIC_PORT(cp); 1696 u8 port = BP_PORT(bp);
1669 1697
1670 ctx->ctx_flags = 0; 1698 ctx->ctx_flags = 0;
1671 if (!req2->num_additional_wqes) 1699 if (!req2->num_additional_wqes)
@@ -1719,8 +1747,8 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1719 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; 1747 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1720 ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = 1748 ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1721 ETH_P_8021Q; 1749 ETH_P_8021Q;
1722 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && 1750 if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
1723 cp->port_mode == CHIP_2_PORT_MODE) { 1751 bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
1724 1752
1725 port = 0; 1753 port = 0;
1726 } 1754 }
@@ -1841,6 +1869,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1841 struct iscsi_kwqe_conn_offload1 *req1; 1869 struct iscsi_kwqe_conn_offload1 *req1;
1842 struct iscsi_kwqe_conn_offload2 *req2; 1870 struct iscsi_kwqe_conn_offload2 *req2;
1843 struct cnic_local *cp = dev->cnic_priv; 1871 struct cnic_local *cp = dev->cnic_priv;
1872 struct bnx2x *bp = netdev_priv(dev->netdev);
1844 struct cnic_context *ctx; 1873 struct cnic_context *ctx;
1845 struct iscsi_kcqe kcqe; 1874 struct iscsi_kcqe kcqe;
1846 struct kcqe *cqes[1]; 1875 struct kcqe *cqes[1];
@@ -1894,7 +1923,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1894 } 1923 }
1895 1924
1896 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1925 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1897 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid); 1926 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
1898 1927
1899done: 1928done:
1900 cqes[0] = (struct kcqe *) &kcqe; 1929 cqes[0] = (struct kcqe *) &kcqe;
@@ -1930,6 +1959,7 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1930static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) 1959static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1931{ 1960{
1932 struct cnic_local *cp = dev->cnic_priv; 1961 struct cnic_local *cp = dev->cnic_priv;
1962 struct bnx2x *bp = netdev_priv(dev->netdev);
1933 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1963 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1934 union l5cm_specific_data l5_data; 1964 union l5cm_specific_data l5_data;
1935 int ret; 1965 int ret;
@@ -1938,7 +1968,7 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1938 init_waitqueue_head(&ctx->waitq); 1968 init_waitqueue_head(&ctx->waitq);
1939 ctx->wait_cond = 0; 1969 ctx->wait_cond = 0;
1940 memset(&l5_data, 0, sizeof(l5_data)); 1970 memset(&l5_data, 0, sizeof(l5_data));
1941 hw_cid = BNX2X_HW_CID(cp, ctx->cid); 1971 hw_cid = BNX2X_HW_CID(bp, ctx->cid);
1942 1972
1943 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 1973 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1944 hw_cid, NONE_CONNECTION_TYPE, &l5_data); 1974 hw_cid, NONE_CONNECTION_TYPE, &l5_data);
@@ -2035,9 +2065,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2035 xstorm_buf->pseudo_header_checksum = 2065 xstorm_buf->pseudo_header_checksum =
2036 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); 2066 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2037 2067
2038 if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
2039 tstorm_buf->params |=
2040 L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
2041 if (kwqe3->ka_timeout) { 2068 if (kwqe3->ka_timeout) {
2042 tstorm_buf->ka_enable = 1; 2069 tstorm_buf->ka_enable = 1;
2043 tstorm_buf->ka_timeout = kwqe3->ka_timeout; 2070 tstorm_buf->ka_timeout = kwqe3->ka_timeout;
@@ -2049,9 +2076,8 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2049 2076
2050static void cnic_init_bnx2x_mac(struct cnic_dev *dev) 2077static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2051{ 2078{
2052 struct cnic_local *cp = dev->cnic_priv;
2053 struct bnx2x *bp = netdev_priv(dev->netdev); 2079 struct bnx2x *bp = netdev_priv(dev->netdev);
2054 u32 pfid = cp->pfid; 2080 u32 pfid = bp->pfid;
2055 u8 *mac = dev->mac_addr; 2081 u8 *mac = dev->mac_addr;
2056 2082
2057 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2083 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
@@ -2084,25 +2110,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2084 mac[0]); 2110 mac[0]);
2085} 2111}
2086 2112
2087static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
2088{
2089 struct cnic_local *cp = dev->cnic_priv;
2090 struct bnx2x *bp = netdev_priv(dev->netdev);
2091 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
2092 u16 tstorm_flags = 0;
2093
2094 if (tcp_ts) {
2095 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2096 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2097 }
2098
2099 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2100 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
2101
2102 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
2103 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
2104}
2105
2106static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], 2113static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2107 u32 num, int *work) 2114 u32 num, int *work)
2108{ 2115{
@@ -2176,10 +2183,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2176 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 2183 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2177 2184
2178 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 2185 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2179 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); 2186 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
2180
2181 cnic_bnx2x_set_tcp_timestamp(dev,
2182 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
2183 2187
2184 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 2188 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2185 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2189 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
@@ -2248,11 +2252,12 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2248 struct fcoe_stat_ramrod_params *fcoe_stat; 2252 struct fcoe_stat_ramrod_params *fcoe_stat;
2249 union l5cm_specific_data l5_data; 2253 union l5cm_specific_data l5_data;
2250 struct cnic_local *cp = dev->cnic_priv; 2254 struct cnic_local *cp = dev->cnic_priv;
2255 struct bnx2x *bp = netdev_priv(dev->netdev);
2251 int ret; 2256 int ret;
2252 u32 cid; 2257 u32 cid;
2253 2258
2254 req = (struct fcoe_kwqe_stat *) kwqe; 2259 req = (struct fcoe_kwqe_stat *) kwqe;
2255 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2260 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2256 2261
2257 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2262 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2258 if (!fcoe_stat) 2263 if (!fcoe_stat)
@@ -2271,6 +2276,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2271{ 2276{
2272 int ret; 2277 int ret;
2273 struct cnic_local *cp = dev->cnic_priv; 2278 struct cnic_local *cp = dev->cnic_priv;
2279 struct bnx2x *bp = netdev_priv(dev->netdev);
2274 u32 cid; 2280 u32 cid;
2275 struct fcoe_init_ramrod_params *fcoe_init; 2281 struct fcoe_init_ramrod_params *fcoe_init;
2276 struct fcoe_kwqe_init1 *req1; 2282 struct fcoe_kwqe_init1 *req1;
@@ -2315,7 +2321,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2315 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; 2321 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2316 cp->kcq2.sw_prod_idx = 0; 2322 cp->kcq2.sw_prod_idx = 0;
2317 2323
2318 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2324 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2319 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, 2325 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2320 FCOE_CONNECTION_TYPE, &l5_data); 2326 FCOE_CONNECTION_TYPE, &l5_data);
2321 *work = 3; 2327 *work = 3;
@@ -2328,6 +2334,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2328 int ret = 0; 2334 int ret = 0;
2329 u32 cid = -1, l5_cid; 2335 u32 cid = -1, l5_cid;
2330 struct cnic_local *cp = dev->cnic_priv; 2336 struct cnic_local *cp = dev->cnic_priv;
2337 struct bnx2x *bp = netdev_priv(dev->netdev);
2331 struct fcoe_kwqe_conn_offload1 *req1; 2338 struct fcoe_kwqe_conn_offload1 *req1;
2332 struct fcoe_kwqe_conn_offload2 *req2; 2339 struct fcoe_kwqe_conn_offload2 *req2;
2333 struct fcoe_kwqe_conn_offload3 *req3; 2340 struct fcoe_kwqe_conn_offload3 *req3;
@@ -2370,7 +2377,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2370 2377
2371 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); 2378 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2372 if (fctx) { 2379 if (fctx) {
2373 u32 hw_cid = BNX2X_HW_CID(cp, cid); 2380 u32 hw_cid = BNX2X_HW_CID(bp, cid);
2374 u32 val; 2381 u32 val;
2375 2382
2376 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, 2383 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
@@ -2394,7 +2401,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2394 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); 2401 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2395 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); 2402 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2396 2403
2397 cid = BNX2X_HW_CID(cp, cid); 2404 cid = BNX2X_HW_CID(bp, cid);
2398 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, 2405 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2399 FCOE_CONNECTION_TYPE, &l5_data); 2406 FCOE_CONNECTION_TYPE, &l5_data);
2400 if (!ret) 2407 if (!ret)
@@ -2552,13 +2559,14 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2552 struct fcoe_kwqe_destroy *req; 2559 struct fcoe_kwqe_destroy *req;
2553 union l5cm_specific_data l5_data; 2560 union l5cm_specific_data l5_data;
2554 struct cnic_local *cp = dev->cnic_priv; 2561 struct cnic_local *cp = dev->cnic_priv;
2562 struct bnx2x *bp = netdev_priv(dev->netdev);
2555 int ret; 2563 int ret;
2556 u32 cid; 2564 u32 cid;
2557 2565
2558 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); 2566 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2559 2567
2560 req = (struct fcoe_kwqe_destroy *) kwqe; 2568 req = (struct fcoe_kwqe_destroy *) kwqe;
2561 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2569 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2562 2570
2563 memset(&l5_data, 0, sizeof(l5_data)); 2571 memset(&l5_data, 0, sizeof(l5_data));
2564 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, 2572 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
@@ -2715,7 +2723,7 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2715static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, 2723static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2716 struct kwqe *wqes[], u32 num_wqes) 2724 struct kwqe *wqes[], u32 num_wqes)
2717{ 2725{
2718 struct cnic_local *cp = dev->cnic_priv; 2726 struct bnx2x *bp = netdev_priv(dev->netdev);
2719 int i, work, ret; 2727 int i, work, ret;
2720 u32 opcode; 2728 u32 opcode;
2721 struct kwqe *kwqe; 2729 struct kwqe *kwqe;
@@ -2723,7 +2731,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2723 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2731 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2724 return -EAGAIN; /* bnx2 is down */ 2732 return -EAGAIN; /* bnx2 is down */
2725 2733
2726 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 2734 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2727 return -EINVAL; 2735 return -EINVAL;
2728 2736
2729 for (i = 0; i < num_wqes; ) { 2737 for (i = 0; i < num_wqes; ) {
@@ -3039,8 +3047,8 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
3039static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 3047static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3040 u16 index, u8 op, u8 update) 3048 u16 index, u8 op, u8 update)
3041{ 3049{
3042 struct cnic_local *cp = dev->cnic_priv; 3050 struct bnx2x *bp = netdev_priv(dev->netdev);
3043 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + 3051 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3044 COMMAND_REG_INT_ACK); 3052 COMMAND_REG_INT_ACK);
3045 struct igu_ack_register igu_ack; 3053 struct igu_ack_register igu_ack;
3046 3054
@@ -3603,6 +3611,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3603 csk1->rcv_buf = DEF_RCV_BUF; 3611 csk1->rcv_buf = DEF_RCV_BUF;
3604 csk1->snd_buf = DEF_SND_BUF; 3612 csk1->snd_buf = DEF_SND_BUF;
3605 csk1->seed = DEF_SEED; 3613 csk1->seed = DEF_SEED;
3614 csk1->tcp_flags = 0;
3606 3615
3607 *csk = csk1; 3616 *csk = csk1;
3608 return 0; 3617 return 0;
@@ -4020,15 +4029,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
4020 cnic_cm_upcall(cp, csk, opcode); 4029 cnic_cm_upcall(cp, csk, opcode);
4021 break; 4030 break;
4022 4031
4023 case L5CM_RAMROD_CMD_ID_CLOSE: 4032 case L5CM_RAMROD_CMD_ID_CLOSE: {
4024 if (l4kcqe->status != 0) { 4033 struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4025 netdev_warn(dev->netdev, "RAMROD CLOSE compl with " 4034
4026 "status 0x%x\n", l4kcqe->status); 4035 if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
4036 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4037 l4kcqe->status, l5kcqe->completion_status);
4027 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 4038 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4028 /* Fall through */ 4039 /* Fall through */
4029 } else { 4040 } else {
4030 break; 4041 break;
4031 } 4042 }
4043 }
4032 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 4044 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4033 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 4045 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4034 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 4046 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
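The CLOSE completion rewrite above wraps the case label in braces so a local l5kcqe can be declared, and it now also checks the L5 completion_status; on any error it logs and deliberately falls through into the CLOSE_COMP handling. A compact sketch of that scoped-case, conditional fall-through control flow:

    #include <stdio.h>

    enum { OP_CLOSE, OP_CLOSE_COMP };

    static void process(int opcode, int status, int completion_status)
    {
        switch (opcode) {
        case OP_CLOSE: {                 /* braces give the case its own scope */
            int failed = status || completion_status;

            if (failed) {
                fprintf(stderr, "close failed: %d/%d\n",
                        status, completion_status);
                /* fall through and treat it as a completed close */
            } else {
                break;                   /* clean close: wait for the event */
            }
        }
        case OP_CLOSE_COMP:
            printf("connection torn down\n");
            break;
        }
    }

    int main(void)
    {
        process(OP_CLOSE, 0, 1);         /* error path: falls through */
        process(OP_CLOSE, 0, 0);         /* clean path: breaks out */
        return 0;
    }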
@@ -4213,13 +4225,12 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4213 4225
4214static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 4226static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4215{ 4227{
4216 struct cnic_local *cp = dev->cnic_priv;
4217 struct bnx2x *bp = netdev_priv(dev->netdev); 4228 struct bnx2x *bp = netdev_priv(dev->netdev);
4218 u32 pfid = cp->pfid; 4229 u32 pfid = bp->pfid;
4219 u32 port = CNIC_PORT(cp); 4230 u32 port = BP_PORT(bp);
4220 4231
4221 cnic_init_bnx2x_mac(dev); 4232 cnic_init_bnx2x_mac(dev);
4222 cnic_bnx2x_set_tcp_timestamp(dev, 1); 4233 cnic_bnx2x_set_tcp_options(dev, 0, 1);
4223 4234
4224 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 4235 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4225 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); 4236 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
@@ -4897,6 +4908,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4897 struct client_init_ramrod_data *data) 4908 struct client_init_ramrod_data *data)
4898{ 4909{
4899 struct cnic_local *cp = dev->cnic_priv; 4910 struct cnic_local *cp = dev->cnic_priv;
4911 struct bnx2x *bp = netdev_priv(dev->netdev);
4900 struct cnic_uio_dev *udev = cp->udev; 4912 struct cnic_uio_dev *udev = cp->udev;
4901 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; 4913 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4902 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4914 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
@@ -4925,7 +4937,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4925 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS; 4937 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4926 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4938 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4927 4939
4928 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 4940 if (BNX2X_CHIP_IS_E2_PLUS(bp))
4929 pbd_e2->parsing_data = (UNICAST_ADDRESS << 4941 pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4930 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); 4942 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4931 else 4943 else
@@ -4962,6 +4974,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4962 struct client_init_ramrod_data *data) 4974 struct client_init_ramrod_data *data)
4963{ 4975{
4964 struct cnic_local *cp = dev->cnic_priv; 4976 struct cnic_local *cp = dev->cnic_priv;
4977 struct bnx2x *bp = netdev_priv(dev->netdev);
4965 struct cnic_uio_dev *udev = cp->udev; 4978 struct cnic_uio_dev *udev = cp->udev;
4966 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + 4979 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4967 BNX2_PAGE_SIZE); 4980 BNX2_PAGE_SIZE);
@@ -4970,7 +4983,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4970 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4983 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4971 int i; 4984 int i;
4972 u32 cli = cp->ethdev->iscsi_l2_client_id; 4985 u32 cli = cp->ethdev->iscsi_l2_client_id;
4973 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 4986 int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
4974 u32 val; 4987 u32 val;
4975 dma_addr_t ring_map = udev->l2_ring_map; 4988 dma_addr_t ring_map = udev->l2_ring_map;
4976 4989
@@ -4979,7 +4992,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4979 data->general.activate_flg = 1; 4992 data->general.activate_flg = 1;
4980 data->general.sp_client_id = cli; 4993 data->general.sp_client_id = cli;
4981 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); 4994 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4982 data->general.func_id = cp->pfid; 4995 data->general.func_id = bp->pfid;
4983 4996
4984 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 4997 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4985 dma_addr_t buf_map; 4998 dma_addr_t buf_map;
@@ -5029,13 +5042,13 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5029{ 5042{
5030 struct cnic_local *cp = dev->cnic_priv; 5043 struct cnic_local *cp = dev->cnic_priv;
5031 struct bnx2x *bp = netdev_priv(dev->netdev); 5044 struct bnx2x *bp = netdev_priv(dev->netdev);
5032 u32 pfid = cp->pfid; 5045 u32 pfid = bp->pfid;
5033 5046
5034 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 5047 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5035 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); 5048 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5036 cp->kcq1.sw_prod_idx = 0; 5049 cp->kcq1.sw_prod_idx = 0;
5037 5050
5038 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5051 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5039 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 5052 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5040 5053
5041 cp->kcq1.hw_prod_idx_ptr = 5054 cp->kcq1.hw_prod_idx_ptr =
@@ -5051,7 +5064,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5051 &sb->sb.running_index[SM_RX_ID]; 5064 &sb->sb.running_index[SM_RX_ID];
5052 } 5065 }
5053 5066
5054 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5067 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5055 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 5068 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5056 5069
5057 cp->kcq2.io_addr = BAR_USTRORM_INTMEM + 5070 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -5073,12 +5086,10 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5073 u32 pfid; 5086 u32 pfid;
5074 5087
5075 dev->stats_addr = ethdev->addr_drv_info_to_mcp; 5088 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5076 cp->port_mode = bp->common.chip_port_mode;
5077 cp->pfid = bp->pfid;
5078 cp->func = bp->pf_num; 5089 cp->func = bp->pf_num;
5079 5090
5080 func = CNIC_FUNC(cp); 5091 func = CNIC_FUNC(cp);
5081 pfid = cp->pfid; 5092 pfid = bp->pfid;
5082 5093
5083 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 5094 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5084 cp->iscsi_start_cid, 0); 5095 cp->iscsi_start_cid, 0);
@@ -5086,7 +5097,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5086 if (ret) 5097 if (ret)
5087 return -ENOMEM; 5098 return -ENOMEM;
5088 5099
5089 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5100 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5090 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, 5101 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5091 cp->fcoe_start_cid, 0); 5102 cp->fcoe_start_cid, 0);
5092 5103
@@ -5168,12 +5179,12 @@ static void cnic_init_rings(struct cnic_dev *dev)
5168 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 5179 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5169 barrier(); 5180 barrier();
5170 5181
5171 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 5182 cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5172 5183
5173 off = BAR_USTRORM_INTMEM + 5184 off = BAR_USTRORM_INTMEM +
5174 (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? 5185 (BNX2X_CHIP_IS_E2_PLUS(bp) ?
5175 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : 5186 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5176 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); 5187 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
5177 5188
5178 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 5189 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5179 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 5190 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
@@ -5271,6 +5282,13 @@ static int cnic_register_netdev(struct cnic_dev *dev)
5271 if (err) 5282 if (err)
5272 netdev_err(dev->netdev, "register_cnic failed\n"); 5283 netdev_err(dev->netdev, "register_cnic failed\n");
5273 5284
5285 /* Read iSCSI config again. On some bnx2x devices, the iSCSI config
5286 * can change after firmware is downloaded.
5287 */
5288 dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5289 if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5290 dev->max_iscsi_conn = 0;
5291
5274 return err; 5292 return err;
5275} 5293}
5276 5294
@@ -5353,7 +5371,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5353 5371
5354 cnic_free_irq(dev); 5372 cnic_free_irq(dev);
5355 5373
5356 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5374 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5357 idx_off = offsetof(struct hc_status_block_e2, index_values) + 5375 idx_off = offsetof(struct hc_status_block_e2, index_values) +
5358 (hc_index * sizeof(u16)); 5376 (hc_index * sizeof(u16));
5359 5377
@@ -5370,7 +5388,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5370 5388
5371 *cp->kcq1.hw_prod_idx_ptr = 0; 5389 *cp->kcq1.hw_prod_idx_ptr = 0;
5372 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5390 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5373 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); 5391 CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
5374 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 5392 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5375 cnic_free_resc(dev); 5393 cnic_free_resc(dev);
5376} 5394}
@@ -5544,7 +5562,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5544 5562
5545 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5563 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5546 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5564 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5547 if (CNIC_SUPPORTS_FCOE(cp)) { 5565 if (CNIC_SUPPORTS_FCOE(bp)) {
5548 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5566 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5549 cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges; 5567 cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5550 } 5568 }
@@ -5564,7 +5582,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5564 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 5582 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5565 cp->enable_int = cnic_enable_bnx2x_int; 5583 cp->enable_int = cnic_enable_bnx2x_int;
5566 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 5584 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5567 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5585 if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5568 cp->ack_int = cnic_ack_bnx2x_e2_msix; 5586 cp->ack_int = cnic_ack_bnx2x_e2_msix;
5569 cp->arm_int = cnic_arm_bnx2x_e2_msix; 5587 cp->arm_int = cnic_arm_bnx2x_e2_msix;
5570 } else { 5588 } else {
@@ -5628,7 +5646,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5628 5646
5629 dev = cnic_from_netdev(netdev); 5647 dev = cnic_from_netdev(netdev);
5630 5648
5631 if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { 5649 if (!dev && event == NETDEV_REGISTER) {
5632 /* Check for the hot-plug device */ 5650 /* Check for the hot-plug device */
5633 dev = is_cnic_dev(netdev); 5651 dev = is_cnic_dev(netdev);
5634 if (dev) { 5652 if (dev) {
@@ -5644,7 +5662,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5644 else if (event == NETDEV_UNREGISTER) 5662 else if (event == NETDEV_UNREGISTER)
5645 cnic_ulp_exit(dev); 5663 cnic_ulp_exit(dev);
5646 5664
5647 if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { 5665 if (event == NETDEV_UP) {
5648 if (cnic_register_netdev(dev) != 0) { 5666 if (cnic_register_netdev(dev) != 0) {
5649 cnic_put(dev); 5667 cnic_put(dev);
5650 goto done; 5668 goto done;
@@ -5693,21 +5711,8 @@ static struct notifier_block cnic_netdev_notifier = {
5693 5711
5694static void cnic_release(void) 5712static void cnic_release(void)
5695{ 5713{
5696 struct cnic_dev *dev;
5697 struct cnic_uio_dev *udev; 5714 struct cnic_uio_dev *udev;
5698 5715
5699 while (!list_empty(&cnic_dev_list)) {
5700 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
5701 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5702 cnic_ulp_stop(dev);
5703 cnic_stop_hw(dev);
5704 }
5705
5706 cnic_ulp_exit(dev);
5707 cnic_unregister_netdev(dev);
5708 list_del_init(&dev->list);
5709 cnic_free_dev(dev);
5710 }
5711 while (!list_empty(&cnic_udev_list)) { 5716 while (!list_empty(&cnic_udev_list)) {
5712 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, 5717 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5713 list); 5718 list);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 62c670619ae6..0121a5d55192 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
1/* cnic.h: Broadcom CNIC core network driver. 1/* cnic.h: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2011 Broadcom Corporation 3 * Copyright (c) 2006-2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -303,8 +303,6 @@ struct cnic_local {
303 303
304 u32 chip_id; 304 u32 chip_id;
305 int func; 305 int func;
306 u32 pfid;
307 u8 port_mode;
308 306
309 u32 shmem_base; 307 u32 shmem_base;
310 308
@@ -364,47 +362,7 @@ struct bnx2x_bd_chain_next {
364 362
365#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ 363#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
366 364
367#define BNX2X_CHIP_NUM_57710 0x164e 365#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
368#define BNX2X_CHIP_NUM_57711 0x164f
369#define BNX2X_CHIP_NUM_57711E 0x1650
370#define BNX2X_CHIP_NUM_57712 0x1662
371#define BNX2X_CHIP_NUM_57712E 0x1663
372#define BNX2X_CHIP_NUM_57713 0x1651
373#define BNX2X_CHIP_NUM_57713E 0x1652
374#define BNX2X_CHIP_NUM_57800 0x168a
375#define BNX2X_CHIP_NUM_57810 0x168e
376#define BNX2X_CHIP_NUM_57840 0x168d
377
378#define BNX2X_CHIP_NUM(x) (x >> 16)
379#define BNX2X_CHIP_IS_57710(x) \
380 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
381#define BNX2X_CHIP_IS_57711(x) \
382 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
383#define BNX2X_CHIP_IS_57711E(x) \
384 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
385#define BNX2X_CHIP_IS_E1H(x) \
386 (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
387#define BNX2X_CHIP_IS_57712(x) \
388 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
389#define BNX2X_CHIP_IS_57712E(x) \
390 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
391#define BNX2X_CHIP_IS_57713(x) \
392 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
393#define BNX2X_CHIP_IS_57713E(x) \
394 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
395#define BNX2X_CHIP_IS_57800(x) \
396 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
397#define BNX2X_CHIP_IS_57810(x) \
398 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
399#define BNX2X_CHIP_IS_57840(x) \
400 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
401#define BNX2X_CHIP_IS_E2(x) \
402 (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
403 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
404#define BNX2X_CHIP_IS_E3(x) \
405 (BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
406 BNX2X_CHIP_IS_57840(x))
407#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
408 366
409#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \ 367#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \
410 sizeof(struct eth_rx_bd)) 368 sizeof(struct eth_rx_bd))
@@ -439,31 +397,26 @@ struct bnx2x_bd_chain_next {
439#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H 397#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
440#endif 398#endif
441 399
442#define CNIC_PORT(cp) ((cp)->pfid & 1)
443#define CNIC_FUNC(cp) ((cp)->func) 400#define CNIC_FUNC(cp) ((cp)->func)
444#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
445 0 : (CNIC_FUNC(cp) & 1))
446#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
447 401
448#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \ 402#define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
449 (CNIC_E1HVN(cp) << 17) | (x)) 403 (BP_VN(bp) << 17) | (x))
450 404
451#define BNX2X_SW_CID(x) (x & 0x1ffff) 405#define BNX2X_SW_CID(x) (x & 0x1ffff)
452 406
453#define BNX2X_CL_QZONE_ID(cp, cli) \ 407#define BNX2X_CL_QZONE_ID(bp, cli) \
454 (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \ 408 (BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \
455 cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H)) 409 cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
456 410
457#ifndef MAX_STAT_COUNTER_ID 411#ifndef MAX_STAT_COUNTER_ID
458#define MAX_STAT_COUNTER_ID \ 412#define MAX_STAT_COUNTER_ID \
459 (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \ 413 (CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \
460 ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\ 414 ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \
461 MAX_STAT_COUNTER_ID_E1)) 415 MAX_STAT_COUNTER_ID_E1))
462#endif 416#endif
463 417
464#define CNIC_SUPPORTS_FCOE(cp) \ 418#define CNIC_SUPPORTS_FCOE(cp) \
465 (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \ 419 (BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
466 !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
467 420
468#define CNIC_RAMROD_TMO (HZ / 4) 421#define CNIC_RAMROD_TMO (HZ / 4)
469 422
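With cnic.h's private chip-ID table removed, the header reuses bnx2x's own CHIP_IS_E1/E2/E3 tests, and BNX2X_HW_CID is rebuilt on BP_PORT()/BP_VN(). The macro packs the port number at bit 23 and the VN at bit 17 above the raw CID, and BNX2X_SW_CID recovers the low 17 bits. The round trip as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define HW_CID(port, vn, x)  (((uint32_t)(port) << 23) | \
                                  ((uint32_t)(vn) << 17) | (x))
    #define SW_CID(x)            ((x) & 0x1ffff)

    int main(void)
    {
        uint32_t hw = HW_CID(1, 2, 0x35);

        /* SW_CID strips the port/VN bits and yields 0x35 again */
        printf("hw=0x%08x sw=0x%x\n", hw, SW_CID(hw));
        return 0;
    }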
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index ede3db35d757..95a8e4b11c9f 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
1 1
2/* cnic.c: Broadcom CNIC core network driver. 2/* cnic.c: Broadcom CNIC core network driver.
3 * 3 *
4 * Copyright (c) 2006-2012 Broadcom Corporation 4 * Copyright (c) 2006-2013 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags {
5400 u16 flags; 5400 u16 flags;
5401#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0) 5401#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
5402#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0 5402#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
5403#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12) 5403#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12)
5404#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12 5404#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12
5405#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13) 5405#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
5406#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13 5406#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
5407#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14) 5407#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ec9bb9ad4bb3..0658b43e148c 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
1/* cnic_if.h: Broadcom CNIC core network driver. 1/* cnic_if.h: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2012 Broadcom Corporation 3 * Copyright (c) 2006-2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
14 14
15#include "bnx2x/bnx2x_mfw_req.h" 15#include "bnx2x/bnx2x_mfw_req.h"
16 16
17#define CNIC_MODULE_VERSION "2.5.16" 17#define CNIC_MODULE_VERSION "2.5.18"
18#define CNIC_MODULE_RELDATE "Dec 05, 2012" 18#define CNIC_MODULE_RELDATE "Sept 01, 2013"
19 19
20#define CNIC_ULP_RDMA 0 20#define CNIC_ULP_RDMA 0
21#define CNIC_ULP_ISCSI 1 21#define CNIC_ULP_ISCSI 1
@@ -238,8 +238,8 @@ struct cnic_sock {
238 u16 src_port; 238 u16 src_port;
239 u16 dst_port; 239 u16 dst_port;
240 u16 vlan_id; 240 u16 vlan_id;
241 unsigned char old_ha[6]; 241 unsigned char old_ha[ETH_ALEN];
242 unsigned char ha[6]; 242 unsigned char ha[ETH_ALEN];
243 u32 mtu; 243 u32 mtu;
244 u32 cid; 244 u32 cid;
245 u32 l5_cid; 245 u32 l5_cid;
@@ -308,7 +308,7 @@ struct cnic_dev {
308#define CNIC_F_BNX2_CLASS 3 308#define CNIC_F_BNX2_CLASS 3
309#define CNIC_F_BNX2X_CLASS 4 309#define CNIC_F_BNX2X_CLASS 4
310 atomic_t ref_count; 310 atomic_t ref_count;
311 u8 mac_addr[6]; 311 u8 mac_addr[ETH_ALEN];
312 312
313 int max_iscsi_conn; 313 int max_iscsi_conn;
314 int max_fcoe_conn; 314 int max_fcoe_conn;
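Swapping the literal 6 for ETH_ALEN ties these buffers to the Ethernet address length defined in <linux/if_ether.h>, so MAC-sized arrays are greppable and cannot drift from the protocol definition. The same practice in a short sketch (ETH_ALEN re-defined locally for a standalone build):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6    /* matches the kernel's <linux/if_ether.h> */

    struct sock_addrs {
        unsigned char old_ha[ETH_ALEN];
        unsigned char ha[ETH_ALEN];
    };

    int main(void)
    {
        static const unsigned char mac[ETH_ALEN] =
            { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
        struct sock_addrs s;

        memcpy(s.ha, mac, ETH_ALEN);
        memcpy(s.old_ha, s.ha, sizeof(s.old_ha));
        printf("%02x:%02x:%02x:...\n", s.ha[0], s.ha[1], s.ha[2]);
        return 0;
    }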
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0da2214ef1b9..5701f3d1a169 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
94 94
95#define DRV_MODULE_NAME "tg3" 95#define DRV_MODULE_NAME "tg3"
96#define TG3_MAJ_NUM 3 96#define TG3_MAJ_NUM 3
97#define TG3_MIN_NUM 132 97#define TG3_MIN_NUM 133
98#define DRV_MODULE_VERSION \ 98#define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100#define DRV_MODULE_RELDATE "May 21, 2013" 100#define DRV_MODULE_RELDATE "Jul 29, 2013"
101 101
102#define RESET_KIND_SHUTDOWN 0 102#define RESET_KIND_SHUTDOWN 0
103#define RESET_KIND_INIT 1 103#define RESET_KIND_INIT 1
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
3030 return false; 3030 return false;
3031} 3031}
3032 3032
3033static bool tg3_phy_led_bug(struct tg3 *tp)
3034{
3035 switch (tg3_asic_rev(tp)) {
3036 case ASIC_REV_5719:
3037 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3038 !tp->pci_fn)
3039 return true;
3040 return false;
3041 }
3042
3043 return false;
3044}
3045
3033static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3046static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3034{ 3047{
3035 u32 val; 3048 u32 val;
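tg3_phy_led_bug() follows the same shape as the existing tg3_phy_power_bug(): a predicate keyed on the ASIC revision plus extra conditions, consulted by the caller in the next hunk before it writes FORCE_LED_OFF. The general pattern, sketched with a stand-in chip descriptor:

    #include <stdio.h>

    enum asic_rev { REV_5719, REV_OTHER };

    struct chip {
        enum asic_rev rev;
        int mii_serdes;   /* PHY is in MII/serdes mode */
        int pci_fn;       /* PCI function number */
    };

    /* Only the 5719, function 0, in serdes mode is affected. */
    static int phy_led_bug(const struct chip *c)
    {
        switch (c->rev) {
        case REV_5719:
            return c->mii_serdes && c->pci_fn == 0;
        default:
            return 0;
        }
    }

    int main(void)
    {
        struct chip c = { REV_5719, 1, 0 };

        if (!phy_led_bug(&c))
            printf("force LED off\n");      /* the write being guarded */
        else
            printf("skip LED write on the quirky part\n");
        return 0;
    }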
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3077 } 3090 }
3078 return; 3091 return;
3079 } else if (do_low_power) { 3092 } else if (do_low_power) {
3080 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3093 if (!tg3_phy_led_bug(tp))
3081 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3094 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3095 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3082 3096
3083 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3097 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3084 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3098 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
@@ -4226,8 +4240,6 @@ static int tg3_power_down_prepare(struct tg3 *tp)
4226 4240
4227static void tg3_power_down(struct tg3 *tp) 4241static void tg3_power_down(struct tg3 *tp)
4228{ 4242{
4229 tg3_power_down_prepare(tp);
4230
4231 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4243 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4232 pci_set_power_state(tp->pdev, PCI_D3hot); 4244 pci_set_power_state(tp->pdev, PCI_D3hot);
4233} 4245}
@@ -6095,10 +6107,12 @@ static u64 tg3_refclk_read(struct tg3 *tp)
6095/* tp->lock must be held */ 6107/* tp->lock must be held */
6096static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6108static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6097{ 6109{
6098 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP); 6110 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6111
6112 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6099 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6113 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6100 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6114 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6101 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME); 6115 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6102} 6116}
6103 6117
6104static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6118static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
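The tg3_refclk_write() change is a read-modify-write fix: the old code stored only the STOP or RESUME bit, wiping every other bit in the control register, including the timesync watchdog enable this series adds a few hunks later. The difference against a fake register, with tr32()/tw32() standing in for the MMIO accessors:

    #include <stdint.h>
    #include <stdio.h>

    #define CTL_STOP  0x2
    #define CTL_WDOG  (1u << 17)     /* unrelated bit that must survive */

    static uint32_t reg;                      /* stand-in for the register */
    static uint32_t tr32(void)       { return reg; }
    static void     tw32(uint32_t v) { reg = v; }

    int main(void)
    {
        reg = CTL_WDOG;                       /* watchdog output armed */
        tw32(CTL_STOP);                       /* old code: bit is lost */
        printf("clobbered: wdog=%d\n", !!(reg & CTL_WDOG));

        reg = CTL_WDOG;
        tw32(tr32() | CTL_STOP);              /* new code: bits preserved */
        printf("preserved: wdog=%d\n", !!(reg & CTL_WDOG));
        return 0;
    }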
@@ -6214,6 +6228,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6214static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6228static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6215 struct ptp_clock_request *rq, int on) 6229 struct ptp_clock_request *rq, int on)
6216{ 6230{
6231 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6232 u32 clock_ctl;
6233 int rval = 0;
6234
6235 switch (rq->type) {
6236 case PTP_CLK_REQ_PEROUT:
6237 if (rq->perout.index != 0)
6238 return -EINVAL;
6239
6240 tg3_full_lock(tp, 0);
6241 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6242 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6243
6244 if (on) {
6245 u64 nsec;
6246
6247 nsec = rq->perout.start.sec * 1000000000ULL +
6248 rq->perout.start.nsec;
6249
6250 if (rq->perout.period.sec || rq->perout.period.nsec) {
6251 netdev_warn(tp->dev,
6252 "Device supports only a one-shot timesync output; the period must be 0\n");
6253 rval = -EINVAL;
6254 goto err_out;
6255 }
6256
6257 if (nsec & (1ULL << 63)) {
6258 netdev_warn(tp->dev,
6259 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6260 rval = -EINVAL;
6261 goto err_out;
6262 }
6263
6264 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6265 tw32(TG3_EAV_WATCHDOG0_MSB,
6266 TG3_EAV_WATCHDOG0_EN |
6267 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6268
6269 tw32(TG3_EAV_REF_CLCK_CTL,
6270 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6271 } else {
6272 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6273 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6274 }
6275
6276err_out:
6277 tg3_full_unlock(tp);
6278 return rval;
6279
6280 default:
6281 break;
6282 }
6283
6217 return -EOPNOTSUPP; 6284 return -EOPNOTSUPP;
6218} 6285}
6219 6286
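The new PTP_CLK_REQ_PEROUT handler programs a one-shot output: the absolute start time in nanoseconds is split across two 32-bit watchdog registers, and bit 31 of the MSB register doubles as the enable flag, which is exactly why a start value that needs bit 63 has to be rejected. The split, sketched standalone:

    #include <stdint.h>
    #include <stdio.h>

    #define WDOG_EN        (1u << 31)
    #define WDOG_MSB_MASK  0x7fffffff

    int main(void)
    {
        uint64_t nsec = 5ULL * 1000000000ULL + 250;   /* 5.00000025 s */
        uint32_t lsb, msb;

        if (nsec & (1ULL << 63)) {     /* bit 63 would collide with EN */
            fprintf(stderr, "start exceeds 63 bits\n");
            return 1;
        }
        lsb = (uint32_t)(nsec & 0xffffffff);
        msb = WDOG_EN | (uint32_t)((nsec >> 32) & WDOG_MSB_MASK);
        printf("LSB=0x%08x MSB=0x%08x\n", lsb, msb);
        return 0;
    }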
@@ -6223,7 +6290,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
6223 .max_adj = 250000000, 6290 .max_adj = 250000000,
6224 .n_alarm = 0, 6291 .n_alarm = 0,
6225 .n_ext_ts = 0, 6292 .n_ext_ts = 0,
6226 .n_per_out = 0, 6293 .n_per_out = 1,
6227 .pps = 0, 6294 .pps = 0,
6228 .adjfreq = tg3_ptp_adjfreq, 6295 .adjfreq = tg3_ptp_adjfreq,
6229 .adjtime = tg3_ptp_adjtime, 6296 .adjtime = tg3_ptp_adjtime,
@@ -8538,10 +8605,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
8538 if (!i && tg3_flag(tp, ENABLE_RSS)) 8605 if (!i && tg3_flag(tp, ENABLE_RSS))
8539 continue; 8606 continue;
8540 8607
8541 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8608 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8542 TG3_RX_RCB_RING_BYTES(tp), 8609 TG3_RX_RCB_RING_BYTES(tp),
8543 &tnapi->rx_rcb_mapping, 8610 &tnapi->rx_rcb_mapping,
8544 GFP_KERNEL | __GFP_ZERO); 8611 GFP_KERNEL);
8545 if (!tnapi->rx_rcb) 8612 if (!tnapi->rx_rcb)
8546 goto err_out; 8613 goto err_out;
8547 } 8614 }
@@ -8590,10 +8657,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8590{ 8657{
8591 int i; 8658 int i;
8592 8659
8593 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8660 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8594 sizeof(struct tg3_hw_stats), 8661 sizeof(struct tg3_hw_stats),
8595 &tp->stats_mapping, 8662 &tp->stats_mapping, GFP_KERNEL);
8596 GFP_KERNEL | __GFP_ZERO);
8597 if (!tp->hw_stats) 8663 if (!tp->hw_stats)
8598 goto err_out; 8664 goto err_out;
8599 8665
@@ -8601,10 +8667,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8601 struct tg3_napi *tnapi = &tp->napi[i]; 8667 struct tg3_napi *tnapi = &tp->napi[i];
8602 struct tg3_hw_status *sblk; 8668 struct tg3_hw_status *sblk;
8603 8669
8604 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8670 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8605 TG3_HW_STATUS_SIZE, 8671 TG3_HW_STATUS_SIZE,
8606 &tnapi->status_mapping, 8672 &tnapi->status_mapping,
8607 GFP_KERNEL | __GFP_ZERO); 8673 GFP_KERNEL);
8608 if (!tnapi->hw_status) 8674 if (!tnapi->hw_status)
8609 goto err_out; 8675 goto err_out;
8610 8676
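The three allocation sites above drop GFP_KERNEL | __GFP_ZERO in favor of dma_zalloc_coherent(), which is dma_alloc_coherent() plus guaranteed zeroing behind one name. The wrapper idea in plain C, with malloc standing in for the DMA allocator:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void *alloc_buf(size_t size)       /* dma_alloc_coherent stand-in */
    {
        return malloc(size);
    }

    static void *zalloc_buf(size_t size)      /* dma_zalloc_coherent stand-in */
    {
        void *p = alloc_buf(size);

        if (p)
            memset(p, 0, size);
        return p;
    }

    int main(void)
    {
        unsigned char *stats = zalloc_buf(256);

        if (!stats)
            return 1;
        printf("first byte: %u\n", stats[0]); /* guaranteed zero */
        free(stats);
        return 0;
    }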
@@ -10367,6 +10433,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10367 if (tg3_flag(tp, 5755_PLUS)) 10433 if (tg3_flag(tp, 5755_PLUS))
10368 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10434 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10369 10435
10436 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10437 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10438
10370 if (tg3_flag(tp, ENABLE_RSS)) 10439 if (tg3_flag(tp, ENABLE_RSS))
10371 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10440 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10372 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10441 RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@ -11502,7 +11571,7 @@ static int tg3_close(struct net_device *dev)
11502 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); 11571 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11503 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); 11572 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11504 11573
11505 tg3_power_down(tp); 11574 tg3_power_down_prepare(tp);
11506 11575
11507 tg3_carrier_off(tp); 11576 tg3_carrier_off(tp);
11508 11577
@@ -11724,9 +11793,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11724 if (tg3_flag(tp, NO_NVRAM)) 11793 if (tg3_flag(tp, NO_NVRAM))
11725 return -EINVAL; 11794 return -EINVAL;
11726 11795
11727 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11728 return -EAGAIN;
11729
11730 offset = eeprom->offset; 11796 offset = eeprom->offset;
11731 len = eeprom->len; 11797 len = eeprom->len;
11732 eeprom->len = 0; 11798 eeprom->len = 0;
@@ -11784,9 +11850,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
11784 u8 *buf; 11850 u8 *buf;
11785 __be32 start, end; 11851 __be32 start, end;
11786 11852
11787 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11788 return -EAGAIN;
11789
11790 if (tg3_flag(tp, NO_NVRAM) || 11853 if (tg3_flag(tp, NO_NVRAM) ||
11791 eeprom->magic != TG3_EEPROM_MAGIC) 11854 eeprom->magic != TG3_EEPROM_MAGIC)
11792 return -EINVAL; 11855 return -EINVAL;
@@ -13515,7 +13578,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13515 tg3_phy_start(tp); 13578 tg3_phy_start(tp);
13516 } 13579 }
13517 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13580 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13518 tg3_power_down(tp); 13581 tg3_power_down_prepare(tp);
13519 13582
13520} 13583}
13521 13584
@@ -15917,7 +15980,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15917 */ 15980 */
15918 if (tg3_flag(tp, 5780_CLASS)) { 15981 if (tg3_flag(tp, 5780_CLASS)) {
15919 tg3_flag_set(tp, 40BIT_DMA_BUG); 15982 tg3_flag_set(tp, 40BIT_DMA_BUG);
15920 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 15983 tp->msi_cap = tp->pdev->msi_cap;
15921 } else { 15984 } else {
15922 struct pci_dev *bridge = NULL; 15985 struct pci_dev *bridge = NULL;
15923 15986
@@ -17547,11 +17610,6 @@ static int tg3_init_one(struct pci_dev *pdev,
17547 tg3_asic_rev(tp) == ASIC_REV_5762) 17610 tg3_asic_rev(tp) == ASIC_REV_5762)
17548 tg3_flag_set(tp, PTP_CAPABLE); 17611 tg3_flag_set(tp, PTP_CAPABLE);
17549 17612
17550 if (tg3_flag(tp, 5717_PLUS)) {
17551 /* Resume a low-power mode */
17552 tg3_frob_aux_power(tp, false);
17553 }
17554
17555 tg3_timer_init(tp); 17613 tg3_timer_init(tp);
17556 17614
17557 tg3_carrier_off(tp); 17615 tg3_carrier_off(tp);
@@ -17755,6 +17813,23 @@ out:
17755 17813
17756static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); 17814static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17757 17815
17816static void tg3_shutdown(struct pci_dev *pdev)
17817{
17818 struct net_device *dev = pci_get_drvdata(pdev);
17819 struct tg3 *tp = netdev_priv(dev);
17820
17821 rtnl_lock();
17822 netif_device_detach(dev);
17823
17824 if (netif_running(dev))
17825 dev_close(dev);
17826
17827 if (system_state == SYSTEM_POWER_OFF)
17828 tg3_power_down(tp);
17829
17830 rtnl_unlock();
17831}
17832
17758/** 17833/**
17759 * tg3_io_error_detected - called when PCI error is detected 17834 * tg3_io_error_detected - called when PCI error is detected
17760 * @pdev: Pointer to PCI device 17835 * @pdev: Pointer to PCI device
@@ -17914,6 +17989,7 @@ static struct pci_driver tg3_driver = {
17914 .remove = tg3_remove_one, 17989 .remove = tg3_remove_one,
17915 .err_handler = &tg3_err_handler, 17990 .err_handler = &tg3_err_handler,
17916 .driver.pm = &tg3_pm_ops, 17991 .driver.pm = &tg3_pm_ops,
17992 .shutdown = tg3_shutdown,
17917}; 17993};
17918 17994
17919module_pci_driver(tg3_driver); 17995module_pci_driver(tg3_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index cd63d1189aae..ddb8be1298ea 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -532,6 +532,7 @@
532#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000 532#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000
533#define RX_MODE_RSS_ENABLE 0x00800000 533#define RX_MODE_RSS_ENABLE 0x00800000
534#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 534#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000
535#define RX_MODE_IPV4_FRAG_FIX 0x02000000
535#define MAC_RX_STATUS 0x0000046c 536#define MAC_RX_STATUS 0x0000046c
536#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 537#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001
537#define RX_STATUS_XOFF_RCVD 0x00000002 538#define RX_STATUS_XOFF_RCVD 0x00000002
@@ -1818,12 +1819,21 @@
1818#define TG3_EAV_REF_CLCK_CTL 0x00006908 1819#define TG3_EAV_REF_CLCK_CTL 0x00006908
1819#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002 1820#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002
1820#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004 1821#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004
1822#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16)
1823#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17)
1824
1825#define TG3_EAV_WATCHDOG0_LSB 0x00006918
1826#define TG3_EAV_WATCHDOG0_MSB 0x0000691c
1827#define TG3_EAV_WATCHDOG0_EN (1 << 31)
1828#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff
1829
1821#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928 1830#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928
1822#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31) 1831#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31)
1823#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30) 1832#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30)
1824 1833
1825#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff 1834#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff
1826/* 0x690c --> 0x7000 unused */ 1835
1836/* 0x692c --> 0x7000 unused */
1827 1837
1828/* NVRAM Control registers */ 1838/* NVRAM Control registers */
1829#define NVRAM_CMD 0x00007000 1839#define NVRAM_CMD 0x00007000
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 57cd1bff59f1..3c07064b2bc4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1419,7 +1419,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1419 bna_bfi_rx_enet_start(rx); 1419 bna_bfi_rx_enet_start(rx);
1420} 1420}
1421 1421
1422void 1422static void
1423bna_rx_sm_stop_wait_entry(struct bna_rx *rx) 1423bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1424{ 1424{
1425} 1425}
@@ -1472,7 +1472,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1472 bna_rxf_start(&rx->rxf); 1472 bna_rxf_start(&rx->rxf);
1473} 1473}
1474 1474
1475void 1475static void
1476bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) 1476bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1477{ 1477{
1478} 1478}
@@ -1528,7 +1528,7 @@ bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1528 } 1528 }
1529} 1529}
1530 1530
1531void 1531static void
1532bna_rx_sm_started_entry(struct bna_rx *rx) 1532bna_rx_sm_started_entry(struct bna_rx *rx)
1533{ 1533{
1534 struct bna_rxp *rxp; 1534 struct bna_rxp *rxp;
@@ -1593,12 +1593,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1593 } 1593 }
1594} 1594}
1595 1595
1596void 1596static void
1597bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) 1597bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1598{ 1598{
1599} 1599}
1600 1600
1601void 1601static void
1602bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) 1602bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1603{ 1603{
1604 switch (event) { 1604 switch (event) {
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index c37f706d9992..43405f654b4a 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
37 37
38extern char bfa_version[]; 38extern char bfa_version[];
39 39
40#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin" 40#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
41#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin" 41#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ 42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
43 43
44#pragma pack(1) 44#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index bb5d63fb2e6d..ce75de9bae9e 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
304/* Detect MAC & PHY and perform ethernet interface initialization */ 304/* Detect MAC & PHY and perform ethernet interface initialization */
305static int __init at91ether_probe(struct platform_device *pdev) 305static int __init at91ether_probe(struct platform_device *pdev)
306{ 306{
307 struct macb_platform_data *board_data = pdev->dev.platform_data; 307 struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
308 struct resource *regs; 308 struct resource *regs;
309 struct net_device *dev; 309 struct net_device *dev;
310 struct phy_device *phydev; 310 struct phy_device *phydev;
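dev_get_platdata() replaces the open-coded pdev->dev.platform_data reads in at91_ether and macb. In the kernel it is a one-line accessor, so the conversion costs nothing at runtime and keeps the field behind a single helper. A sketch of the accessor and a caller:

    #include <stdio.h>

    struct device {
        void *platform_data;
    };

    /* Same shape as the accessor in <linux/device.h>. */
    static inline void *dev_get_platdata(const struct device *dev)
    {
        return dev->platform_data;
    }

    struct macb_platform_data {
        int is_rmii;
    };

    int main(void)
    {
        struct macb_platform_data pd = { .is_rmii = 1 };
        struct device dev = { .platform_data = &pd };
        struct macb_platform_data *p = dev_get_platdata(&dev);

        printf("is_rmii=%d\n", p ? p->is_rmii : 0);
        return 0;
    }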
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index e866608d7d91..92578690f6de 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -27,6 +27,7 @@
27#include <linux/phy.h> 27#include <linux/phy.h>
28#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/of_device.h> 29#include <linux/of_device.h>
30#include <linux/of_mdio.h>
30#include <linux/of_net.h> 31#include <linux/of_net.h>
31#include <linux/pinctrl/consumer.h> 32#include <linux/pinctrl/consumer.h>
32 33
@@ -124,7 +125,7 @@ void macb_get_hwaddr(struct macb *bp)
124 u8 addr[6]; 125 u8 addr[6];
125 int i; 126 int i;
126 127
127 pdata = bp->pdev->dev.platform_data; 128 pdata = dev_get_platdata(&bp->pdev->dev);
128 129
129 /* Check all 4 address registers for a valid address */ 130 /* Check all 4 address registers for a valid address */
130 for (i = 0; i < 4; i++) { 131 for (i = 0; i < 4; i++) {
@@ -275,7 +276,7 @@ static int macb_mii_probe(struct net_device *dev)
275 phydev = phy_find_first(bp->mii_bus); 276 phydev = phy_find_first(bp->mii_bus);
276 if (!phydev) { 277 if (!phydev) {
277 netdev_err(dev, "no PHY found\n"); 278 netdev_err(dev, "no PHY found\n");
278 return -1; 279 return -ENXIO;
279 } 280 }
280 281
281 pdata = dev_get_platdata(&bp->pdev->dev); 282 pdata = dev_get_platdata(&bp->pdev->dev);
@@ -314,6 +315,7 @@ static int macb_mii_probe(struct net_device *dev)
314int macb_mii_init(struct macb *bp) 315int macb_mii_init(struct macb *bp)
315{ 316{
316 struct macb_platform_data *pdata; 317 struct macb_platform_data *pdata;
318 struct device_node *np;
317 int err = -ENXIO, i; 319 int err = -ENXIO, i;
318 320
319 /* Enable management port */ 321 /* Enable management port */
@@ -333,10 +335,7 @@ int macb_mii_init(struct macb *bp)
333 bp->pdev->name, bp->pdev->id); 335 bp->pdev->name, bp->pdev->id);
334 bp->mii_bus->priv = bp; 336 bp->mii_bus->priv = bp;
335 bp->mii_bus->parent = &bp->dev->dev; 337 bp->mii_bus->parent = &bp->dev->dev;
336 pdata = bp->pdev->dev.platform_data; 338 pdata = dev_get_platdata(&bp->pdev->dev);
337
338 if (pdata)
339 bp->mii_bus->phy_mask = pdata->phy_mask;
340 339
341 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 340 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
342 if (!bp->mii_bus->irq) { 341 if (!bp->mii_bus->irq) {
@@ -344,17 +343,45 @@ int macb_mii_init(struct macb *bp)
344 goto err_out_free_mdiobus; 343 goto err_out_free_mdiobus;
345 } 344 }
346 345
347 for (i = 0; i < PHY_MAX_ADDR; i++)
348 bp->mii_bus->irq[i] = PHY_POLL;
349
350 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 346 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
351 347
352 if (mdiobus_register(bp->mii_bus)) 348 np = bp->pdev->dev.of_node;
349 if (np) {
350 /* try dt phy registration */
351 err = of_mdiobus_register(bp->mii_bus, np);
352
353 /* fall back to standard phy registration if no PHY was
354 found during dt phy registration */
355 if (!err && !phy_find_first(bp->mii_bus)) {
356 for (i = 0; i < PHY_MAX_ADDR; i++) {
357 struct phy_device *phydev;
358
359 phydev = mdiobus_scan(bp->mii_bus, i);
360 if (IS_ERR(phydev)) {
361 err = PTR_ERR(phydev);
362 break;
363 }
364 }
365
366 if (err)
367 goto err_out_unregister_bus;
368 }
369 } else {
370 for (i = 0; i < PHY_MAX_ADDR; i++)
371 bp->mii_bus->irq[i] = PHY_POLL;
372
373 if (pdata)
374 bp->mii_bus->phy_mask = pdata->phy_mask;
375
376 err = mdiobus_register(bp->mii_bus);
377 }
378
379 if (err)
353 goto err_out_free_mdio_irq; 380 goto err_out_free_mdio_irq;
354 381
355 if (macb_mii_probe(bp->dev) != 0) { 382 err = macb_mii_probe(bp->dev);
383 if (err)
356 goto err_out_unregister_bus; 384 goto err_out_unregister_bus;
357 }
358 385
359 return 0; 386 return 0;
360 387
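Note: the reworked macb_mii_init() above makes device-tree PHY registration the primary path. When an OF node exists, of_mdiobus_register() registers the PHYs described in the tree (including their interrupts); if the tree describes none, every bus address is probed with mdiobus_scan(). Only without an OF node does the driver fall back to mdiobus_register() with polled interrupts and the platform-data phy_mask. Stripped to its control flow (error handling elided; a sketch, not the verbatim driver code):

    np = bp->pdev->dev.of_node;
    if (np) {
            /* PHYs (and their irqs) come from the device tree */
            err = of_mdiobus_register(bp->mii_bus, np);
            if (!err && !phy_find_first(bp->mii_bus))
                    for (i = 0; i < PHY_MAX_ADDR; i++)
                            mdiobus_scan(bp->mii_bus, i);  /* probe every address */
    } else {
            for (i = 0; i < PHY_MAX_ADDR; i++)
                    bp->mii_bus->irq[i] = PHY_POLL;        /* legacy path: poll */
            if (pdata)
                    bp->mii_bus->phy_mask = pdata->phy_mask;
            err = mdiobus_register(bp->mii_bus);
    }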
@@ -1824,7 +1851,7 @@ static int __init macb_probe(struct platform_device *pdev)
1824 1851
1825 err = of_get_phy_mode(pdev->dev.of_node); 1852 err = of_get_phy_mode(pdev->dev.of_node);
1826 if (err < 0) { 1853 if (err < 0) {
1827 pdata = pdev->dev.platform_data; 1854 pdata = dev_get_platdata(&pdev->dev);
1828 if (pdata && pdata->is_rmii) 1855 if (pdata && pdata->is_rmii)
1829 bp->phy_interface = PHY_INTERFACE_MODE_RMII; 1856 bp->phy_interface = PHY_INTERFACE_MODE_RMII;
1830 else 1857 else
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 7cb148c495c9..78d6d6b970e1 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
353 /* Receive errors */ 353 /* Receive errors */
354 unsigned long rx_watchdog; 354 unsigned long rx_watchdog;
355 unsigned long rx_da_filter_fail; 355 unsigned long rx_da_filter_fail;
356 unsigned long rx_sa_filter_fail;
357 unsigned long rx_payload_error; 356 unsigned long rx_payload_error;
358 unsigned long rx_ip_header_error; 357 unsigned long rx_ip_header_error;
359 /* Tx/Rx IRQ errors */ 358 /* Tx/Rx IRQ errors */
360 unsigned long tx_undeflow;
361 unsigned long tx_process_stopped; 359 unsigned long tx_process_stopped;
362 unsigned long rx_buf_unav; 360 unsigned long rx_buf_unav;
363 unsigned long rx_process_stopped; 361 unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
393 char rx_pause; 391 char rx_pause;
394 char tx_pause; 392 char tx_pause;
395 int wolopts; 393 int wolopts;
394 struct work_struct tx_timeout_work;
396}; 395};
397 396
398/* XGMAC Configuration Settings */ 397/* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
409#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) 408#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
410#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) 409#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
411 410
411#define tx_dma_ring_space(p) \
412 dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
413
412/* XGMAC Descriptor Access Helpers */ 414/* XGMAC Descriptor Access Helpers */
413static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) 415static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
414{ 416{
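Note: tx_dma_ring_space() is only shorthand over the existing dma_ring_space() macro, which wraps CIRC_SPACE() from <linux/circ_buf.h>. CIRC_SPACE(head, tail, size) counts the slots a producer may still fill while keeping one entry unused, so head == tail always means empty rather than full. A worked example:

    #include <linux/circ_buf.h>

    /* ring of 8 entries, head (producer) at 5, tail (consumer) at 2 */
    unsigned int cnt   = CIRC_CNT(5, 2, 8);    /* 3 descriptors in flight */
    unsigned int space = CIRC_SPACE(5, 2, 8);  /* 4 free; one slot stays reserved */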
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
421 423
422static inline int desc_get_buf_len(struct xgmac_dma_desc *p) 424static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
423{ 425{
424 u32 len = cpu_to_le32(p->flags); 426 u32 len = le32_to_cpu(p->buf_size);
425 return (len & DESC_BUFFER1_SZ_MASK) + 427 return (len & DESC_BUFFER1_SZ_MASK) +
426 ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); 428 ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
427} 429}
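Note: the desc_get_buf_len() change above fixes two defects in one line. The old code read the wrong descriptor word (p->flags rather than p->buf_size) and converted it in the wrong direction (cpu_to_le32() applied to a value coming from the descriptor). The two conversion macros perform the same byte swap, so the direction mix-up is only a sparse type-annotation problem; reading ->flags instead of ->buf_size is what actually produced bogus lengths. The correct idiom for reading any little-endian descriptor field:

    u32 len = le32_to_cpu(p->buf_size);   /* LE device format -> CPU order, then mask */
    len = (len & DESC_BUFFER1_SZ_MASK) +
          ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);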
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
464 p->flags = cpu_to_le32(tmpflags); 466 p->flags = cpu_to_le32(tmpflags);
465} 467}
466 468
469static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
470{
471 u32 tmpflags = le32_to_cpu(p->flags);
472 tmpflags &= TXDESC_END_RING;
473 p->flags = cpu_to_le32(tmpflags);
474}
475
467static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) 476static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
468{ 477{
469 return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; 478 return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
470} 479}
471 480
481static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
482{
483 return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
484}
485
472static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) 486static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
473{ 487{
474 return le32_to_cpu(p->buf1_addr); 488 return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
609{ 623{
610 u32 data; 624 u32 data;
611 625
612 data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); 626 if (addr) {
613 writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); 627 data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
614 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; 628 writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
615 writel(data, ioaddr + XGMAC_ADDR_LOW(num)); 629 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
630 writel(data, ioaddr + XGMAC_ADDR_LOW(num));
631 } else {
632 writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
633 writel(0, ioaddr + XGMAC_ADDR_LOW(num));
634 }
616} 635}
617 636
618static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, 637static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
683 if (unlikely(skb == NULL)) 702 if (unlikely(skb == NULL))
684 break; 703 break;
685 704
686 priv->rx_skbuff[entry] = skb;
687 paddr = dma_map_single(priv->device, skb->data, 705 paddr = dma_map_single(priv->device, skb->data,
688 bufsz, DMA_FROM_DEVICE); 706 priv->dma_buf_sz - NET_IP_ALIGN,
707 DMA_FROM_DEVICE);
708 if (dma_mapping_error(priv->device, paddr)) {
709 dev_kfree_skb_any(skb);
710 break;
711 }
712 priv->rx_skbuff[entry] = skb;
689 desc_set_buf_addr(p, paddr, priv->dma_buf_sz); 713 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
690 } 714 }
691 715
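Note: xgmac_rx_refill() now checks the mapping before publishing anything. The skb is stored in rx_skbuff[] only after dma_map_single() succeeds, so a mapping failure can no longer leave a stale pointer behind a descriptor the hardware may still use; the map length also now matches the unmap length used elsewhere in the driver (dma_buf_sz - NET_IP_ALIGN). The pattern in isolation:

    paddr = dma_map_single(priv->device, skb->data,
                           priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
    if (dma_mapping_error(priv->device, paddr)) {
            dev_kfree_skb_any(skb);
            break;                        /* retry on the next refill pass */
    }
    priv->rx_skbuff[entry] = skb;         /* publish only after the map succeeds */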
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
782 return; 806 return;
783 807
784 for (i = 0; i < DMA_RX_RING_SZ; i++) { 808 for (i = 0; i < DMA_RX_RING_SZ; i++) {
785 if (priv->rx_skbuff[i] == NULL) 809 struct sk_buff *skb = priv->rx_skbuff[i];
810 if (skb == NULL)
786 continue; 811 continue;
787 812
788 p = priv->dma_rx + i; 813 p = priv->dma_rx + i;
789 dma_unmap_single(priv->device, desc_get_buf_addr(p), 814 dma_unmap_single(priv->device, desc_get_buf_addr(p),
790 priv->dma_buf_sz, DMA_FROM_DEVICE); 815 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
791 dev_kfree_skb_any(priv->rx_skbuff[i]); 816 dev_kfree_skb_any(skb);
792 priv->rx_skbuff[i] = NULL; 817 priv->rx_skbuff[i] = NULL;
793 } 818 }
794} 819}
795 820
796static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) 821static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
797{ 822{
798 int i, f; 823 int i;
799 struct xgmac_dma_desc *p; 824 struct xgmac_dma_desc *p;
800 825
801 if (!priv->tx_skbuff) 826 if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
806 continue; 831 continue;
807 832
808 p = priv->dma_tx + i; 833 p = priv->dma_tx + i;
809 dma_unmap_single(priv->device, desc_get_buf_addr(p), 834 if (desc_get_tx_fs(p))
810 desc_get_buf_len(p), DMA_TO_DEVICE); 835 dma_unmap_single(priv->device, desc_get_buf_addr(p),
811 836 desc_get_buf_len(p), DMA_TO_DEVICE);
812 for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { 837 else
813 p = priv->dma_tx + i++;
814 dma_unmap_page(priv->device, desc_get_buf_addr(p), 838 dma_unmap_page(priv->device, desc_get_buf_addr(p),
815 desc_get_buf_len(p), DMA_TO_DEVICE); 839 desc_get_buf_len(p), DMA_TO_DEVICE);
816 }
817 840
818 dev_kfree_skb_any(priv->tx_skbuff[i]); 841 if (desc_get_tx_ls(p))
842 dev_kfree_skb_any(priv->tx_skbuff[i]);
819 priv->tx_skbuff[i] = NULL; 843 priv->tx_skbuff[i] = NULL;
820 } 844 }
821} 845}
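Note: xgmac_free_tx_skbufs() (and xgmac_tx_complete() below) now pick the unmap primitive per descriptor from the segment flags instead of re-walking skb_shinfo()->nr_frags with a second index. The first descriptor of a packet carries the skb->data mapping, later ones carry fragment pages, and the skb is freed exactly once, at the last segment:

    if (desc_get_tx_fs(p))                /* head: mapped with dma_map_single() */
            dma_unmap_single(priv->device, desc_get_buf_addr(p),
                             desc_get_buf_len(p), DMA_TO_DEVICE);
    else                                  /* frag: mapped with skb_frag_dma_map() */
            dma_unmap_page(priv->device, desc_get_buf_addr(p),
                           desc_get_buf_len(p), DMA_TO_DEVICE);

    if (desc_get_tx_ls(p))                /* tail: free the skb once */
            dev_kfree_skb_any(priv->tx_skbuff[i]);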
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
852 */ 876 */
853static void xgmac_tx_complete(struct xgmac_priv *priv) 877static void xgmac_tx_complete(struct xgmac_priv *priv)
854{ 878{
855 int i;
856
857 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { 879 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
858 unsigned int entry = priv->tx_tail; 880 unsigned int entry = priv->tx_tail;
859 struct sk_buff *skb = priv->tx_skbuff[entry]; 881 struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
863 if (desc_get_owner(p)) 885 if (desc_get_owner(p))
864 break; 886 break;
865 887
866 /* Verify tx error by looking at the last segment */
867 if (desc_get_tx_ls(p))
868 desc_get_tx_status(priv, p);
869
870 netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", 888 netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
871 priv->tx_head, priv->tx_tail); 889 priv->tx_head, priv->tx_tail);
872 890
873 dma_unmap_single(priv->device, desc_get_buf_addr(p), 891 if (desc_get_tx_fs(p))
874 desc_get_buf_len(p), DMA_TO_DEVICE); 892 dma_unmap_single(priv->device, desc_get_buf_addr(p),
875 893 desc_get_buf_len(p), DMA_TO_DEVICE);
876 priv->tx_skbuff[entry] = NULL; 894 else
877 priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
878
879 if (!skb) {
880 continue;
881 }
882
883 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
884 entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
885 DMA_TX_RING_SZ);
886 p = priv->dma_tx + priv->tx_tail;
887
888 dma_unmap_page(priv->device, desc_get_buf_addr(p), 895 dma_unmap_page(priv->device, desc_get_buf_addr(p),
889 desc_get_buf_len(p), DMA_TO_DEVICE); 896 desc_get_buf_len(p), DMA_TO_DEVICE);
897
898 /* Check tx error on the last segment */
899 if (desc_get_tx_ls(p)) {
900 desc_get_tx_status(priv, p);
901 dev_kfree_skb(skb);
890 } 902 }
891 903
892 dev_kfree_skb(skb); 904 priv->tx_skbuff[entry] = NULL;
905 priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
893 } 906 }
894 907
895 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > 908 /* Ensure tx_tail is visible to xgmac_xmit */
896 MAX_SKB_FRAGS) 909 smp_mb();
910 if (unlikely(netif_queue_stopped(priv->dev) &&
911 (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
897 netif_wake_queue(priv->dev); 912 netif_wake_queue(priv->dev);
898} 913}
899 914
900/** 915static void xgmac_tx_timeout_work(struct work_struct *work)
901 * xgmac_tx_err:
902 * @priv: pointer to the private device structure
903 * Description: it cleans the descriptors and restarts the transmission
904 * in case of errors.
905 */
906static void xgmac_tx_err(struct xgmac_priv *priv)
907{ 916{
908 u32 reg, value, inten; 917 u32 reg, value;
918 struct xgmac_priv *priv =
919 container_of(work, struct xgmac_priv, tx_timeout_work);
909 920
910 netif_stop_queue(priv->dev); 921 napi_disable(&priv->napi);
911 922
912 inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
913 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 923 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
914 924
925 netif_tx_lock(priv->dev);
926
915 reg = readl(priv->base + XGMAC_DMA_CONTROL); 927 reg = readl(priv->base + XGMAC_DMA_CONTROL);
916 writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); 928 writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
917 do { 929 do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
927 939
928 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, 940 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
929 priv->base + XGMAC_DMA_STATUS); 941 priv->base + XGMAC_DMA_STATUS);
930 writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
931 942
943 netif_tx_unlock(priv->dev);
932 netif_wake_queue(priv->dev); 944 netif_wake_queue(priv->dev);
945
946 napi_enable(&priv->napi);
947
948 /* Enable interrupts */
949 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
950 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
933} 951}
934 952
935static int xgmac_hw_init(struct net_device *dev) 953static int xgmac_hw_init(struct net_device *dev)
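Note: xgmac_tx_err() is gone; its body becomes xgmac_tx_timeout_work(), run from a workqueue. The recovery path now calls napi_disable(), which can sleep, and serializes against the transmit path with netif_tx_lock(), neither of which is possible from the hard-irq context that used to invoke xgmac_tx_err(). Both the watchdog and the DMA_STATUS_TPS interrupt therefore just queue the work:

    static void xgmac_tx_timeout(struct net_device *dev)
    {
            struct xgmac_priv *priv = netdev_priv(dev);

            /* defer the heavy reset to process context */
            schedule_work(&priv->tx_timeout_work);
    }

The work function re-arms the DMA interrupts itself when it finishes; correspondingly, the next two hunks move the initial unmasking out of xgmac_hw_init() and into xgmac_open(), after NAPI is enabled.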
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
957 DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; 975 DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
958 writel(value, ioaddr + XGMAC_DMA_BUS_MODE); 976 writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
959 977
960 /* Enable interrupts */ 978 writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
961 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
962 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
963 979
964 /* Mask power mgt interrupt */ 980 /* Mask power mgt interrupt */
965 writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); 981 writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
1027 napi_enable(&priv->napi); 1043 napi_enable(&priv->napi);
1028 netif_start_queue(dev); 1044 netif_start_queue(dev);
1029 1045
1046 /* Enable interrupts */
1047 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
1048 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
1049
1030 return 0; 1050 return 0;
1031} 1051}
1032 1052
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1087 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); 1107 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
1088 if (dma_mapping_error(priv->device, paddr)) { 1108 if (dma_mapping_error(priv->device, paddr)) {
1089 dev_kfree_skb(skb); 1109 dev_kfree_skb(skb);
1090 return -EIO; 1110 return NETDEV_TX_OK;
1091 } 1111 }
1092 priv->tx_skbuff[entry] = skb; 1112 priv->tx_skbuff[entry] = skb;
1093 desc_set_buf_addr_and_size(desc, paddr, len); 1113 desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1099 1119
1100 paddr = skb_frag_dma_map(priv->device, frag, 0, len, 1120 paddr = skb_frag_dma_map(priv->device, frag, 0, len,
1101 DMA_TO_DEVICE); 1121 DMA_TO_DEVICE);
1102 if (dma_mapping_error(priv->device, paddr)) { 1122 if (dma_mapping_error(priv->device, paddr))
1103 dev_kfree_skb(skb); 1123 goto dma_err;
1104 return -EIO;
1105 }
1106 1124
1107 entry = dma_ring_incr(entry, DMA_TX_RING_SZ); 1125 entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
1108 desc = priv->dma_tx + entry; 1126 desc = priv->dma_tx + entry;
1109 priv->tx_skbuff[entry] = NULL; 1127 priv->tx_skbuff[entry] = skb;
1110 1128
1111 desc_set_buf_addr_and_size(desc, paddr, len); 1129 desc_set_buf_addr_and_size(desc, paddr, len);
1112 if (i < (nfrags - 1)) 1130 if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1124 wmb(); 1142 wmb();
1125 desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); 1143 desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
1126 1144
1145 writel(1, priv->base + XGMAC_DMA_TX_POLL);
1146
1127 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); 1147 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
1128 1148
1129 writel(1, priv->base + XGMAC_DMA_TX_POLL); 1149 /* Ensure tx_head update is visible to tx completion */
1130 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < 1150 smp_mb();
1131 MAX_SKB_FRAGS) 1151 if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
1132 netif_stop_queue(dev); 1152 netif_stop_queue(dev);
1153 /* Ensure netif_stop_queue is visible to tx completion */
1154 smp_mb();
1155 if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
1156 netif_start_queue(dev);
1157 }
1158 return NETDEV_TX_OK;
1133 1159
1160dma_err:
1161 entry = priv->tx_head;
1162 for ( ; i > 0; i--) {
1163 entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
1164 desc = priv->dma_tx + entry;
1165 priv->tx_skbuff[entry] = NULL;
1166 dma_unmap_page(priv->device, desc_get_buf_addr(desc),
1167 desc_get_buf_len(desc), DMA_TO_DEVICE);
1168 desc_clear_tx_owner(desc);
1169 }
1170 desc = first;
1171 dma_unmap_single(priv->device, desc_get_buf_addr(desc),
1172 desc_get_buf_len(desc), DMA_TO_DEVICE);
1173 dev_kfree_skb(skb);
1134 return NETDEV_TX_OK; 1174 return NETDEV_TX_OK;
1135} 1175}
1136 1176
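Note: two things change in xgmac_xmit(). First, the queue stop/wake handshake with xgmac_tx_complete() gets the standard lockless pairing: each side publishes its own update (tx_head, or the stopped bit) and then issues smp_mb() before reading the other side's state, so a wake-up cannot fall into the race window. The skeleton of the pattern:

    /* producer (xmit) */
    priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
    smp_mb();                               /* head update before space check */
    if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
            netif_stop_queue(dev);
            smp_mb();                       /* stop visible before re-check */
            if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
                    netif_start_queue(dev); /* completion raced with us */
    }

    /* consumer (tx completion) */
    priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
    smp_mb();                               /* tail update before stopped check */
    if (unlikely(netif_queue_stopped(priv->dev) &&
                 tx_dma_ring_space(priv) > MAX_SKB_FRAGS))
            netif_wake_queue(priv->dev);

Second, a failed fragment mapping now unwinds properly: the dma_err label walks back over the descriptors already filled, unmaps them, clears their owner bits, and frees the skb. Returning NETDEV_TX_OK with the skb consumed also replaces the old -EIO, which was never a valid netdev_tx_t value.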
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1174 1214
1175 skb_put(skb, frame_len); 1215 skb_put(skb, frame_len);
1176 dma_unmap_single(priv->device, desc_get_buf_addr(p), 1216 dma_unmap_single(priv->device, desc_get_buf_addr(p),
1177 frame_len, DMA_FROM_DEVICE); 1217 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
1178 1218
1179 skb->protocol = eth_type_trans(skb, priv->dev); 1219 skb->protocol = eth_type_trans(skb, priv->dev);
1180 skb->ip_summed = ip_checksum; 1220 skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
1225static void xgmac_tx_timeout(struct net_device *dev) 1265static void xgmac_tx_timeout(struct net_device *dev)
1226{ 1266{
1227 struct xgmac_priv *priv = netdev_priv(dev); 1267 struct xgmac_priv *priv = netdev_priv(dev);
1228 1268 schedule_work(&priv->tx_timeout_work);
1229 /* Clear Tx resources and restart transmitting again */
1230 xgmac_tx_err(priv);
1231} 1269}
1232 1270
1233/** 1271/**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1286 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { 1324 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
1287 use_hash = true; 1325 use_hash = true;
1288 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; 1326 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
1327 } else {
1328 use_hash = false;
1289 } 1329 }
1290 netdev_for_each_mc_addr(ha, dev) { 1330 netdev_for_each_mc_addr(ha, dev) {
1291 if (use_hash) { 1331 if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1302 } 1342 }
1303 1343
1304out: 1344out:
1345 for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
1346 xgmac_set_mac_addr(ioaddr, NULL, reg);
1305 for (i = 0; i < XGMAC_NUM_HASH; i++) 1347 for (i = 0; i < XGMAC_NUM_HASH; i++)
1306 writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); 1348 writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
1307 1349
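Note: with the NULL case added to xgmac_set_mac_addr() earlier in this patch, xgmac_set_rx_mode() can now zero every perfect-filter slot past the last one it programmed, so addresses removed from the list no longer linger in hardware, and use_hash is explicitly reset on the non-hash path. One caveat worth flagging: the cleanup loop iterates i but passes reg as the slot argument, so as written it clears the same slot repeatedly; i looks like the intended argument:

    for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
            xgmac_set_mac_addr(ioaddr, NULL, reg);   /* presumably meant: i */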
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1366static irqreturn_t xgmac_interrupt(int irq, void *dev_id) 1408static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1367{ 1409{
1368 u32 intr_status; 1410 u32 intr_status;
1369 bool tx_err = false;
1370 struct net_device *dev = (struct net_device *)dev_id; 1411 struct net_device *dev = (struct net_device *)dev_id;
1371 struct xgmac_priv *priv = netdev_priv(dev); 1412 struct xgmac_priv *priv = netdev_priv(dev);
1372 struct xgmac_extra_stats *x = &priv->xstats; 1413 struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1396 if (intr_status & DMA_STATUS_TPS) { 1437 if (intr_status & DMA_STATUS_TPS) {
1397 netdev_err(priv->dev, "transmit process stopped\n"); 1438 netdev_err(priv->dev, "transmit process stopped\n");
1398 x->tx_process_stopped++; 1439 x->tx_process_stopped++;
1399 tx_err = true; 1440 schedule_work(&priv->tx_timeout_work);
1400 } 1441 }
1401 if (intr_status & DMA_STATUS_FBI) { 1442 if (intr_status & DMA_STATUS_FBI) {
1402 netdev_err(priv->dev, "fatal bus error\n"); 1443 netdev_err(priv->dev, "fatal bus error\n");
1403 x->fatal_bus_error++; 1444 x->fatal_bus_error++;
1404 tx_err = true;
1405 } 1445 }
1406
1407 if (tx_err)
1408 xgmac_tx_err(priv);
1409 } 1446 }
1410 1447
1411 /* TX/RX NORMAL interrupts */ 1448 /* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
1569 XGMAC_STAT(rx_payload_error), 1606 XGMAC_STAT(rx_payload_error),
1570 XGMAC_STAT(rx_ip_header_error), 1607 XGMAC_STAT(rx_ip_header_error),
1571 XGMAC_STAT(rx_da_filter_fail), 1608 XGMAC_STAT(rx_da_filter_fail),
1572 XGMAC_STAT(rx_sa_filter_fail),
1573 XGMAC_STAT(fatal_bus_error), 1609 XGMAC_STAT(fatal_bus_error),
1574 XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), 1610 XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
1575 XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), 1611 XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
1708 ndev->netdev_ops = &xgmac_netdev_ops; 1744 ndev->netdev_ops = &xgmac_netdev_ops;
1709 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); 1745 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
1710 spin_lock_init(&priv->stats_lock); 1746 spin_lock_init(&priv->stats_lock);
1747 INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
1711 1748
1712 priv->device = &pdev->dev; 1749 priv->device = &pdev->dev;
1713 priv->dev = ndev; 1750 priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
1759 if (device_can_wakeup(priv->device)) 1796 if (device_can_wakeup(priv->device))
1760 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ 1797 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1761 1798
1762 ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; 1799 ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
1763 if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) 1800 if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
1764 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1801 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1765 NETIF_F_RXCSUM; 1802 NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 038df4b96139..79ac77cf62d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3982,7 +3982,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
3982 struct inet6_ifaddr *ifa = data; 3982 struct inet6_ifaddr *ifa = data;
3983 struct net_device *event_dev; 3983 struct net_device *event_dev;
3984 int ret = NOTIFY_DONE; 3984 int ret = NOTIFY_DONE;
3985 int cnt;
3986 struct bonding *bond = netdev_priv(ifa->idev->dev); 3985 struct bonding *bond = netdev_priv(ifa->idev->dev);
3987 struct slave *slave; 3986 struct slave *slave;
3988 struct pci_dev *first_pdev = NULL; 3987 struct pci_dev *first_pdev = NULL;
@@ -3996,7 +3995,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
3996 * in all of them only once. 3995 * in all of them only once.
3997 */ 3996 */
3998 read_lock(&bond->lock); 3997 read_lock(&bond->lock);
3999 bond_for_each_slave(bond, slave, cnt) { 3998 bond_for_each_slave(bond, slave) {
4000 if (!first_pdev) { 3999 if (!first_pdev) {
4001 ret = clip_add(slave->dev, ifa, event); 4000 ret = clip_add(slave->dev, ifa, event);
4002 /* If clip_add is success then only initialize 4001 /* If clip_add is success then only initialize
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e3d4ec836f8b..ec88de4ac162 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -814,7 +814,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
814 814
815 if (pdev == NULL) 815 if (pdev == NULL)
816 return -ENODEV; 816 return -ENODEV;
817 data = pdev->dev.platform_data; 817 data = dev_get_platdata(&pdev->dev);
818 818
819 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 819 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
820 irq = platform_get_irq(pdev, 0); 820 irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index 9d4974bba247..239e1e46545d 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o 4 enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
5 enic_ethtool.o enic_api.o
5 6
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index afe9b1662b8c..e9f7c656ddda 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,12 +32,12 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "2.1.1.39" 35#define DRV_VERSION "2.1.1.50"
36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
39 39
40#define ENIC_WQ_MAX 1 40#define ENIC_WQ_MAX 8
41#define ENIC_RQ_MAX 8 41#define ENIC_RQ_MAX 8
42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@@ -96,6 +96,7 @@ struct enic {
96#ifdef CONFIG_PCI_IOV 96#ifdef CONFIG_PCI_IOV
97 u16 num_vfs; 97 u16 num_vfs;
98#endif 98#endif
99 spinlock_t enic_api_lock;
99 struct enic_port_profile *pp; 100 struct enic_port_profile *pp;
100 101
101 /* work queue cache line section */ 102 /* work queue cache line section */
@@ -127,9 +128,57 @@ static inline struct device *enic_get_dev(struct enic *enic)
127 return &(enic->pdev->dev); 128 return &(enic->pdev->dev);
128} 129}
129 130
131static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
132{
133 return rq;
134}
135
136static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
137{
138 return enic->rq_count + wq;
139}
140
141static inline unsigned int enic_legacy_io_intr(void)
142{
143 return 0;
144}
145
146static inline unsigned int enic_legacy_err_intr(void)
147{
148 return 1;
149}
150
151static inline unsigned int enic_legacy_notify_intr(void)
152{
153 return 2;
154}
155
156static inline unsigned int enic_msix_rq_intr(struct enic *enic,
157 unsigned int rq)
158{
159 return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
160}
161
162static inline unsigned int enic_msix_wq_intr(struct enic *enic,
163 unsigned int wq)
164{
165 return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
166}
167
168static inline unsigned int enic_msix_err_intr(struct enic *enic)
169{
170 return enic->rq_count + enic->wq_count;
171}
172
173static inline unsigned int enic_msix_notify_intr(struct enic *enic)
174{
175 return enic->rq_count + enic->wq_count + 1;
176}
177
130void enic_reset_addr_lists(struct enic *enic); 178void enic_reset_addr_lists(struct enic *enic);
131int enic_sriov_enabled(struct enic *enic); 179int enic_sriov_enabled(struct enic *enic);
132int enic_is_valid_vf(struct enic *enic, int vf); 180int enic_is_valid_vf(struct enic *enic, int vf);
133int enic_is_dynamic(struct enic *enic); 181int enic_is_dynamic(struct enic *enic);
182void enic_set_ethtool_ops(struct net_device *netdev);
134 183
135#endif /* _ENIC_H_ */ 184#endif /* _ENIC_H_ */
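Note: the enic_cq_*/enic_legacy_*/enic_msix_* helpers are moved verbatim out of enic_main.c so that the new enic_ethtool.c can share them. Together they pin down the completion-queue and interrupt-vector layout: CQs are ordered RQs first, then WQs, and the MSI-X table is one vector per queue followed by the error and notify vectors. For example, with rq_count = 4 and wq_count = 4:

    /* cq 0..3    -> RQ0..RQ3           enic_cq_rq(rq) == rq
     * cq 4..7    -> WQ0..WQ3           enic_cq_wq(wq) == rq_count + wq
     * vector 8   -> error interrupt    enic_msix_err_intr()
     * vector 9   -> notify interrupt   enic_msix_notify_intr()
     */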
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
new file mode 100644
index 000000000000..e13efbdaa2ed
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -0,0 +1,48 @@
1/**
2 * Copyright 2013 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/netdevice.h>
20#include <linux/spinlock.h>
21
22#include "vnic_dev.h"
23#include "vnic_devcmd.h"
24
25#include "enic_res.h"
26#include "enic.h"
27#include "enic_api.h"
28
29int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
30 enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
31{
32 int err;
33 struct enic *enic = netdev_priv(netdev);
34 struct vnic_dev *vdev = enic->vdev;
35
36 spin_lock(&enic->enic_api_lock);
37 spin_lock(&enic->devcmd_lock);
38
39 vnic_dev_cmd_proxy_by_index_start(vdev, vf);
40 err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
41 vnic_dev_cmd_proxy_end(vdev);
42
43 spin_unlock(&enic->devcmd_lock);
44 spin_unlock(&enic->enic_api_lock);
45
46 return err;
47}
48EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index);
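Note: enic_api_devcmd_proxy_by_index() is the symbol exported for Cisco's out-of-tree low-latency driver (per the merge log). It brackets a proxied firmware command with the new enic_api_lock, which enic_reset() also takes further down, plus the existing devcmd_lock, so a proxy call can never interleave with a device reset. A hypothetical caller might look like this (the command and arguments are purely illustrative, not a documented calling convention):

    u64 a0 = 0, a1 = 0;
    int wait = 1000;    /* devcmd timeout, as used elsewhere in enic */
    int err;

    /* issue a devcmd on behalf of VF 0 via the PF's vnic_dev */
    err = enic_api_devcmd_proxy_by_index(netdev, 0, CMD_MCPU_FW_INFO,
                                         &a0, &a1, wait);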
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
new file mode 100644
index 000000000000..6b9f9255af28
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -0,0 +1,30 @@
1/**
2 * Copyright 2013 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef __ENIC_API_H__
20#define __ENIC_API_H__
21
22#include <linux/netdevice.h>
23
24#include "vnic_dev.h"
25#include "vnic_devcmd.h"
26
27int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
28 enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait);
29
30#endif
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 08bded051b93..129b14a4efb0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -20,6 +20,7 @@
20#define _ENIC_DEV_H_ 20#define _ENIC_DEV_H_
21 21
22#include "vnic_dev.h" 22#include "vnic_dev.h"
23#include "vnic_vic.h"
23 24
24/* 25/*
25 * Calls the devcmd function given by argument vnicdevcmdfn. 26 * Calls the devcmd function given by argument vnicdevcmdfn.
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
new file mode 100644
index 000000000000..47e3562f4866
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -0,0 +1,257 @@
1/**
2 * Copyright 2013 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/netdevice.h>
20#include <linux/ethtool.h>
21
22#include "enic_res.h"
23#include "enic.h"
24#include "enic_dev.h"
25
26struct enic_stat {
27 char name[ETH_GSTRING_LEN];
28 unsigned int index;
29};
30
31#define ENIC_TX_STAT(stat) { \
32 .name = #stat, \
33 .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
34}
35
36#define ENIC_RX_STAT(stat) { \
37 .name = #stat, \
38 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
39}
40
41static const struct enic_stat enic_tx_stats[] = {
42 ENIC_TX_STAT(tx_frames_ok),
43 ENIC_TX_STAT(tx_unicast_frames_ok),
44 ENIC_TX_STAT(tx_multicast_frames_ok),
45 ENIC_TX_STAT(tx_broadcast_frames_ok),
46 ENIC_TX_STAT(tx_bytes_ok),
47 ENIC_TX_STAT(tx_unicast_bytes_ok),
48 ENIC_TX_STAT(tx_multicast_bytes_ok),
49 ENIC_TX_STAT(tx_broadcast_bytes_ok),
50 ENIC_TX_STAT(tx_drops),
51 ENIC_TX_STAT(tx_errors),
52 ENIC_TX_STAT(tx_tso),
53};
54
55static const struct enic_stat enic_rx_stats[] = {
56 ENIC_RX_STAT(rx_frames_ok),
57 ENIC_RX_STAT(rx_frames_total),
58 ENIC_RX_STAT(rx_unicast_frames_ok),
59 ENIC_RX_STAT(rx_multicast_frames_ok),
60 ENIC_RX_STAT(rx_broadcast_frames_ok),
61 ENIC_RX_STAT(rx_bytes_ok),
62 ENIC_RX_STAT(rx_unicast_bytes_ok),
63 ENIC_RX_STAT(rx_multicast_bytes_ok),
64 ENIC_RX_STAT(rx_broadcast_bytes_ok),
65 ENIC_RX_STAT(rx_drop),
66 ENIC_RX_STAT(rx_no_bufs),
67 ENIC_RX_STAT(rx_errors),
68 ENIC_RX_STAT(rx_rss),
69 ENIC_RX_STAT(rx_crc_errors),
70 ENIC_RX_STAT(rx_frames_64),
71 ENIC_RX_STAT(rx_frames_127),
72 ENIC_RX_STAT(rx_frames_255),
73 ENIC_RX_STAT(rx_frames_511),
74 ENIC_RX_STAT(rx_frames_1023),
75 ENIC_RX_STAT(rx_frames_1518),
76 ENIC_RX_STAT(rx_frames_to_max),
77};
78
79static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
80static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
81
82static int enic_get_settings(struct net_device *netdev,
83 struct ethtool_cmd *ecmd)
84{
85 struct enic *enic = netdev_priv(netdev);
86
87 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
88 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
89 ecmd->port = PORT_FIBRE;
90 ecmd->transceiver = XCVR_EXTERNAL;
91
92 if (netif_carrier_ok(netdev)) {
93 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
94 ecmd->duplex = DUPLEX_FULL;
95 } else {
96 ethtool_cmd_speed_set(ecmd, -1);
97 ecmd->duplex = -1;
98 }
99
100 ecmd->autoneg = AUTONEG_DISABLE;
101
102 return 0;
103}
104
105static void enic_get_drvinfo(struct net_device *netdev,
106 struct ethtool_drvinfo *drvinfo)
107{
108 struct enic *enic = netdev_priv(netdev);
109 struct vnic_devcmd_fw_info *fw_info;
110
111 enic_dev_fw_info(enic, &fw_info);
112
113 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
114 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
115 strlcpy(drvinfo->fw_version, fw_info->fw_version,
116 sizeof(drvinfo->fw_version));
117 strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
118 sizeof(drvinfo->bus_info));
119}
120
121static void enic_get_strings(struct net_device *netdev, u32 stringset,
122 u8 *data)
123{
124 unsigned int i;
125
126 switch (stringset) {
127 case ETH_SS_STATS:
128 for (i = 0; i < enic_n_tx_stats; i++) {
129 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
130 data += ETH_GSTRING_LEN;
131 }
132 for (i = 0; i < enic_n_rx_stats; i++) {
133 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
134 data += ETH_GSTRING_LEN;
135 }
136 break;
137 }
138}
139
140static int enic_get_sset_count(struct net_device *netdev, int sset)
141{
142 switch (sset) {
143 case ETH_SS_STATS:
144 return enic_n_tx_stats + enic_n_rx_stats;
145 default:
146 return -EOPNOTSUPP;
147 }
148}
149
150static void enic_get_ethtool_stats(struct net_device *netdev,
151 struct ethtool_stats *stats, u64 *data)
152{
153 struct enic *enic = netdev_priv(netdev);
154 struct vnic_stats *vstats;
155 unsigned int i;
156
157 enic_dev_stats_dump(enic, &vstats);
158
159 for (i = 0; i < enic_n_tx_stats; i++)
160 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
161 for (i = 0; i < enic_n_rx_stats; i++)
162 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
163}
164
165static u32 enic_get_msglevel(struct net_device *netdev)
166{
167 struct enic *enic = netdev_priv(netdev);
168 return enic->msg_enable;
169}
170
171static void enic_set_msglevel(struct net_device *netdev, u32 value)
172{
173 struct enic *enic = netdev_priv(netdev);
174 enic->msg_enable = value;
175}
176
177static int enic_get_coalesce(struct net_device *netdev,
178 struct ethtool_coalesce *ecmd)
179{
180 struct enic *enic = netdev_priv(netdev);
181
182 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
183 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
184
185 return 0;
186}
187
188static int enic_set_coalesce(struct net_device *netdev,
189 struct ethtool_coalesce *ecmd)
190{
191 struct enic *enic = netdev_priv(netdev);
192 u32 tx_coalesce_usecs;
193 u32 rx_coalesce_usecs;
194 unsigned int i, intr;
195
196 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
197 vnic_dev_get_intr_coal_timer_max(enic->vdev));
198 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
199 vnic_dev_get_intr_coal_timer_max(enic->vdev));
200
201 switch (vnic_dev_get_intr_mode(enic->vdev)) {
202 case VNIC_DEV_INTR_MODE_INTX:
203 if (tx_coalesce_usecs != rx_coalesce_usecs)
204 return -EINVAL;
205
206 intr = enic_legacy_io_intr();
207 vnic_intr_coalescing_timer_set(&enic->intr[intr],
208 tx_coalesce_usecs);
209 break;
210 case VNIC_DEV_INTR_MODE_MSI:
211 if (tx_coalesce_usecs != rx_coalesce_usecs)
212 return -EINVAL;
213
214 vnic_intr_coalescing_timer_set(&enic->intr[0],
215 tx_coalesce_usecs);
216 break;
217 case VNIC_DEV_INTR_MODE_MSIX:
218 for (i = 0; i < enic->wq_count; i++) {
219 intr = enic_msix_wq_intr(enic, i);
220 vnic_intr_coalescing_timer_set(&enic->intr[intr],
221 tx_coalesce_usecs);
222 }
223
224 for (i = 0; i < enic->rq_count; i++) {
225 intr = enic_msix_rq_intr(enic, i);
226 vnic_intr_coalescing_timer_set(&enic->intr[intr],
227 rx_coalesce_usecs);
228 }
229
230 break;
231 default:
232 break;
233 }
234
235 enic->tx_coalesce_usecs = tx_coalesce_usecs;
236 enic->rx_coalesce_usecs = rx_coalesce_usecs;
237
238 return 0;
239}
240
241static const struct ethtool_ops enic_ethtool_ops = {
242 .get_settings = enic_get_settings,
243 .get_drvinfo = enic_get_drvinfo,
244 .get_msglevel = enic_get_msglevel,
245 .set_msglevel = enic_set_msglevel,
246 .get_link = ethtool_op_get_link,
247 .get_strings = enic_get_strings,
248 .get_sset_count = enic_get_sset_count,
249 .get_ethtool_stats = enic_get_ethtool_stats,
250 .get_coalesce = enic_get_coalesce,
251 .set_coalesce = enic_set_coalesce,
252};
253
254void enic_set_ethtool_ops(struct net_device *netdev)
255{
256 SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
257}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 992ec2ee64d9..7b756cf9474a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -31,7 +31,6 @@
31#include <linux/if.h> 31#include <linux/if.h>
32#include <linux/if_ether.h> 32#include <linux/if_ether.h>
33#include <linux/if_vlan.h> 33#include <linux/if_vlan.h>
34#include <linux/ethtool.h>
35#include <linux/in.h> 34#include <linux/in.h>
36#include <linux/ip.h> 35#include <linux/ip.h>
37#include <linux/ipv6.h> 36#include <linux/ipv6.h>
@@ -73,57 +72,6 @@ MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_VERSION); 72MODULE_VERSION(DRV_VERSION);
74MODULE_DEVICE_TABLE(pci, enic_id_table); 73MODULE_DEVICE_TABLE(pci, enic_id_table);
75 74
76struct enic_stat {
77 char name[ETH_GSTRING_LEN];
78 unsigned int offset;
79};
80
81#define ENIC_TX_STAT(stat) \
82 { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
83#define ENIC_RX_STAT(stat) \
84 { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
85
86static const struct enic_stat enic_tx_stats[] = {
87 ENIC_TX_STAT(tx_frames_ok),
88 ENIC_TX_STAT(tx_unicast_frames_ok),
89 ENIC_TX_STAT(tx_multicast_frames_ok),
90 ENIC_TX_STAT(tx_broadcast_frames_ok),
91 ENIC_TX_STAT(tx_bytes_ok),
92 ENIC_TX_STAT(tx_unicast_bytes_ok),
93 ENIC_TX_STAT(tx_multicast_bytes_ok),
94 ENIC_TX_STAT(tx_broadcast_bytes_ok),
95 ENIC_TX_STAT(tx_drops),
96 ENIC_TX_STAT(tx_errors),
97 ENIC_TX_STAT(tx_tso),
98};
99
100static const struct enic_stat enic_rx_stats[] = {
101 ENIC_RX_STAT(rx_frames_ok),
102 ENIC_RX_STAT(rx_frames_total),
103 ENIC_RX_STAT(rx_unicast_frames_ok),
104 ENIC_RX_STAT(rx_multicast_frames_ok),
105 ENIC_RX_STAT(rx_broadcast_frames_ok),
106 ENIC_RX_STAT(rx_bytes_ok),
107 ENIC_RX_STAT(rx_unicast_bytes_ok),
108 ENIC_RX_STAT(rx_multicast_bytes_ok),
109 ENIC_RX_STAT(rx_broadcast_bytes_ok),
110 ENIC_RX_STAT(rx_drop),
111 ENIC_RX_STAT(rx_no_bufs),
112 ENIC_RX_STAT(rx_errors),
113 ENIC_RX_STAT(rx_rss),
114 ENIC_RX_STAT(rx_crc_errors),
115 ENIC_RX_STAT(rx_frames_64),
116 ENIC_RX_STAT(rx_frames_127),
117 ENIC_RX_STAT(rx_frames_255),
118 ENIC_RX_STAT(rx_frames_511),
119 ENIC_RX_STAT(rx_frames_1023),
120 ENIC_RX_STAT(rx_frames_1518),
121 ENIC_RX_STAT(rx_frames_to_max),
122};
123
124static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
125static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
126
127int enic_is_dynamic(struct enic *enic) 75int enic_is_dynamic(struct enic *enic)
128{ 76{
129 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; 77 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -148,222 +96,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
148#endif 96#endif
149} 97}
150 98
151static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
152{
153 return rq;
154}
155
156static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
157{
158 return enic->rq_count + wq;
159}
160
161static inline unsigned int enic_legacy_io_intr(void)
162{
163 return 0;
164}
165
166static inline unsigned int enic_legacy_err_intr(void)
167{
168 return 1;
169}
170
171static inline unsigned int enic_legacy_notify_intr(void)
172{
173 return 2;
174}
175
176static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
177{
178 return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
179}
180
181static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
182{
183 return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
184}
185
186static inline unsigned int enic_msix_err_intr(struct enic *enic)
187{
188 return enic->rq_count + enic->wq_count;
189}
190
191static inline unsigned int enic_msix_notify_intr(struct enic *enic)
192{
193 return enic->rq_count + enic->wq_count + 1;
194}
195
196static int enic_get_settings(struct net_device *netdev,
197 struct ethtool_cmd *ecmd)
198{
199 struct enic *enic = netdev_priv(netdev);
200
201 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
202 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
203 ecmd->port = PORT_FIBRE;
204 ecmd->transceiver = XCVR_EXTERNAL;
205
206 if (netif_carrier_ok(netdev)) {
207 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
208 ecmd->duplex = DUPLEX_FULL;
209 } else {
210 ethtool_cmd_speed_set(ecmd, -1);
211 ecmd->duplex = -1;
212 }
213
214 ecmd->autoneg = AUTONEG_DISABLE;
215
216 return 0;
217}
218
219static void enic_get_drvinfo(struct net_device *netdev,
220 struct ethtool_drvinfo *drvinfo)
221{
222 struct enic *enic = netdev_priv(netdev);
223 struct vnic_devcmd_fw_info *fw_info;
224
225 enic_dev_fw_info(enic, &fw_info);
226
227 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
228 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
229 strlcpy(drvinfo->fw_version, fw_info->fw_version,
230 sizeof(drvinfo->fw_version));
231 strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
232 sizeof(drvinfo->bus_info));
233}
234
235static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
236{
237 unsigned int i;
238
239 switch (stringset) {
240 case ETH_SS_STATS:
241 for (i = 0; i < enic_n_tx_stats; i++) {
242 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
243 data += ETH_GSTRING_LEN;
244 }
245 for (i = 0; i < enic_n_rx_stats; i++) {
246 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
247 data += ETH_GSTRING_LEN;
248 }
249 break;
250 }
251}
252
253static int enic_get_sset_count(struct net_device *netdev, int sset)
254{
255 switch (sset) {
256 case ETH_SS_STATS:
257 return enic_n_tx_stats + enic_n_rx_stats;
258 default:
259 return -EOPNOTSUPP;
260 }
261}
262
263static void enic_get_ethtool_stats(struct net_device *netdev,
264 struct ethtool_stats *stats, u64 *data)
265{
266 struct enic *enic = netdev_priv(netdev);
267 struct vnic_stats *vstats;
268 unsigned int i;
269
270 enic_dev_stats_dump(enic, &vstats);
271
272 for (i = 0; i < enic_n_tx_stats; i++)
273 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
274 for (i = 0; i < enic_n_rx_stats; i++)
275 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
276}
277
278static u32 enic_get_msglevel(struct net_device *netdev)
279{
280 struct enic *enic = netdev_priv(netdev);
281 return enic->msg_enable;
282}
283
284static void enic_set_msglevel(struct net_device *netdev, u32 value)
285{
286 struct enic *enic = netdev_priv(netdev);
287 enic->msg_enable = value;
288}
289
290static int enic_get_coalesce(struct net_device *netdev,
291 struct ethtool_coalesce *ecmd)
292{
293 struct enic *enic = netdev_priv(netdev);
294
295 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
296 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
297
298 return 0;
299}
300
301static int enic_set_coalesce(struct net_device *netdev,
302 struct ethtool_coalesce *ecmd)
303{
304 struct enic *enic = netdev_priv(netdev);
305 u32 tx_coalesce_usecs;
306 u32 rx_coalesce_usecs;
307 unsigned int i, intr;
308
309 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
310 vnic_dev_get_intr_coal_timer_max(enic->vdev));
311 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
312 vnic_dev_get_intr_coal_timer_max(enic->vdev));
313
314 switch (vnic_dev_get_intr_mode(enic->vdev)) {
315 case VNIC_DEV_INTR_MODE_INTX:
316 if (tx_coalesce_usecs != rx_coalesce_usecs)
317 return -EINVAL;
318
319 intr = enic_legacy_io_intr();
320 vnic_intr_coalescing_timer_set(&enic->intr[intr],
321 tx_coalesce_usecs);
322 break;
323 case VNIC_DEV_INTR_MODE_MSI:
324 if (tx_coalesce_usecs != rx_coalesce_usecs)
325 return -EINVAL;
326
327 vnic_intr_coalescing_timer_set(&enic->intr[0],
328 tx_coalesce_usecs);
329 break;
330 case VNIC_DEV_INTR_MODE_MSIX:
331 for (i = 0; i < enic->wq_count; i++) {
332 intr = enic_msix_wq_intr(enic, i);
333 vnic_intr_coalescing_timer_set(&enic->intr[intr],
334 tx_coalesce_usecs);
335 }
336
337 for (i = 0; i < enic->rq_count; i++) {
338 intr = enic_msix_rq_intr(enic, i);
339 vnic_intr_coalescing_timer_set(&enic->intr[intr],
340 rx_coalesce_usecs);
341 }
342
343 break;
344 default:
345 break;
346 }
347
348 enic->tx_coalesce_usecs = tx_coalesce_usecs;
349 enic->rx_coalesce_usecs = rx_coalesce_usecs;
350
351 return 0;
352}
353
354static const struct ethtool_ops enic_ethtool_ops = {
355 .get_settings = enic_get_settings,
356 .get_drvinfo = enic_get_drvinfo,
357 .get_msglevel = enic_get_msglevel,
358 .set_msglevel = enic_set_msglevel,
359 .get_link = ethtool_op_get_link,
360 .get_strings = enic_get_strings,
361 .get_sset_count = enic_get_sset_count,
362 .get_ethtool_stats = enic_get_ethtool_stats,
363 .get_coalesce = enic_get_coalesce,
364 .set_coalesce = enic_set_coalesce,
365};
366
367static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) 99static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
368{ 100{
369 struct enic *enic = vnic_dev_priv(wq->vdev); 101 struct enic *enic = vnic_dev_priv(wq->vdev);
@@ -396,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
396 completed_index, enic_wq_free_buf, 128 completed_index, enic_wq_free_buf,
397 opaque); 129 opaque);
398 130
399 if (netif_queue_stopped(enic->netdev) && 131 if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
400 vnic_wq_desc_avail(&enic->wq[q_number]) >= 132 vnic_wq_desc_avail(&enic->wq[q_number]) >=
401 (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) 133 (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
402 netif_wake_queue(enic->netdev); 134 netif_wake_subqueue(enic->netdev, q_number);
403 135
404 spin_unlock(&enic->wq_lock[q_number]); 136 spin_unlock(&enic->wq_lock[q_number]);
405 137
@@ -560,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
560static irqreturn_t enic_isr_msix_wq(int irq, void *data) 292static irqreturn_t enic_isr_msix_wq(int irq, void *data)
561{ 293{
562 struct enic *enic = data; 294 struct enic *enic = data;
563 unsigned int cq = enic_cq_wq(enic, 0); 295 unsigned int cq;
564 unsigned int intr = enic_msix_wq_intr(enic, 0); 296 unsigned int intr;
565 unsigned int wq_work_to_do = -1; /* no limit */ 297 unsigned int wq_work_to_do = -1; /* no limit */
566 unsigned int wq_work_done; 298 unsigned int wq_work_done;
299 unsigned int wq_irq;
300
301 wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
302 cq = enic_cq_wq(enic, wq_irq);
303 intr = enic_msix_wq_intr(enic, wq_irq);
567 304
568 wq_work_done = vnic_cq_service(&enic->cq[cq], 305 wq_work_done = vnic_cq_service(&enic->cq[cq],
569 wq_work_to_do, enic_wq_service, NULL); 306 wq_work_to_do, enic_wq_service, NULL);
@@ -779,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
779 struct net_device *netdev) 516 struct net_device *netdev)
780{ 517{
781 struct enic *enic = netdev_priv(netdev); 518 struct enic *enic = netdev_priv(netdev);
782 struct vnic_wq *wq = &enic->wq[0]; 519 struct vnic_wq *wq;
783 unsigned long flags; 520 unsigned long flags;
521 unsigned int txq_map;
784 522
785 if (skb->len <= 0) { 523 if (skb->len <= 0) {
786 dev_kfree_skb(skb); 524 dev_kfree_skb(skb);
787 return NETDEV_TX_OK; 525 return NETDEV_TX_OK;
788 } 526 }
789 527
528 txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
529 wq = &enic->wq[txq_map];
530
790 /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, 531 /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
791 * which is very likely. On the off chance it's going to take 532 * which is very likely. On the off chance it's going to take
792 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb. 533 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -799,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
799 return NETDEV_TX_OK; 540 return NETDEV_TX_OK;
800 } 541 }
801 542
802 spin_lock_irqsave(&enic->wq_lock[0], flags); 543 spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
803 544
804 if (vnic_wq_desc_avail(wq) < 545 if (vnic_wq_desc_avail(wq) <
805 skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { 546 skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
806 netif_stop_queue(netdev); 547 netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
807 /* This is a hard error, log it */ 548 /* This is a hard error, log it */
808 netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); 549 netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
809 spin_unlock_irqrestore(&enic->wq_lock[0], flags); 550 spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
810 return NETDEV_TX_BUSY; 551 return NETDEV_TX_BUSY;
811 } 552 }
812 553
813 enic_queue_wq_skb(enic, wq, skb); 554 enic_queue_wq_skb(enic, wq, skb);
814 555
815 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) 556 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
816 netif_stop_queue(netdev); 557 netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
817 558
818 spin_unlock_irqrestore(&enic->wq_lock[0], flags); 559 spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
819 560
820 return NETDEV_TX_OK; 561 return NETDEV_TX_OK;
821} 562}
@@ -1293,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1293 1034
1294 skb_put(skb, bytes_written); 1035 skb_put(skb, bytes_written);
1295 skb->protocol = eth_type_trans(skb, netdev); 1036 skb->protocol = eth_type_trans(skb, netdev);
1037 skb_record_rx_queue(skb, q_number);
1038 if (netdev->features & NETIF_F_RXHASH) {
1039 skb->rxhash = rss_hash;
1040 if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
1041 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
1042 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
1043 skb->l4_rxhash = true;
1044 }
1296 1045
1297 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { 1046 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
1298 skb->csum = htons(checksum); 1047 skb->csum = htons(checksum);
@@ -1637,7 +1386,7 @@ static int enic_open(struct net_device *netdev)
1637 1386
1638 enic_set_rx_mode(netdev); 1387 enic_set_rx_mode(netdev);
1639 1388
1640 netif_wake_queue(netdev); 1389 netif_tx_wake_all_queues(netdev);
1641 1390
1642 for (i = 0; i < enic->rq_count; i++) 1391 for (i = 0; i < enic->rq_count; i++)
1643 napi_enable(&enic->napi[i]); 1392 napi_enable(&enic->napi[i]);
@@ -2001,6 +1750,7 @@ static void enic_reset(struct work_struct *work)
2001 1750
2002 rtnl_lock(); 1751 rtnl_lock();
2003 1752
1753 spin_lock(&enic->enic_api_lock);
2004 enic_dev_hang_notify(enic); 1754 enic_dev_hang_notify(enic);
2005 enic_stop(enic->netdev); 1755 enic_stop(enic->netdev);
2006 enic_dev_hang_reset(enic); 1756 enic_dev_hang_reset(enic);
@@ -2009,6 +1759,8 @@ static void enic_reset(struct work_struct *work)
2009 enic_set_rss_nic_cfg(enic); 1759 enic_set_rss_nic_cfg(enic);
2010 enic_dev_set_ig_vlan_rewrite_mode(enic); 1760 enic_dev_set_ig_vlan_rewrite_mode(enic);
2011 enic_open(enic->netdev); 1761 enic_open(enic->netdev);
1762 spin_unlock(&enic->enic_api_lock);
1763 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
2012 1764
2013 rtnl_unlock(); 1765 rtnl_unlock();
2014} 1766}
@@ -2297,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2297 * instance data is initialized to zero. 2049 * instance data is initialized to zero.
2298 */ 2050 */
2299 2051
2300 netdev = alloc_etherdev(sizeof(struct enic)); 2052 netdev = alloc_etherdev_mqs(sizeof(struct enic),
2053 ENIC_RQ_MAX, ENIC_WQ_MAX);
2301 if (!netdev) 2054 if (!netdev)
2302 return -ENOMEM; 2055 return -ENOMEM;
2303 2056
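Note: alloc_etherdev_mqs() sizes the netdev for the driver maxima up front; once the vNIC's real resources are known, the probe trims the counts the stack actually sees, as in the hunk at line 2462 further down. The two steps belong together:

    /* allocate for the most queues the driver could ever use ... */
    netdev = alloc_etherdev_mqs(sizeof(struct enic), ENIC_RQ_MAX, ENIC_WQ_MAX);

    /* ... then expose only what this vNIC was provisioned with
     * (must not exceed the counts passed at allocation) */
    netif_set_real_num_tx_queues(netdev, enic->wq_count);
    netif_set_real_num_rx_queues(netdev, enic->rq_count);

One subtlety: alloc_etherdev_mqs() takes (priv_size, txqs, rxqs), so the call above passes ENIC_RQ_MAX in the txqs slot; that is harmless only because both maxima happen to be 8.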
@@ -2327,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2327 pci_set_master(pdev); 2080 pci_set_master(pdev);
2328 2081
2329 /* Query PCI controller on system for DMA addressing 2082 /* Query PCI controller on system for DMA addressing
2330 * limitation for the device. Try 40-bit first, and 2083 * limitation for the device. Try 64-bit first, and
2331 * fall back to 32-bit. 2084 * fall back to 32-bit.
2332 */ 2085 */
2333 2086
2334 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); 2087 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2335 if (err) { 2088 if (err) {
2336 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2089 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2337 if (err) { 2090 if (err) {
@@ -2345,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2345 goto err_out_release_regions; 2098 goto err_out_release_regions;
2346 } 2099 }
2347 } else { 2100 } else {
2348 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); 2101 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2349 if (err) { 2102 if (err) {
2350 dev_err(dev, "Unable to obtain %u-bit DMA " 2103 dev_err(dev, "Unable to obtain %u-bit DMA "
2351 "for consistent allocations, aborting\n", 40); 2104 "for consistent allocations, aborting\n", 64);
2352 goto err_out_release_regions; 2105 goto err_out_release_regions;
2353 } 2106 }
2354 using_dac = 1; 2107 using_dac = 1;
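The probe path now asks for 64-bit DMA and falls back to 32-bit, which every platform must support. The negotiation in isolation, as a sketch using the same pre-DMA-attrs API (my_set_dma() is illustrative):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_set_dma(struct pci_dev *pdev, int *using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		*using_dac = 1;		/* full 64-bit addressing */
		return 0;
	}
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return -EIO;		/* no usable DMA mask */
	*using_dac = 0;
	return 0;
}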
@@ -2421,6 +2174,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2421 */ 2174 */
2422 2175
2423 spin_lock_init(&enic->devcmd_lock); 2176 spin_lock_init(&enic->devcmd_lock);
2177 spin_lock_init(&enic->enic_api_lock);
2424 2178
2425 /* 2179 /*
2426 * Set ingress vlan rewrite mode before vnic initialization 2180 * Set ingress vlan rewrite mode before vnic initialization
@@ -2462,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2462 goto err_out_dev_close; 2216 goto err_out_dev_close;
2463 } 2217 }
2464 2218
2219 netif_set_real_num_tx_queues(netdev, enic->wq_count);
2220 netif_set_real_num_rx_queues(netdev, enic->rq_count);
2221
2465 /* Setup notification timer, HW reset task, and wq locks 2222 /* Setup notification timer, HW reset task, and wq locks
2466 */ 2223 */
2467 2224
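alloc_etherdev_mqs() above reserves ENIC_RQ_MAX/ENIC_WQ_MAX queues up front, and netif_set_real_num_{tx,rx}_queues() later trims to the counts the firmware actually provisioned. The two-step in isolation, with hypothetical MY_* limits and struct my_priv:

static struct net_device *my_alloc_netdev(unsigned int wq_count,
					  unsigned int rq_count)
{
	struct net_device *netdev;

	/* Size the stack's per-queue state for the worst case, once. */
	netdev = alloc_etherdev_mqs(sizeof(struct my_priv),
				    MY_RQ_MAX, MY_WQ_MAX);
	if (!netdev)
		return NULL;

	/* After resource discovery, expose only the real queue counts. */
	netif_set_real_num_tx_queues(netdev, wq_count);
	netif_set_real_num_rx_queues(netdev, rq_count);
	return netdev;
}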
@@ -2496,7 +2253,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2496 netdev->netdev_ops = &enic_netdev_ops; 2253 netdev->netdev_ops = &enic_netdev_ops;
2497 2254
2498 netdev->watchdog_timeo = 2 * HZ; 2255 netdev->watchdog_timeo = 2 * HZ;
2499 netdev->ethtool_ops = &enic_ethtool_ops; 2256 enic_set_ethtool_ops(netdev);
2500 2257
2501 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 2258 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2502 if (ENIC_SETTING(enic, LOOP)) { 2259 if (ENIC_SETTING(enic, LOOP)) {
@@ -2510,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2510 if (ENIC_SETTING(enic, TSO)) 2267 if (ENIC_SETTING(enic, TSO))
2511 netdev->hw_features |= NETIF_F_TSO | 2268 netdev->hw_features |= NETIF_F_TSO |
2512 NETIF_F_TSO6 | NETIF_F_TSO_ECN; 2269 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2270 if (ENIC_SETTING(enic, RSS))
2271 netdev->hw_features |= NETIF_F_RXHASH;
2513 if (ENIC_SETTING(enic, RXCSUM)) 2272 if (ENIC_SETTING(enic, RXCSUM))
2514 netdev->hw_features |= NETIF_F_RXCSUM; 2273 netdev->hw_features |= NETIF_F_RXCSUM;
2515 2274
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 25be2734c3fe..69f60afd6577 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -47,6 +47,9 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
47 int offload_mode, int cq_entry, int sop, int eop, int loopback) 47 int offload_mode, int cq_entry, int sop, int eop, int loopback)
48{ 48{
49 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); 49 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
50 u8 desc_skip_cnt = 1;
51 u8 compressed_send = 0;
52 u64 wrid = 0;
50 53
51 wq_enet_desc_enc(desc, 54 wq_enet_desc_enc(desc,
52 (u64)dma_addr | VNIC_PADDR_TARGET, 55 (u64)dma_addr | VNIC_PADDR_TARGET,
@@ -59,7 +62,8 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
59 (u16)vlan_tag, 62 (u16)vlan_tag,
60 (u8)loopback); 63 (u8)loopback);
61 64
62 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); 65 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
66 (u8)cq_entry, compressed_send, wrid);
63} 67}
64 68
65static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, 69static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
@@ -120,6 +124,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
120 dma_addr_t dma_addr, unsigned int len) 124 dma_addr_t dma_addr, unsigned int len)
121{ 125{
122 struct rq_enet_desc *desc = vnic_rq_next_desc(rq); 126 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
127 u64 wrid = 0;
123 u8 type = os_buf_index ? 128 u8 type = os_buf_index ?
124 RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP; 129 RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
125 130
@@ -127,7 +132,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
127 (u64)dma_addr | VNIC_PADDR_TARGET, 132 (u64)dma_addr | VNIC_PADDR_TARGET,
128 type, (u16)len); 133 type, (u16)len);
129 134
130 vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len); 135 vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
131} 136}
132 137
133struct enic; 138struct enic;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 97455c573db5..69dd92598b7e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
175{ 175{
176 return vdev->res[type].count; 176 return vdev->res[type].count;
177} 177}
178EXPORT_SYMBOL(vnic_dev_get_res_count);
178 179
179void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, 180void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
180 unsigned int index) 181 unsigned int index)
@@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
193 return (char __iomem *)vdev->res[type].vaddr; 194 return (char __iomem *)vdev->res[type].vaddr;
194 } 195 }
195} 196}
197EXPORT_SYMBOL(vnic_dev_get_res);
196 198
197static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, 199static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
198 unsigned int desc_count, unsigned int desc_size) 200 unsigned int desc_count, unsigned int desc_size)
@@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
942 kfree(vdev); 944 kfree(vdev);
943 } 945 }
944} 946}
947EXPORT_SYMBOL(vnic_dev_unregister);
945 948
946struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, 949struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
947 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, 950 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
@@ -969,6 +972,13 @@ err_out:
969 vnic_dev_unregister(vdev); 972 vnic_dev_unregister(vdev);
970 return NULL; 973 return NULL;
971} 974}
975EXPORT_SYMBOL(vnic_dev_register);
976
977struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
978{
979 return vdev->pdev;
980}
981EXPORT_SYMBOL(vnic_dev_get_pdev);
972 982
973int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len) 983int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
974{ 984{
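These EXPORT_SYMBOL additions (see "Exposing symbols for Cisco's low latency driver" in the merge log) let an out-of-tree usNIC module reach the vNIC resources enic already manages. A hedged sketch of a consumer; my_claim_vnic() is illustrative, only the vnic_dev_* calls are real:

static int my_claim_vnic(struct vnic_dev *vdev)
{
	struct pci_dev *pdev = vnic_dev_get_pdev(vdev);
	unsigned int wq_count = vnic_dev_get_res_count(vdev, RES_TYPE_WQ);
	void __iomem *wq0 = vnic_dev_get_res(vdev, RES_TYPE_WQ, 0);

	if (!wq0)
		return -ENODEV;
	dev_info(&pdev->dev, "found %u work queues\n", wq_count);
	return 0;
}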
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index f3d9b79ba77e..e670029862a1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
127struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, 127struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
128 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, 128 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
129 unsigned int num_bars); 129 unsigned int num_bars);
130struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev);
130int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len); 131int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
131int vnic_dev_enable2(struct vnic_dev *vdev, int active); 132int vnic_dev_enable2(struct vnic_dev *vdev, int active);
132int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); 133int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 23d555255cf8..b9a0d78fd639 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -281,11 +281,25 @@ enum vnic_devcmd_cmd {
281 * 0 if no VIF-CONFIG-INFO TLV was ever received. */ 281 * 0 if no VIF-CONFIG-INFO TLV was ever received. */
282 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), 282 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
283 283
284 /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
285 * (u32)a1=INT13_CMD_xxx
286 */
287 CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
288
289 /* Set default vlan:
290 * in: (u16)a0=new default vlan
291 * (u16)a1=zero for overriding vlan with param a0,
292 * non-zero for resetting vlan to the default
293 * out: (u16)a0=old default vlan
294 */
295 CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
296
284 /* init_prov_info2: 297 /* init_prov_info2:
285 * Variant of CMD_INIT_PROV_INFO, where it will not try to enable 298 * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
286 * the vnic until CMD_ENABLE2 is issued. 299 * the vnic until CMD_ENABLE2 is issued.
287 * (u64)a0=paddr of vnic_devcmd_provinfo 300 * (u64)a0=paddr of vnic_devcmd_provinfo
288 * (u32)a1=sizeof provision info */ 301 * (u32)a1=sizeof provision info
302 */
289 CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47), 303 CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
290 304
291 /* enable2: 305 /* enable2:
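Each devcmd comment above is a register contract: a0/a1 carry the inputs, a0 carries the result. Issuing CMD_SET_DEFAULT_VLAN through the driver's vnic_dev_cmd() might look like the sketch below; the wrapper name and the 1000 ms wait are assumptions:

/* Hypothetical wrapper: install new_vlan as default, return the old one. */
static int my_set_default_vlan(struct vnic_dev *vdev, u16 new_vlan,
			       u16 *old_vlan)
{
	u64 a0 = new_vlan;	/* in: (u16)a0 = new default vlan */
	u64 a1 = 0;		/* zero => override vlan with a0 */
	int err;

	err = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, 1000);
	if (!err && old_vlan)
		*old_vlan = (u16)a0;	/* out: (u16)a0 = old default vlan */
	return err;
}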
@@ -339,16 +353,57 @@ enum vnic_devcmd_cmd {
339 CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50), 353 CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
340 354
341 /* 355 /*
342 * cmd_set_mac_addr 356 * Set the predefined mac address as default
343 * set mac address
344 * in: 357 * in:
345 * (u48)a0 = mac addr 358 * (u48)a0 = mac addr
346 *
347 */ 359 */
348 CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55), 360 CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
361
362 /* Update the provisioning info of the given VIF
363 * (u64)a0=paddr of vnic_devcmd_provinfo
364 * (u32)a1=sizeof provision info
365 */
366 CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
367
368 /* Add a filter.
369 * in: (u64) a0= filter address
370 * (u32) a1= size of filter
371 * out: (u32) a0=filter identifier
372 */
373 CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
374
375 /* Delete a filter.
376 * in: (u32) a0=filter identifier
377 */
378 CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
379
380 /* Enable a Queue Pair in User space NIC
381 * in: (u32) a0=Queue Pair number
382 * (u32) a1= command
383 */
384 CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
385
386 /* Disable a Queue Pair in User space NIC
387 * in: (u32) a0=Queue Pair number
388 * (u32) a1= command
389 */
390 CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
391
 392 /* Dump stats for a Queue Pair in User space NIC
393 * in: (u32) a0=Queue Pair number
394 * (u64) a1=host buffer addr for status dump
395 * (u32) a2=length of the buffer
396 */
397 CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
398
399 /* Clear stats for Queue Pair in User space NIC
400 * in: (u32) a0=Queue Pair number
401 */
402 CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
349}; 403};
350 404
351/* CMD_ENABLE2 flags */ 405/* CMD_ENABLE2 flags */
406#define CMD_ENABLE2_STANDBY 0x0
352#define CMD_ENABLE2_ACTIVE 0x1 407#define CMD_ENABLE2_ACTIVE 0x1
353 408
354/* flags for CMD_OPEN */ 409/* flags for CMD_OPEN */
@@ -364,6 +419,9 @@ enum vnic_devcmd_cmd {
364#define CMD_PFILTER_PROMISCUOUS 0x08 419#define CMD_PFILTER_PROMISCUOUS 0x08
365#define CMD_PFILTER_ALL_MULTICAST 0x10 420#define CMD_PFILTER_ALL_MULTICAST 0x10
366 421
 422/* Commands for CMD_QP_ENABLE/CMD_QP_DISABLE */
423#define CMD_QP_RQWQ 0x0
424
367/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */ 425/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
368#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0 426#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
369#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1 427#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
@@ -390,6 +448,7 @@ enum vnic_devcmd_error {
390 ERR_EMAXRES = 10, 448 ERR_EMAXRES = 10,
391 ERR_ENOTSUPPORTED = 11, 449 ERR_ENOTSUPPORTED = 11,
392 ERR_EINPROGRESS = 12, 450 ERR_EINPROGRESS = 12,
451 ERR_MAX
393}; 452};
394 453
395/* 454/*
@@ -435,6 +494,115 @@ struct vnic_devcmd_provinfo {
435 u8 data[0]; 494 u8 data[0];
436}; 495};
437 496
 497/* These are used in the flags field of the different filters to
 498 * denote which fields are valid.
 499 */
500#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
501
502#define FILTER_FIELDS_USNIC ( \
503 FILTER_FIELD_VALID(1) | \
504 FILTER_FIELD_VALID(2) | \
505 FILTER_FIELD_VALID(3) | \
506 FILTER_FIELD_VALID(4))
507
508#define FILTER_FIELDS_IPV4_5TUPLE ( \
509 FILTER_FIELD_VALID(1) | \
510 FILTER_FIELD_VALID(2) | \
511 FILTER_FIELD_VALID(3) | \
512 FILTER_FIELD_VALID(4) | \
513 FILTER_FIELD_VALID(5))
514
515#define FILTER_FIELDS_MAC_VLAN ( \
516 FILTER_FIELD_VALID(1) | \
517 FILTER_FIELD_VALID(2))
518
519#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
520#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
521#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
522#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
523
524struct filter_usnic_id {
525 u32 flags;
526 u16 vlan;
527 u16 ethtype;
528 u8 proto_version;
529 u32 usnic_id;
530} __packed;
531
532#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
533#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
534#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
535#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
536#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
537
538/* Enums for the protocol field. */
539enum protocol_e {
540 PROTO_UDP = 0,
541 PROTO_TCP = 1,
542};
543
544struct filter_ipv4_5tuple {
545 u32 flags;
546 u32 protocol;
547 u32 src_addr;
548 u32 dst_addr;
549 u16 src_port;
550 u16 dst_port;
551} __packed;
552
553#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
554#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
555
556struct filter_mac_vlan {
557 u32 flags;
558 u16 vlan;
559 u8 mac_addr[6];
560} __packed;
561
562/* Specifies the filter_action type. */
563enum {
564 FILTER_ACTION_RQ_STEERING = 0,
565 FILTER_ACTION_MAX
566};
567
568struct filter_action {
569 u32 type;
570 union {
571 u32 rq_idx;
572 } u;
573} __packed;
574
575/* Specifies the filter type. */
576enum filter_type {
577 FILTER_USNIC_ID = 0,
578 FILTER_IPV4_5TUPLE = 1,
579 FILTER_MAC_VLAN = 2,
580 FILTER_MAX
581};
582
583struct filter {
584 u32 type;
585 union {
586 struct filter_usnic_id usnic;
587 struct filter_ipv4_5tuple ipv4;
588 struct filter_mac_vlan mac_vlan;
589 } u;
590} __packed;
591
592enum {
593 CLSF_TLV_FILTER = 0,
594 CLSF_TLV_ACTION = 1,
595};
596
 597/* Maximum size of the buffer passed to CMD_ADD_FILTER */
598#define FILTER_MAX_BUF_SIZE 100
599
600struct filter_tlv {
601 u_int32_t type;
602 u_int32_t length;
603 u_int32_t val[0];
604};
605
438/* 606/*
439 * Writing cmd register causes STAT_BUSY to get set in status register. 607 * Writing cmd register causes STAT_BUSY to get set in status register.
440 * When cmd completes, STAT_BUSY will be cleared. 608 * When cmd completes, STAT_BUSY will be cleared.
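CMD_ADD_FILTER consumes a TLV buffer: one CLSF_TLV_FILTER entry followed by one CLSF_TLV_ACTION entry, the whole thing no larger than FILTER_MAX_BUF_SIZE. A sketch of packing a filter that steers matches to an RQ; my_pack_filter() is illustrative:

/* Returns the number of bytes written into buf; the caller provides
 * FILTER_MAX_BUF_SIZE bytes, which comfortably fits both TLVs.
 */
static size_t my_pack_filter(void *buf, const struct filter *flt, u32 rq_idx)
{
	struct filter_tlv *tlv = buf;
	struct filter_action *action;

	tlv->type = CLSF_TLV_FILTER;
	tlv->length = sizeof(struct filter);
	memcpy(tlv->val, flt, sizeof(struct filter));

	/* The second TLV starts right after the first one's payload. */
	tlv = (struct filter_tlv *)((char *)tlv->val + tlv->length);
	tlv->type = CLSF_TLV_ACTION;
	tlv->length = sizeof(struct filter_action);
	action = (struct filter_action *)tlv->val;
	action->type = FILTER_ACTION_RQ_STEERING;
	action->u.rq_idx = rq_idx;

	return 2 * sizeof(struct filter_tlv) +
	       sizeof(struct filter) + sizeof(struct filter_action);
}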
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 7e1488fc8ab2..36a2ed606c91 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -30,12 +30,9 @@
30static int vnic_rq_alloc_bufs(struct vnic_rq *rq) 30static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
31{ 31{
32 struct vnic_rq_buf *buf; 32 struct vnic_rq_buf *buf;
33 struct vnic_dev *vdev;
34 unsigned int i, j, count = rq->ring.desc_count; 33 unsigned int i, j, count = rq->ring.desc_count;
35 unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); 34 unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
36 35
37 vdev = rq->vdev;
38
39 for (i = 0; i < blks; i++) { 36 for (i = 0; i < blks; i++) {
40 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); 37 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
41 if (!rq->bufs[i]) 38 if (!rq->bufs[i])
@@ -141,7 +138,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
141 unsigned int error_interrupt_enable, 138 unsigned int error_interrupt_enable,
142 unsigned int error_interrupt_offset) 139 unsigned int error_interrupt_offset)
143{ 140{
144 u32 fetch_index; 141 u32 fetch_index = 0;
145 142
146 /* Use current fetch_index as the ring starting point */ 143 /* Use current fetch_index as the ring starting point */
147 fetch_index = ioread32(&rq->ctrl->fetch_index); 144 fetch_index = ioread32(&rq->ctrl->fetch_index);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 2056586f4d4b..ee7bc95af278 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -72,6 +72,7 @@ struct vnic_rq_buf {
72 unsigned int len; 72 unsigned int len;
73 unsigned int index; 73 unsigned int index;
74 void *desc; 74 void *desc;
75 uint64_t wr_id;
75}; 76};
76 77
77struct vnic_rq { 78struct vnic_rq {
@@ -110,7 +111,8 @@ static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
110 111
111static inline void vnic_rq_post(struct vnic_rq *rq, 112static inline void vnic_rq_post(struct vnic_rq *rq,
112 void *os_buf, unsigned int os_buf_index, 113 void *os_buf, unsigned int os_buf_index,
113 dma_addr_t dma_addr, unsigned int len) 114 dma_addr_t dma_addr, unsigned int len,
115 uint64_t wrid)
114{ 116{
115 struct vnic_rq_buf *buf = rq->to_use; 117 struct vnic_rq_buf *buf = rq->to_use;
116 118
@@ -118,6 +120,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
118 buf->os_buf_index = os_buf_index; 120 buf->os_buf_index = os_buf_index;
119 buf->dma_addr = dma_addr; 121 buf->dma_addr = dma_addr;
120 buf->len = len; 122 buf->len = len;
123 buf->wr_id = wrid;
121 124
122 buf = buf->next; 125 buf = buf->next;
123 rq->to_use = buf; 126 rq->to_use = buf;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 5e0d7a2be9bc..3e6b8d54dafc 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -30,12 +30,9 @@
30static int vnic_wq_alloc_bufs(struct vnic_wq *wq) 30static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
31{ 31{
32 struct vnic_wq_buf *buf; 32 struct vnic_wq_buf *buf;
33 struct vnic_dev *vdev;
34 unsigned int i, j, count = wq->ring.desc_count; 33 unsigned int i, j, count = wq->ring.desc_count;
35 unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); 34 unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
36 35
37 vdev = wq->vdev;
38
39 for (i = 0; i < blks; i++) { 36 for (i = 0; i < blks; i++) {
40 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); 37 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
41 if (!wq->bufs[i]) 38 if (!wq->bufs[i])
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 7dd937ac11c2..2c6c70804a39 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -58,6 +58,10 @@ struct vnic_wq_buf {
58 unsigned int index; 58 unsigned int index;
59 int sop; 59 int sop;
60 void *desc; 60 void *desc;
61 uint64_t wr_id; /* Cookie */
62 uint8_t cq_entry; /* Gets completion event from hw */
63 uint8_t desc_skip_cnt; /* Num descs to occupy */
64 uint8_t compressed_send; /* Both hdr and payload in one desc */
61}; 65};
62 66
63/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ 67/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
@@ -102,14 +106,20 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
102 106
103static inline void vnic_wq_post(struct vnic_wq *wq, 107static inline void vnic_wq_post(struct vnic_wq *wq,
104 void *os_buf, dma_addr_t dma_addr, 108 void *os_buf, dma_addr_t dma_addr,
105 unsigned int len, int sop, int eop) 109 unsigned int len, int sop, int eop,
110 uint8_t desc_skip_cnt, uint8_t cq_entry,
111 uint8_t compressed_send, uint64_t wrid)
106{ 112{
107 struct vnic_wq_buf *buf = wq->to_use; 113 struct vnic_wq_buf *buf = wq->to_use;
108 114
109 buf->sop = sop; 115 buf->sop = sop;
116 buf->cq_entry = cq_entry;
117 buf->compressed_send = compressed_send;
118 buf->desc_skip_cnt = desc_skip_cnt;
110 buf->os_buf = eop ? os_buf : NULL; 119 buf->os_buf = eop ? os_buf : NULL;
111 buf->dma_addr = dma_addr; 120 buf->dma_addr = dma_addr;
112 buf->len = len; 121 buf->len = len;
122 buf->wr_id = wrid;
113 123
114 buf = buf->next; 124 buf = buf->next;
115 if (eop) { 125 if (eop) {
@@ -123,7 +133,7 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
123 } 133 }
124 wq->to_use = buf; 134 wq->to_use = buf;
125 135
126 wq->ring.desc_avail--; 136 wq->ring.desc_avail -= desc_skip_cnt;
127} 137}
128 138
129static inline void vnic_wq_service(struct vnic_wq *wq, 139static inline void vnic_wq_service(struct vnic_wq *wq,
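vnic_wq_post() now debits desc_avail by desc_skip_cnt rather than always one, so a post that occupies several descriptors keeps the free count honest. The common single-descriptor case reduces to the old behaviour, as in this sketch of a caller:

	/* Ordinary post: one descriptor, completion on eop, no compression. */
	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop,
		     1 /* desc_skip_cnt */, (u8)eop /* cq_entry */,
		     0 /* compressed_send */, 0 /* wrid */);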
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a13b312b50f2..5f5896e522d2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1384,7 +1384,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1384static int 1384static int
1385dm9000_probe(struct platform_device *pdev) 1385dm9000_probe(struct platform_device *pdev)
1386{ 1386{
1387 struct dm9000_plat_data *pdata = pdev->dev.platform_data; 1387 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1388 struct board_info *db; /* Point a board information structure */ 1388 struct board_info *db; /* Point a board information structure */
1389 struct net_device *ndev; 1389 struct net_device *ndev;
1390 const unsigned char *mac_src; 1390 const unsigned char *mac_src;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 4c830030fb06..2db6c573cec7 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2319,7 +2319,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
2319 struct net_device *dev; 2319 struct net_device *dev;
2320 u_long iobase; 2320 u_long iobase;
2321 2321
2322 dev = dev_get_drvdata(&pdev->dev); 2322 dev = pci_get_drvdata(pdev);
2323 iobase = dev->base_addr; 2323 iobase = dev->base_addr;
2324 2324
2325 unregister_netdev (dev); 2325 unregister_netdev (dev);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c94152f1c6be..4e8cfa2ac803 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1304,7 +1304,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1304{ 1304{
1305 struct tulip_private *tp; 1305 struct tulip_private *tp;
1306 /* See note below on the multiport cards. */ 1306 /* See note below on the multiport cards. */
1307 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'}; 1307 static unsigned char last_phys_addr[ETH_ALEN] = {
1308 0x00, 'L', 'i', 'n', 'u', 'x'
1309 };
1308 static int last_irq; 1310 static int last_irq;
1309 static int multiport_cnt; /* For four-port boards w/one EEPROM */ 1311 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1310 int i, irq; 1312 int i, irq;
@@ -1627,8 +1629,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1627 dev->dev_addr[i] = last_phys_addr[i] + 1; 1629 dev->dev_addr[i] = last_phys_addr[i] + 1;
1628#if defined(CONFIG_SPARC) 1630#if defined(CONFIG_SPARC)
1629 addr = of_get_property(dp, "local-mac-address", &len); 1631 addr = of_get_property(dp, "local-mac-address", &len);
1630 if (addr && len == 6) 1632 if (addr && len == ETH_ALEN)
1631 memcpy(dev->dev_addr, addr, 6); 1633 memcpy(dev->dev_addr, addr, ETH_ALEN);
1632#endif 1634#endif
1633#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ 1635#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1634 if (last_irq) 1636 if (last_irq)
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 50d9c6315930..bf3bf6f22c99 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -469,6 +469,17 @@ static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
469 } 469 }
470} 470}
471 471
472#ifdef CONFIG_NET_POLL_CONTROLLER
473static void sundance_poll_controller(struct net_device *dev)
474{
475 struct netdev_private *np = netdev_priv(dev);
476
477 disable_irq(np->pci_dev->irq);
478 intr_handler(np->pci_dev->irq, dev);
479 enable_irq(np->pci_dev->irq);
480}
481#endif
482
472static const struct net_device_ops netdev_ops = { 483static const struct net_device_ops netdev_ops = {
473 .ndo_open = netdev_open, 484 .ndo_open = netdev_open,
474 .ndo_stop = netdev_close, 485 .ndo_stop = netdev_close,
@@ -480,6 +491,9 @@ static const struct net_device_ops netdev_ops = {
480 .ndo_change_mtu = change_mtu, 491 .ndo_change_mtu = change_mtu,
481 .ndo_set_mac_address = sundance_set_mac_addr, 492 .ndo_set_mac_address = sundance_set_mac_addr,
482 .ndo_validate_addr = eth_validate_addr, 493 .ndo_validate_addr = eth_validate_addr,
494#ifdef CONFIG_NET_POLL_CONTROLLER
495 .ndo_poll_controller = sundance_poll_controller,
496#endif
483}; 497};
484 498
485static int sundance_probe1(struct pci_dev *pdev, 499static int sundance_probe1(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c827b1b6b1ce..ace5050dba38 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
34#include "be_hw.h" 34#include "be_hw.h"
35#include "be_roce.h" 35#include "be_roce.h"
36 36
37#define DRV_VER "4.6.62.0u" 37#define DRV_VER "4.9.134.0u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "Emulex BladeEngine2" 39#define BE_NAME "Emulex BladeEngine2"
40#define BE3_NAME "Emulex BladeEngine3" 40#define BE3_NAME "Emulex BladeEngine3"
@@ -99,14 +99,18 @@ static inline char *nic_name(struct pci_dev *pdev)
99#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ 99#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
100#define MCC_CQ_LEN 256 100#define MCC_CQ_LEN 256
101 101
102#define BE3_MAX_RSS_QS 8
103#define BE2_MAX_RSS_QS 4 102#define BE2_MAX_RSS_QS 4
104#define MAX_RSS_QS BE3_MAX_RSS_QS 103#define BE3_MAX_RSS_QS 16
105#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */ 104#define BE3_MAX_TX_QS 16
105#define BE3_MAX_EVT_QS 16
106
107#define MAX_RX_QS 32
108#define MAX_EVT_QS 32
109#define MAX_TX_QS 32
106 110
107#define MAX_TX_QS 8
108#define MAX_ROCE_EQS 5 111#define MAX_ROCE_EQS 5
109#define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */ 112#define MAX_MSIX_VECTORS 32
113#define MIN_MSIX_VECTORS 1
110#define BE_TX_BUDGET 256 114#define BE_TX_BUDGET 256
111#define BE_NAPI_WEIGHT 64 115#define BE_NAPI_WEIGHT 64
112#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 116#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
@@ -189,6 +193,7 @@ struct be_eq_obj {
189 u32 cur_eqd; /* in usecs */ 193 u32 cur_eqd; /* in usecs */
190 194
191 u8 idx; /* array index */ 195 u8 idx; /* array index */
196 u8 msix_idx;
192 u16 tx_budget; 197 u16 tx_budget;
193 u16 spurious_intr; 198 u16 spurious_intr;
194 struct napi_struct napi; 199 struct napi_struct napi;
@@ -352,6 +357,18 @@ struct phy_info {
352 u32 supported; 357 u32 supported;
353}; 358};
354 359
360struct be_resources {
361 u16 max_vfs; /* Total VFs "really" supported by FW/HW */
362 u16 max_mcast_mac;
363 u16 max_tx_qs;
364 u16 max_rss_qs;
365 u16 max_rx_qs;
366 u16 max_uc_mac; /* Max UC MACs programmable */
367 u16 max_vlans; /* Number of vlans supported */
368 u16 max_evt_qs;
369 u32 if_cap_flags;
370};
371
355struct be_adapter { 372struct be_adapter {
356 struct pci_dev *pdev; 373 struct pci_dev *pdev;
357 struct net_device *netdev; 374 struct net_device *netdev;
@@ -369,18 +386,19 @@ struct be_adapter {
369 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ 386 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
370 spinlock_t mcc_cq_lock; 387 spinlock_t mcc_cq_lock;
371 388
372 u32 num_msix_vec; 389 u16 cfg_num_qs; /* configured via set-channels */
373 u32 num_evt_qs; 390 u16 num_evt_qs;
374 struct be_eq_obj eq_obj[MAX_MSIX_VECTORS]; 391 u16 num_msix_vec;
392 struct be_eq_obj eq_obj[MAX_EVT_QS];
375 struct msix_entry msix_entries[MAX_MSIX_VECTORS]; 393 struct msix_entry msix_entries[MAX_MSIX_VECTORS];
376 bool isr_registered; 394 bool isr_registered;
377 395
378 /* TX Rings */ 396 /* TX Rings */
379 u32 num_tx_qs; 397 u16 num_tx_qs;
380 struct be_tx_obj tx_obj[MAX_TX_QS]; 398 struct be_tx_obj tx_obj[MAX_TX_QS];
381 399
382 /* Rx rings */ 400 /* Rx rings */
383 u32 num_rx_qs; 401 u16 num_rx_qs;
384 struct be_rx_obj rx_obj[MAX_RX_QS]; 402 struct be_rx_obj rx_obj[MAX_RX_QS];
385 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 403 u32 big_page_size; /* Compounded page size shared by rx wrbs */
386 404
@@ -430,8 +448,8 @@ struct be_adapter {
430 u32 flash_status; 448 u32 flash_status;
431 struct completion flash_compl; 449 struct completion flash_compl;
432 450
433 u32 num_vfs; /* Number of VFs provisioned by PF driver */ 451 struct be_resources res; /* resources available for the func */
434 u32 dev_num_vfs; /* Number of VFs supported by HW */ 452 u16 num_vfs; /* Number of VFs provisioned by PF */
435 u8 virtfn; 453 u8 virtfn;
436 struct be_vf_cfg *vf_cfg; 454 struct be_vf_cfg *vf_cfg;
437 bool be3_native; 455 bool be3_native;
@@ -446,21 +464,13 @@ struct be_adapter {
446 u16 qnq_vid; 464 u16 qnq_vid;
447 u32 msg_enable; 465 u32 msg_enable;
448 int be_get_temp_freq; 466 int be_get_temp_freq;
449 u16 max_mcast_mac;
450 u16 max_tx_queues;
451 u16 max_rss_queues;
452 u16 max_rx_queues;
453 u16 max_pmac_cnt;
454 u16 max_vlans;
455 u16 max_event_queues;
456 u32 if_cap_flags;
457 u8 pf_number; 467 u8 pf_number;
458 u64 rss_flags; 468 u64 rss_flags;
459}; 469};
460 470
461#define be_physfn(adapter) (!adapter->virtfn) 471#define be_physfn(adapter) (!adapter->virtfn)
462#define sriov_enabled(adapter) (adapter->num_vfs > 0) 472#define sriov_enabled(adapter) (adapter->num_vfs > 0)
463#define sriov_want(adapter) (adapter->dev_num_vfs && num_vfs && \ 473#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \
464 be_physfn(adapter)) 474 be_physfn(adapter))
465#define for_all_vfs(adapter, vf_cfg, i) \ 475#define for_all_vfs(adapter, vf_cfg, i) \
466 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ 476 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
@@ -469,6 +479,26 @@ struct be_adapter {
469#define ON 1 479#define ON 1
470#define OFF 0 480#define OFF 0
471 481
482#define be_max_vlans(adapter) (adapter->res.max_vlans)
483#define be_max_uc(adapter) (adapter->res.max_uc_mac)
484#define be_max_mc(adapter) (adapter->res.max_mcast_mac)
485#define be_max_vfs(adapter) (adapter->res.max_vfs)
486#define be_max_rss(adapter) (adapter->res.max_rss_qs)
487#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
488#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
489#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
490#define be_max_eqs(adapter) (adapter->res.max_evt_qs)
491#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
492
493static inline u16 be_max_qs(struct be_adapter *adapter)
494{
 495 /* If no RSS, need at least the one default RXQ */
496 u16 num = max_t(u16, be_max_rss(adapter), 1);
497
498 num = min(num, be_max_eqs(adapter));
499 return min_t(u16, num, num_online_cpus());
500}
501
472#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ 502#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
473 adapter->pdev->device == OC_DEVICE_ID4) 503 adapter->pdev->device == OC_DEVICE_ID4)
474 504
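As a worked example of be_max_qs(): with max_rss_qs = 16, max_evt_qs = 8 and four online CPUs it yields min(min(max(16, 1), 8), 4) = 4, and with RSS absent (max_rss_qs = 0) the max_t() clamp still leaves one queue for the default RXQ.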
@@ -672,6 +702,8 @@ extern int be_load_fw(struct be_adapter *adapter, u8 *func);
672extern bool be_is_wol_supported(struct be_adapter *adapter); 702extern bool be_is_wol_supported(struct be_adapter *adapter);
673extern bool be_pause_supported(struct be_adapter *adapter); 703extern bool be_pause_supported(struct be_adapter *adapter);
674extern u32 be_get_fw_log_level(struct be_adapter *adapter); 704extern u32 be_get_fw_log_level(struct be_adapter *adapter);
705int be_update_queues(struct be_adapter *adapter);
706int be_poll(struct napi_struct *napi, int budget);
675 707
676/* 708/*
677 * internal function to initialize-cleanup roce device. 709 * internal function to initialize-cleanup roce device.
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8ec5d74ad44d..1ab5dab11eff 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -258,7 +258,8 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
258 (struct be_async_event_grp5_pvid_state *)evt); 258 (struct be_async_event_grp5_pvid_state *)evt);
259 break; 259 break;
260 default: 260 default:
261 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); 261 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
262 event_type);
262 break; 263 break;
263 } 264 }
264} 265}
@@ -279,7 +280,8 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
279 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD; 280 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
280 break; 281 break;
281 default: 282 default:
282 dev_warn(&adapter->pdev->dev, "Unknown debug event\n"); 283 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
284 event_type);
283 break; 285 break;
284 } 286 }
285} 287}
@@ -631,6 +633,12 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
631 return &wrb->payload.sgl[0]; 633 return &wrb->payload.sgl[0];
632} 634}
633 635
636static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
637 unsigned long addr)
638{
639 wrb->tag0 = addr & 0xFFFFFFFF;
640 wrb->tag1 = upper_32_bits(addr);
641}
634 642
635/* Don't touch the hdr after it's prepared */ 643/* Don't touch the hdr after it's prepared */
636/* mem will be NULL for embedded commands */ 644/* mem will be NULL for embedded commands */
@@ -639,17 +647,12 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
639 struct be_mcc_wrb *wrb, struct be_dma_mem *mem) 647 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
640{ 648{
641 struct be_sge *sge; 649 struct be_sge *sge;
642 unsigned long addr = (unsigned long)req_hdr;
643 u64 req_addr = addr;
644 650
645 req_hdr->opcode = opcode; 651 req_hdr->opcode = opcode;
646 req_hdr->subsystem = subsystem; 652 req_hdr->subsystem = subsystem;
647 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 653 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
648 req_hdr->version = 0; 654 req_hdr->version = 0;
649 655 fill_wrb_tags(wrb, (ulong) req_hdr);
650 wrb->tag0 = req_addr & 0xFFFFFFFF;
651 wrb->tag1 = upper_32_bits(req_addr);
652
653 wrb->payload_length = cmd_len; 656 wrb->payload_length = cmd_len;
654 if (mem) { 657 if (mem) {
655 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << 658 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -676,31 +679,6 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
676 } 679 }
677} 680}
678 681
679/* Converts interrupt delay in microseconds to multiplier value */
680static u32 eq_delay_to_mult(u32 usec_delay)
681{
682#define MAX_INTR_RATE 651042
683 const u32 round = 10;
684 u32 multiplier;
685
686 if (usec_delay == 0)
687 multiplier = 0;
688 else {
689 u32 interrupt_rate = 1000000 / usec_delay;
690 /* Max delay, corresponding to the lowest interrupt rate */
691 if (interrupt_rate == 0)
692 multiplier = 1023;
693 else {
694 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
695 multiplier /= interrupt_rate;
696 /* Round the multiplier to the closest value.*/
697 multiplier = (multiplier + round/2) / round;
698 multiplier = min(multiplier, (u32)1023);
699 }
700 }
701 return multiplier;
702}
703
704static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) 682static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
705{ 683{
706 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 684 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
@@ -728,6 +706,78 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
728 return wrb; 706 return wrb;
729} 707}
730 708
709static bool use_mcc(struct be_adapter *adapter)
710{
711 return adapter->mcc_obj.q.created;
712}
713
714/* Must be used only in process context */
715static int be_cmd_lock(struct be_adapter *adapter)
716{
717 if (use_mcc(adapter)) {
718 spin_lock_bh(&adapter->mcc_lock);
719 return 0;
720 } else {
721 return mutex_lock_interruptible(&adapter->mbox_lock);
722 }
723}
724
725/* Must be used only in process context */
726static void be_cmd_unlock(struct be_adapter *adapter)
727{
728 if (use_mcc(adapter))
729 spin_unlock_bh(&adapter->mcc_lock);
730 else
731 return mutex_unlock(&adapter->mbox_lock);
732}
733
734static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
735 struct be_mcc_wrb *wrb)
736{
737 struct be_mcc_wrb *dest_wrb;
738
739 if (use_mcc(adapter)) {
740 dest_wrb = wrb_from_mccq(adapter);
741 if (!dest_wrb)
742 return NULL;
743 } else {
744 dest_wrb = wrb_from_mbox(adapter);
745 }
746
747 memcpy(dest_wrb, wrb, sizeof(*wrb));
748 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
749 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
750
751 return dest_wrb;
752}
753
754/* Must be used only in process context */
755static int be_cmd_notify_wait(struct be_adapter *adapter,
756 struct be_mcc_wrb *wrb)
757{
758 struct be_mcc_wrb *dest_wrb;
759 int status;
760
761 status = be_cmd_lock(adapter);
762 if (status)
763 return status;
764
765 dest_wrb = be_cmd_copy(adapter, wrb);
766 if (!dest_wrb)
767 return -EBUSY;
768
769 if (use_mcc(adapter))
770 status = be_mcc_notify_wait(adapter);
771 else
772 status = be_mbox_notify_wait(adapter);
773
774 if (!status)
775 memcpy(wrb, dest_wrb, sizeof(*wrb));
776
777 be_cmd_unlock(adapter);
778 return status;
779}
780
731/* Tell fw we're about to start firing cmds by writing a 781/* Tell fw we're about to start firing cmds by writing a
732 * special pattern across the wrb hdr; uses mbox 782 * special pattern across the wrb hdr; uses mbox
733 */ 783 */
@@ -788,13 +838,12 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
788 return status; 838 return status;
789} 839}
790 840
791int be_cmd_eq_create(struct be_adapter *adapter, 841int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
792 struct be_queue_info *eq, int eq_delay)
793{ 842{
794 struct be_mcc_wrb *wrb; 843 struct be_mcc_wrb *wrb;
795 struct be_cmd_req_eq_create *req; 844 struct be_cmd_req_eq_create *req;
796 struct be_dma_mem *q_mem = &eq->dma_mem; 845 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
797 int status; 846 int status, ver = 0;
798 847
799 if (mutex_lock_interruptible(&adapter->mbox_lock)) 848 if (mutex_lock_interruptible(&adapter->mbox_lock))
800 return -1; 849 return -1;
@@ -805,15 +854,18 @@ int be_cmd_eq_create(struct be_adapter *adapter,
805 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 854 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
806 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); 855 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
807 856
857 /* Support for EQ_CREATEv2 available only SH-R onwards */
858 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
859 ver = 2;
860
861 req->hdr.version = ver;
808 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 862 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
809 863
810 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); 864 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
811 /* 4byte eqe*/ 865 /* 4byte eqe*/
812 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); 866 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
813 AMAP_SET_BITS(struct amap_eq_context, count, req->context, 867 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
814 __ilog2_u32(eq->len/256)); 868 __ilog2_u32(eqo->q.len / 256));
815 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
816 eq_delay_to_mult(eq_delay));
817 be_dws_cpu_to_le(req->context, sizeof(req->context)); 869 be_dws_cpu_to_le(req->context, sizeof(req->context));
818 870
819 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 871 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -821,8 +873,10 @@ int be_cmd_eq_create(struct be_adapter *adapter,
821 status = be_mbox_notify_wait(adapter); 873 status = be_mbox_notify_wait(adapter);
822 if (!status) { 874 if (!status) {
823 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); 875 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
824 eq->id = le16_to_cpu(resp->eq_id); 876 eqo->q.id = le16_to_cpu(resp->eq_id);
825 eq->created = true; 877 eqo->msix_idx =
878 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
879 eqo->q.created = true;
826 } 880 }
827 881
828 mutex_unlock(&adapter->mbox_lock); 882 mutex_unlock(&adapter->mbox_lock);
@@ -1010,9 +1064,9 @@ static u32 be_encoded_q_len(int q_len)
1010 return len_encoded; 1064 return len_encoded;
1011} 1065}
1012 1066
1013int be_cmd_mccq_ext_create(struct be_adapter *adapter, 1067static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1014 struct be_queue_info *mccq, 1068 struct be_queue_info *mccq,
1015 struct be_queue_info *cq) 1069 struct be_queue_info *cq)
1016{ 1070{
1017 struct be_mcc_wrb *wrb; 1071 struct be_mcc_wrb *wrb;
1018 struct be_cmd_req_mcc_ext_create *req; 1072 struct be_cmd_req_mcc_ext_create *req;
@@ -1068,9 +1122,9 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1068 return status; 1122 return status;
1069} 1123}
1070 1124
1071int be_cmd_mccq_org_create(struct be_adapter *adapter, 1125static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1072 struct be_queue_info *mccq, 1126 struct be_queue_info *mccq,
1073 struct be_queue_info *cq) 1127 struct be_queue_info *cq)
1074{ 1128{
1075 struct be_mcc_wrb *wrb; 1129 struct be_mcc_wrb *wrb;
1076 struct be_cmd_req_mcc_create *req; 1130 struct be_cmd_req_mcc_create *req;
@@ -1128,25 +1182,16 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
1128 1182
1129int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) 1183int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1130{ 1184{
1131 struct be_mcc_wrb *wrb; 1185 struct be_mcc_wrb wrb = {0};
1132 struct be_cmd_req_eth_tx_create *req; 1186 struct be_cmd_req_eth_tx_create *req;
1133 struct be_queue_info *txq = &txo->q; 1187 struct be_queue_info *txq = &txo->q;
1134 struct be_queue_info *cq = &txo->cq; 1188 struct be_queue_info *cq = &txo->cq;
1135 struct be_dma_mem *q_mem = &txq->dma_mem; 1189 struct be_dma_mem *q_mem = &txq->dma_mem;
1136 int status, ver = 0; 1190 int status, ver = 0;
1137 1191
1138 spin_lock_bh(&adapter->mcc_lock); 1192 req = embedded_payload(&wrb);
1139
1140 wrb = wrb_from_mccq(adapter);
1141 if (!wrb) {
1142 status = -EBUSY;
1143 goto err;
1144 }
1145
1146 req = embedded_payload(wrb);
1147
1148 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1193 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1149 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL); 1194 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1150 1195
1151 if (lancer_chip(adapter)) { 1196 if (lancer_chip(adapter)) {
1152 req->hdr.version = 1; 1197 req->hdr.version = 1;
@@ -1164,12 +1209,11 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1164 req->cq_id = cpu_to_le16(cq->id); 1209 req->cq_id = cpu_to_le16(cq->id);
1165 req->queue_size = be_encoded_q_len(txq->len); 1210 req->queue_size = be_encoded_q_len(txq->len);
1166 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1211 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1167
1168 ver = req->hdr.version; 1212 ver = req->hdr.version;
1169 1213
1170 status = be_mcc_notify_wait(adapter); 1214 status = be_cmd_notify_wait(adapter, &wrb);
1171 if (!status) { 1215 if (!status) {
1172 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); 1216 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1173 txq->id = le16_to_cpu(resp->cid); 1217 txq->id = le16_to_cpu(resp->cid);
1174 if (ver == 2) 1218 if (ver == 2)
1175 txo->db_offset = le32_to_cpu(resp->db_offset); 1219 txo->db_offset = le32_to_cpu(resp->db_offset);
@@ -1178,9 +1222,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1178 txq->created = true; 1222 txq->created = true;
1179 } 1223 }
1180 1224
1181err:
1182 spin_unlock_bh(&adapter->mcc_lock);
1183
1184 return status; 1225 return status;
1185} 1226}
1186 1227
@@ -1309,40 +1350,32 @@ err:
1309} 1350}
1310 1351
1311/* Create an rx filtering policy configuration on an i/f 1352/* Create an rx filtering policy configuration on an i/f
1312 * Uses MCCQ 1353 * Will use MBOX only if MCCQ has not been created.
1313 */ 1354 */
1314int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1355int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1315 u32 *if_handle, u32 domain) 1356 u32 *if_handle, u32 domain)
1316{ 1357{
1317 struct be_mcc_wrb *wrb; 1358 struct be_mcc_wrb wrb = {0};
1318 struct be_cmd_req_if_create *req; 1359 struct be_cmd_req_if_create *req;
1319 int status; 1360 int status;
1320 1361
1321 spin_lock_bh(&adapter->mcc_lock); 1362 req = embedded_payload(&wrb);
1322
1323 wrb = wrb_from_mccq(adapter);
1324 if (!wrb) {
1325 status = -EBUSY;
1326 goto err;
1327 }
1328 req = embedded_payload(wrb);
1329
1330 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1363 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1331 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL); 1364 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
1332 req->hdr.domain = domain; 1365 req->hdr.domain = domain;
1333 req->capability_flags = cpu_to_le32(cap_flags); 1366 req->capability_flags = cpu_to_le32(cap_flags);
1334 req->enable_flags = cpu_to_le32(en_flags); 1367 req->enable_flags = cpu_to_le32(en_flags);
1335
1336 req->pmac_invalid = true; 1368 req->pmac_invalid = true;
1337 1369
1338 status = be_mcc_notify_wait(adapter); 1370 status = be_cmd_notify_wait(adapter, &wrb);
1339 if (!status) { 1371 if (!status) {
1340 struct be_cmd_resp_if_create *resp = embedded_payload(wrb); 1372 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1341 *if_handle = le32_to_cpu(resp->interface_id); 1373 *if_handle = le32_to_cpu(resp->interface_id);
1342 }
1343 1374
1344err: 1375 /* Hack to retrieve VF's pmac-id on BE3 */
1345 spin_unlock_bh(&adapter->mcc_lock); 1376 if (BE3_chip(adapter) && !be_physfn(adapter))
1377 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1378 }
1346 return status; 1379 return status;
1347} 1380}
1348 1381
@@ -1460,6 +1493,12 @@ static int be_mac_to_link_speed(int mac_speed)
1460 return 1000; 1493 return 1000;
1461 case PHY_LINK_SPEED_10GBPS: 1494 case PHY_LINK_SPEED_10GBPS:
1462 return 10000; 1495 return 10000;
1496 case PHY_LINK_SPEED_20GBPS:
1497 return 20000;
1498 case PHY_LINK_SPEED_25GBPS:
1499 return 25000;
1500 case PHY_LINK_SPEED_40GBPS:
1501 return 40000;
1463 } 1502 }
1464 return 0; 1503 return 0;
1465} 1504}
@@ -1520,7 +1559,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
1520{ 1559{
1521 struct be_mcc_wrb *wrb; 1560 struct be_mcc_wrb *wrb;
1522 struct be_cmd_req_get_cntl_addnl_attribs *req; 1561 struct be_cmd_req_get_cntl_addnl_attribs *req;
1523 int status; 1562 int status = 0;
1524 1563
1525 spin_lock_bh(&adapter->mcc_lock); 1564 spin_lock_bh(&adapter->mcc_lock);
1526 1565
@@ -1785,8 +1824,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1785 */ 1824 */
1786 req->if_flags_mask |= 1825 req->if_flags_mask |=
1787 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS & 1826 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1788 adapter->if_cap_flags); 1827 be_if_cap_flags(adapter));
1789
1790 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1828 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1791 netdev_for_each_mc_addr(ha, adapter->netdev) 1829 netdev_for_each_mc_addr(ha, adapter->netdev)
1792 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1830 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
@@ -2444,6 +2482,12 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2444 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 2482 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2445 adapter->phy.misc_params = 2483 adapter->phy.misc_params =
2446 le32_to_cpu(resp_phy_info->misc_params); 2484 le32_to_cpu(resp_phy_info->misc_params);
2485
2486 if (BE2_chip(adapter)) {
2487 adapter->phy.fixed_speeds_supported =
2488 BE_SUPPORTED_SPEED_10GBPS |
2489 BE_SUPPORTED_SPEED_1GBPS;
2490 }
2447 } 2491 }
2448 pci_free_consistent(adapter->pdev, cmd.size, 2492 pci_free_consistent(adapter->pdev, cmd.size,
2449 cmd.va, cmd.dma); 2493 cmd.va, cmd.dma);
@@ -2606,9 +2650,44 @@ err:
2606 return status; 2650 return status;
2607} 2651}
2608 2652
2609/* Uses synchronous MCCQ */ 2653/* Set privilege(s) for a function */
2654int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2655 u32 domain)
2656{
2657 struct be_mcc_wrb *wrb;
2658 struct be_cmd_req_set_fn_privileges *req;
2659 int status;
2660
2661 spin_lock_bh(&adapter->mcc_lock);
2662
2663 wrb = wrb_from_mccq(adapter);
2664 if (!wrb) {
2665 status = -EBUSY;
2666 goto err;
2667 }
2668
2669 req = embedded_payload(wrb);
2670 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2671 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2672 wrb, NULL);
2673 req->hdr.domain = domain;
2674 if (lancer_chip(adapter))
2675 req->privileges_lancer = cpu_to_le32(privileges);
2676 else
2677 req->privileges = cpu_to_le32(privileges);
2678
2679 status = be_mcc_notify_wait(adapter);
2680err:
2681 spin_unlock_bh(&adapter->mcc_lock);
2682 return status;
2683}
2684
2685/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
2686 * pmac_id_valid: false => pmac_id or MAC address is requested.
2687 * If pmac_id is returned, pmac_id_valid is returned as true
2688 */
2610int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2689int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2611 bool *pmac_id_active, u32 *pmac_id, u8 domain) 2690 bool *pmac_id_valid, u32 *pmac_id, u8 domain)
2612{ 2691{
2613 struct be_mcc_wrb *wrb; 2692 struct be_mcc_wrb *wrb;
2614 struct be_cmd_req_get_mac_list *req; 2693 struct be_cmd_req_get_mac_list *req;
@@ -2644,12 +2723,25 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2644 get_mac_list_cmd.size, wrb, &get_mac_list_cmd); 2723 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2645 req->hdr.domain = domain; 2724 req->hdr.domain = domain;
2646 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 2725 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2647 req->perm_override = 1; 2726 if (*pmac_id_valid) {
2727 req->mac_id = cpu_to_le32(*pmac_id);
2728 req->iface_id = cpu_to_le16(adapter->if_handle);
2729 req->perm_override = 0;
2730 } else {
2731 req->perm_override = 1;
2732 }
2648 2733
2649 status = be_mcc_notify_wait(adapter); 2734 status = be_mcc_notify_wait(adapter);
2650 if (!status) { 2735 if (!status) {
2651 struct be_cmd_resp_get_mac_list *resp = 2736 struct be_cmd_resp_get_mac_list *resp =
2652 get_mac_list_cmd.va; 2737 get_mac_list_cmd.va;
2738
2739 if (*pmac_id_valid) {
2740 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2741 ETH_ALEN);
2742 goto out;
2743 }
2744
2653 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 2745 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2654 /* Mac list returned could contain one or more active mac_ids 2746 /* Mac list returned could contain one or more active mac_ids
2655 * or one or more true or pseudo permanent mac addresses. 2747
@@ -2667,14 +2759,14 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2667 * is 6 bytes 2759 * is 6 bytes
2668 */ 2760 */
2669 if (mac_addr_size == sizeof(u32)) { 2761 if (mac_addr_size == sizeof(u32)) {
2670 *pmac_id_active = true; 2762 *pmac_id_valid = true;
2671 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 2763 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2672 *pmac_id = le32_to_cpu(mac_id); 2764 *pmac_id = le32_to_cpu(mac_id);
2673 goto out; 2765 goto out;
2674 } 2766 }
2675 } 2767 }
2676 /* If no active mac_id found, return first mac addr */ 2768 /* If no active mac_id found, return first mac addr */
2677 *pmac_id_active = false; 2769 *pmac_id_valid = false;
2678 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2770 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2679 ETH_ALEN); 2771 ETH_ALEN);
2680 } 2772 }
@@ -2686,6 +2778,41 @@ out:
2686 return status; 2778 return status;
2687} 2779}
2688 2780
2781int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
2782{
2783 bool active = true;
2784
2785 if (BEx_chip(adapter))
2786 return be_cmd_mac_addr_query(adapter, mac, false,
2787 adapter->if_handle, curr_pmac_id);
2788 else
2789 /* Fetch the MAC address using pmac_id */
+	return be_cmd_get_mac_from_list(adapter, mac, &active,
+					&curr_pmac_id, 0);
+}
+
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
+{
+	int status;
+	bool pmac_valid = false;
+
+	memset(mac, 0, ETH_ALEN);
+
+	if (BEx_chip(adapter)) {
+		if (be_physfn(adapter))
+			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
+						       0);
+		else
+			status = be_cmd_mac_addr_query(adapter, mac, false,
+						       adapter->if_handle, 0);
+	} else {
+		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
+						  NULL, 0);
+	}
+
+	return status;
+}
+
 /* Uses synchronous MCCQ */
 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
@@ -2729,8 +2856,27 @@ err:
 	return status;
 }
 
+/* Wrapper to delete any active MACs and provision the new mac.
+ * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
+ * current list are active.
+ */
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
+{
+	bool active_mac = false;
+	u8 old_mac[ETH_ALEN];
+	u32 pmac_id;
+	int status;
+
+	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+					  &pmac_id, dom);
+	if (!status && active_mac)
+		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
+
+	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
+}
+
 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
-			  u32 domain, u16 intf_id)
+			  u32 domain, u16 intf_id, u16 hsw_mode)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_set_hsw_config *req;
@@ -2757,6 +2903,13 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
 	}
+	if (!BEx_chip(adapter) && hsw_mode) {
+		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
+			      ctxt, adapter->hba_port_num);
+		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
+		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
+			      ctxt, hsw_mode);
+	}
 
 	be_dws_cpu_to_le(req->context, sizeof(req->context));
 	status = be_mcc_notify_wait(adapter);
@@ -2768,7 +2921,7 @@ err:
 
 /* Get Hyper switch config */
 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
-			  u32 domain, u16 intf_id)
+			  u32 domain, u16 intf_id, u8 *mode)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_hsw_config *req;
@@ -2791,9 +2944,15 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
		OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
 
 	req->hdr.domain = domain;
-	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
-		      intf_id);
+	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+		      ctxt, intf_id);
 	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
+
+	if (!BEx_chip(adapter)) {
+		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+			      ctxt, adapter->hba_port_num);
+		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
+	}
 	be_dws_cpu_to_le(req->context, sizeof(req->context));
 
 	status = be_mcc_notify_wait(adapter);
@@ -2804,7 +2963,11 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			    sizeof(resp->context));
 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
				    pvid, &resp->context);
-		*pvid = le16_to_cpu(vid);
+		if (pvid)
+			*pvid = le16_to_cpu(vid);
+		if (mode)
+			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+					      port_fwd_type, &resp->context);
 	}
 
 err:
@@ -2967,30 +3130,63 @@ err:
 	return status;
 }
 
-static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
-						    u32 max_buf_size)
+static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
 {
-	struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
+	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
 	int i;
 
 	for (i = 0; i < desc_count; i++) {
-		desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
-		if (((void *)desc + desc->desc_len) >
-		    (void *)(buf + max_buf_size))
-			return NULL;
+		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
+		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
+			return (struct be_nic_res_desc *)hdr;
 
-		if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
-		    desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
-			return desc;
-
-		desc = (void *)desc + desc->desc_len;
+		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+		hdr = (void *)hdr + hdr->desc_len;
 	}
+	return NULL;
+}
 
+static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
+						 u32 desc_count)
+{
+	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+	struct be_pcie_res_desc *pcie;
+	int i;
+
+	for (i = 0; i < desc_count; i++) {
+		if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+		     hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
+			pcie = (struct be_pcie_res_desc *)hdr;
+			if (pcie->pf_num == devfn)
+				return pcie;
+		}
+
+		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+		hdr = (void *)hdr + hdr->desc_len;
+	}
 	return NULL;
 }
 
+static void be_copy_nic_desc(struct be_resources *res,
+			     struct be_nic_res_desc *desc)
+{
+	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
+	res->max_vlans = le16_to_cpu(desc->vlan_count);
+	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
+	res->max_tx_qs = le16_to_cpu(desc->txq_count);
+	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
+	res->max_rx_qs = le16_to_cpu(desc->rq_count);
+	res->max_evt_qs = le16_to_cpu(desc->eq_count);
+	/* Clear flags that driver is not interested in */
+	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
+				BE_IF_CAP_FLAGS_WANT;
+	/* Need 1 RXQ as the default RXQ */
+	if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
+		res->max_rss_qs -= 1;
+}
+
 /* Uses Mbox */
-int be_cmd_get_func_config(struct be_adapter *adapter)
+int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_func_config *req;
@@ -3029,28 +3225,16 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
 	if (!status) {
 		struct be_cmd_resp_get_func_config *resp = cmd.va;
 		u32 desc_count = le32_to_cpu(resp->desc_count);
-		struct be_nic_resource_desc *desc;
+		struct be_nic_res_desc *desc;
 
-		desc = be_get_nic_desc(resp->func_param, desc_count,
-				       sizeof(resp->func_param));
+		desc = be_get_nic_desc(resp->func_param, desc_count);
 		if (!desc) {
 			status = -EINVAL;
 			goto err;
 		}
 
 		adapter->pf_number = desc->pf_num;
-		adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
-		adapter->max_vlans = le16_to_cpu(desc->vlan_count);
-		adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
-		adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
-		adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
-		adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
-
-		adapter->max_event_queues = le16_to_cpu(desc->eq_count);
-		adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
-
-		/* Clear flags that driver is not interested in */
-		adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
+		be_copy_nic_desc(res, desc);
 	}
 err:
 	mutex_unlock(&adapter->mbox_lock);
@@ -3060,8 +3244,8 @@ err:
 }
 
 /* Uses mbox */
-int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-				   u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
+					  u8 domain, struct be_dma_mem *cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_profile_config *req;
@@ -3088,8 +3272,8 @@ int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
 }
 
 /* Uses sync mcc */
-int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-				   u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
+					  u8 domain, struct be_dma_mem *cmd)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_profile_config *req;
@@ -3121,54 +3305,51 @@ err:
 }
 
 /* Uses sync mcc, if MCCQ is already created otherwise mbox */
-int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
-			      u16 *txq_count, u8 domain)
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+			      struct be_resources *res, u8 domain)
 {
+	struct be_cmd_resp_get_profile_config *resp;
+	struct be_pcie_res_desc *pcie;
+	struct be_nic_res_desc *nic;
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
 	struct be_dma_mem cmd;
+	u32 desc_count;
 	int status;
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
-	if (!lancer_chip(adapter))
-		cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
-	else
-		cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-				      &cmd.dma);
-	if (!cmd.va) {
-		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	if (!cmd.va)
 		return -ENOMEM;
-	}
 
 	if (!mccq->created)
 		status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
 	else
 		status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
-	if (!status) {
-		struct be_cmd_resp_get_profile_config *resp = cmd.va;
-		u32 desc_count = le32_to_cpu(resp->desc_count);
-		struct be_nic_resource_desc *desc;
+	if (status)
+		goto err;
 
-		desc = be_get_nic_desc(resp->func_param, desc_count,
-				       sizeof(resp->func_param));
+	resp = cmd.va;
+	desc_count = le32_to_cpu(resp->desc_count);
+
+	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+				desc_count);
+	if (pcie)
+		res->max_vfs = le16_to_cpu(pcie->num_vfs);
+
+	nic = be_get_nic_desc(resp->func_param, desc_count);
+	if (nic)
+		be_copy_nic_desc(res, nic);
 
-		if (!desc) {
-			status = -EINVAL;
-			goto err;
-		}
-		if (cap_flags)
-			*cap_flags = le32_to_cpu(desc->cap_flags);
-		if (txq_count)
-			*txq_count = le32_to_cpu(desc->txq_count);
-	}
 err:
 	if (cmd.va)
-		pci_free_consistent(adapter->pdev, cmd.size,
-				    cmd.va, cmd.dma);
+		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
 	return status;
 }
 
-/* Uses sync mcc */
+/* Currently only Lancer uses this command and it supports version 0 only
+ * Uses sync mcc
+ */
 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
			      u8 domain)
 {
@@ -3189,12 +3370,10 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
			       wrb, NULL);
-
 	req->hdr.domain = domain;
 	req->desc_count = cpu_to_le32(1);
-
-	req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
-	req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
+	req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
+	req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
 	req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
 	req->nic_desc.pf_num = adapter->pf_number;
 	req->nic_desc.vf_num = domain;
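Note on the descriptor walk above: be_get_nic_desc() and be_get_pcie_desc() scan a firmware-filled buffer of variable-length resource descriptors, substituting the v0 size whenever a descriptor reports desc_len == 0 so the walk cannot stall on a malformed entry. A minimal standalone sketch of that pattern follows; the struct layout, type code and sizes here are illustrative stand-ins, not the driver's definitions.

/* Standalone sketch of a variable-length descriptor walk in the spirit of
 * be_get_nic_desc(); all types and constants below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define DESC_TYPE_NIC	0x41	/* placeholder, mirrors NIC_RESOURCE_DESC_TYPE_V0 */
#define DESC_SIZE_V0	8	/* fallback size used when desc_len reads 0 */

struct desc_hdr {
	uint8_t desc_type;
	uint8_t desc_len;	/* total length of this descriptor; 0 means "v0 default" */
};

static struct desc_hdr *find_desc(uint8_t *buf, uint32_t count, uint8_t type)
{
	struct desc_hdr *hdr = (struct desc_hdr *)buf;
	uint32_t i;

	for (i = 0; i < count; i++) {
		if (hdr->desc_type == type)
			return hdr;
		/* a zero desc_len would loop forever, so substitute the v0 size */
		hdr = (struct desc_hdr *)((uint8_t *)hdr +
				(hdr->desc_len ? hdr->desc_len : DESC_SIZE_V0));
	}
	return NULL;
}

int main(void)
{
	/* two 8-byte descriptors: type 0x40 first, then the NIC type 0x41 */
	uint8_t buf[16] = { 0x40, 8, 0, 0, 0, 0, 0, 0,
			    0x41, 8, 0, 0, 0, 0, 0, 0 };
	struct desc_hdr *d = find_desc(buf, 2, DESC_TYPE_NIC);

	if (d)
		printf("found type 0x%x at offset %td\n",
		       d->desc_type, (uint8_t *)d - buf);
	return 0;
}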
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1b3b9e886412..d026226db88c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -202,6 +202,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_READ_TRANSRECV_DATA		73
 #define OPCODE_COMMON_GET_PORT_NAME			77
 #define OPCODE_COMMON_SET_INTERRUPT_ENABLE		89
+#define OPCODE_COMMON_SET_FN_PRIVILEGES			100
 #define OPCODE_COMMON_GET_PHY_DETAILS			102
 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP		103
 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES	121
@@ -306,7 +307,7 @@ struct be_cmd_req_eq_create {
 struct be_cmd_resp_eq_create {
 	struct be_cmd_resp_hdr resp_hdr;
 	u16 eq_id;		/* sword */
-	u16 rsvd0;		/* sword */
+	u16 msix_idx;		/* available only in v2 */
 } __packed;
 
 /******************** Mac query ***************************/
@@ -965,7 +966,10 @@ enum {
 	PHY_LINK_SPEED_10MBPS = 0x1,
 	PHY_LINK_SPEED_100MBPS = 0x2,
 	PHY_LINK_SPEED_1GBPS = 0x3,
-	PHY_LINK_SPEED_10GBPS = 0x4
+	PHY_LINK_SPEED_10GBPS = 0x4,
+	PHY_LINK_SPEED_20GBPS = 0x5,
+	PHY_LINK_SPEED_25GBPS = 0x6,
+	PHY_LINK_SPEED_40GBPS = 0x7
 };
 
 struct be_cmd_resp_link_status {
@@ -1480,6 +1484,11 @@ struct be_cmd_resp_get_fn_privileges {
 	u32 privilege_mask;
 };
 
+struct be_cmd_req_set_fn_privileges {
+	struct be_cmd_req_hdr hdr;
+	u32 privileges;		/* Used by BE3, SH-R */
+	u32 privileges_lancer;	/* Used by Lancer */
+};
 
 /******************** GET/SET_MACLIST **************************/
 #define BE_MAX_MAC			64
@@ -1524,12 +1533,17 @@ struct be_cmd_req_set_mac_list {
 } __packed;
 
 /*********************** HSW Config ***********************/
+#define PORT_FWD_TYPE_VEPA	0x3
+#define PORT_FWD_TYPE_VEB	0x2
+
 struct amap_set_hsw_context {
 	u8 interface_id[16];
 	u8 rsvd0[14];
 	u8 pvid_valid;
-	u8 rsvd1;
-	u8 rsvd2[16];
+	u8 pport;
+	u8 rsvd1[6];
+	u8 port_fwd_type[3];
+	u8 rsvd2[7];
 	u8 pvid[16];
 	u8 rsvd3[32];
 	u8 rsvd4[32];
@@ -1554,7 +1568,9 @@ struct amap_get_hsw_req_context {
 } __packed;
 
 struct amap_get_hsw_resp_context {
-	u8 rsvd1[16];
+	u8 rsvd0[6];
+	u8 port_fwd_type[3];
+	u8 rsvd1[7];
 	u8 pvid[16];
 	u8 rsvd2[32];
 	u8 rsvd3[32];
@@ -1709,11 +1725,13 @@ struct be_cmd_req_set_ext_fat_caps {
 	struct be_fat_conf_params set_params;
 };
 
-#define RESOURCE_DESC_SIZE			88
+#define RESOURCE_DESC_SIZE_V0			72
+#define RESOURCE_DESC_SIZE_V1			88
+#define PCIE_RESOURCE_DESC_TYPE_V0		0x40
 #define NIC_RESOURCE_DESC_TYPE_V0		0x41
+#define PCIE_RESOURCE_DESC_TYPE_V1		0x50
 #define NIC_RESOURCE_DESC_TYPE_V1		0x51
-#define MAX_RESOURCE_DESC			4
-#define MAX_RESOURCE_DESC_V1			32
+#define MAX_RESOURCE_DESC			264
 
 /* QOS unit number */
 #define QUN					4
@@ -1722,9 +1740,30 @@ struct be_cmd_req_set_ext_fat_caps {
 /* No save */
 #define NOSV					7
 
-struct be_nic_resource_desc {
+struct be_res_desc_hdr {
 	u8 desc_type;
 	u8 desc_len;
+} __packed;
+
+struct be_pcie_res_desc {
+	struct be_res_desc_hdr hdr;
+	u8 rsvd0;
+	u8 flags;
+	u16 rsvd1;
+	u8 pf_num;
+	u8 rsvd2;
+	u32 rsvd3;
+	u8 sriov_state;
+	u8 pf_state;
+	u8 pf_type;
+	u8 rsvd4;
+	u16 num_vfs;
+	u16 rsvd5;
+	u32 rsvd6[17];
+} __packed;
+
+struct be_nic_res_desc {
+	struct be_res_desc_hdr hdr;
 	u8 rsvd1;
 	u8 flags;
 	u8 vf_num;
@@ -1753,7 +1792,7 @@ struct be_nic_resource_desc {
 	u8 wol_param;
 	u16 rsvd7;
 	u32 rsvd8[3];
-};
+} __packed;
 
 struct be_cmd_req_get_func_config {
 	struct be_cmd_req_hdr hdr;
@@ -1762,7 +1801,7 @@ struct be_cmd_req_get_func_config {
 struct be_cmd_resp_get_func_config {
 	struct be_cmd_resp_hdr hdr;
 	u32 desc_count;
-	u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+	u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
 };
 
 #define ACTIVE_PROFILE_TYPE			0x2
@@ -1774,26 +1813,20 @@ struct be_cmd_req_get_profile_config {
 };
 
 struct be_cmd_resp_get_profile_config {
-	struct be_cmd_req_hdr hdr;
-	u32 desc_count;
-	u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
-};
-
-struct be_cmd_resp_get_profile_config_v1 {
-	struct be_cmd_req_hdr hdr;
+	struct be_cmd_resp_hdr hdr;
 	u32 desc_count;
-	u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE];
+	u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
 };
 
 struct be_cmd_req_set_profile_config {
 	struct be_cmd_req_hdr hdr;
 	u32 rsvd;
 	u32 desc_count;
-	struct be_nic_resource_desc nic_desc;
+	struct be_nic_res_desc nic_desc;
 };
 
 struct be_cmd_resp_set_profile_config {
-	struct be_cmd_req_hdr hdr;
+	struct be_cmd_resp_hdr hdr;
 };
 
 struct be_cmd_enable_disable_vf {
@@ -1842,8 +1875,7 @@ extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
			    u32 en_flags, u32 *if_handle, u32 domain);
 extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
			     u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter,
-			    struct be_queue_info *eq, int eq_delay);
+extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
 extern int be_cmd_cq_create(struct be_adapter *adapter,
			    struct be_queue_info *cq, struct be_queue_info *eq,
			    bool no_delay, int num_cqe_dma_coalesce);
@@ -1927,15 +1959,22 @@ extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
 extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
 extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
				    u32 *privilege, u32 domain);
+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
+				    u32 privileges, u32 vf_num);
 extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
				    bool *pmac_id_active, u32 *pmac_id,
				    u8 domain);
+extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
+				 u8 *mac);
+extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
 extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			       u8 mac_count, u32 domain);
+extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
+			  u32 dom);
 extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
-				 u32 domain, u16 intf_id);
+				 u32 domain, u16 intf_id, u16 hsw_mode);
 extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
-				 u32 domain, u16 intf_id);
+				 u32 domain, u16 intf_id, u8 *mode);
 extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
 extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
					  struct be_dma_mem *cmd);
@@ -1948,10 +1987,10 @@ extern int lancer_initiate_dump(struct be_adapter *adapter);
 extern bool dump_present(struct be_adapter *adapter);
 extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
 extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
-extern int be_cmd_get_func_config(struct be_adapter *adapter);
-extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
-				     u16 *txq_count, u8 domain);
-
+int be_cmd_get_func_config(struct be_adapter *adapter,
			   struct be_resources *res);
+int be_cmd_get_profile_config(struct be_adapter *adapter,
			      struct be_resources *res, u8 domain);
 extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
				     u8 domain);
 extern int be_cmd_get_if_id(struct be_adapter *adapter,
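Note on the amap_* structures above: they are bit-maps rather than byte-maps. Each u8 member stands for a single bit and an array such as port_fwd_type[3] is a 3-bit field, which is why AMAP_SET_BITS/AMAP_GET_BITS address fields by bit offset and width. A rough standalone illustration of that addressing scheme follows; these toy accessors are not the driver's actual macros, which derive offset and width from the struct layout at compile time.

/* Toy bit-offset/width accessors in the spirit of AMAP_{SET,GET}_BITS. */
#include <stdint.h>
#include <stdio.h>

static void set_bits(uint8_t *ctxt, unsigned off, unsigned width, uint32_t val)
{
	unsigned i;

	for (i = 0; i < width; i++) {
		unsigned bit = off + i;

		if (val & (1u << i))
			ctxt[bit / 8] |= 1u << (bit % 8);
		else
			ctxt[bit / 8] &= ~(1u << (bit % 8));
	}
}

static uint32_t get_bits(const uint8_t *ctxt, unsigned off, unsigned width)
{
	uint32_t val = 0;
	unsigned i;

	for (i = 0; i < width; i++)
		if (ctxt[(off + i) / 8] & (1u << ((off + i) % 8)))
			val |= 1u << i;
	return val;
}

int main(void)
{
	uint8_t ctxt[8] = {0};

	/* mirror the get_hsw resp layout: 6 reserved bits, a 3-bit
	 * port_fwd_type at bit 6, then a 16-bit pvid at bit 16 */
	set_bits(ctxt, 6, 3, 0x2);	/* e.g. PORT_FWD_TYPE_VEB */
	set_bits(ctxt, 16, 16, 100);	/* pvid */

	printf("port_fwd_type=%u pvid=%u\n",
	       get_bits(ctxt, 6, 3), get_bits(ctxt, 16, 16));
	return 0;
}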
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 4f8c941217cc..b440a1fac77b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1119,6 +1119,29 @@ static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 	return status;
 }
 
+static void be_get_channels(struct net_device *netdev,
+			    struct ethtool_channels *ch)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	ch->combined_count = adapter->num_evt_qs;
+	ch->max_combined = be_max_qs(adapter);
+}
+
+static int be_set_channels(struct net_device *netdev,
+			   struct ethtool_channels *ch)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (ch->rx_count || ch->tx_count || ch->other_count ||
+	    !ch->combined_count || ch->combined_count > be_max_qs(adapter))
+		return -EINVAL;
+
+	adapter->cfg_num_qs = ch->combined_count;
+
+	return be_update_queues(adapter);
+}
+
 const struct ethtool_ops be_ethtool_ops = {
 	.get_settings = be_get_settings,
 	.get_drvinfo = be_get_drvinfo,
@@ -1145,4 +1168,6 @@ const struct ethtool_ops be_ethtool_ops = {
 	.self_test = be_self_test,
 	.get_rxnfc = be_get_rxnfc,
 	.set_rxnfc = be_set_rxnfc,
+	.get_channels = be_get_channels,
+	.set_channels = be_set_channels
 };
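With get_channels/set_channels wired up, the combined queue count becomes tunable from userspace, e.g. via `ethtool -L eth0 combined 4`. The equivalent raw ioctl is sketched below, assuming a reasonably recent <linux/ethtool.h>; the interface name "eth0" is a placeholder. Note that, matching be_set_channels(), only the combined count may be changed.

/* Query then set the combined channel count via ETHTOOL_{G,S}CHANNELS. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&ch;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCHANNELS");
		close(fd);
		return 1;
	}
	printf("combined: %u (max %u)\n", ch.combined_count, ch.max_combined);

	/* request the maximum; rx/tx/other counts stay 0 as be2net requires */
	ch.cmd = ETHTOOL_SCHANNELS;
	ch.combined_count = ch.max_combined;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCHANNELS");

	close(fd);
	return 0;
}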
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3d91a5ec61a4..3224d28cdad4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -21,6 +21,7 @@
 #include "be_cmds.h"
 #include <asm/div64.h>
 #include <linux/aer.h>
+#include <linux/if_bridge.h>
 
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -145,8 +146,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
-				     GFP_KERNEL | __GFP_ZERO);
+	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+				      GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 	return 0;
@@ -247,54 +248,54 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
 static int be_mac_addr_set(struct net_device *netdev, void *p)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
+	struct device *dev = &adapter->pdev->dev;
 	struct sockaddr *addr = p;
-	int status = 0;
-	u8 current_mac[ETH_ALEN];
-	u32 pmac_id = adapter->pmac_id[0];
-	bool active_mac = true;
+	int status;
+	u8 mac[ETH_ALEN];
+	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	/* For BE VF, MAC address is already activated by PF.
-	 * Hence only operation left is updating netdev->devaddr.
-	 * Update it if user is passing the same MAC which was used
-	 * during configuring VF MAC from PF(Hypervisor).
+	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
+	 * privilege or if PF did not provision the new MAC address.
+	 * On BE3, this cmd will always fail if the VF doesn't have the
+	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
+	 * the MAC for the VF.
 	 */
-	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
-		status = be_cmd_mac_addr_query(adapter, current_mac,
-					       false, adapter->if_handle, 0);
-		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
-			goto done;
-		else
-			goto err;
-	}
+	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
+	if (!status) {
+		curr_pmac_id = adapter->pmac_id[0];
 
-	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
-		goto done;
+		/* Delete the old programmed MAC. This call may fail if the
+		 * old MAC was already deleted by the PF driver.
+		 */
+		if (adapter->pmac_id[0] != old_pmac_id)
+			be_cmd_pmac_del(adapter, adapter->if_handle,
+					old_pmac_id, 0);
+	}
 
-	/* For Lancer check if any MAC is active.
-	 * If active, get its mac id.
+	/* Decide if the new MAC is successfully activated only after
+	 * querying the FW
 	 */
-	if (lancer_chip(adapter) && !be_physfn(adapter))
-		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
-					 &pmac_id, 0);
-
-	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
-				 adapter->if_handle,
-				 &adapter->pmac_id[0], 0);
-
+	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
 	if (status)
 		goto err;
 
-	if (active_mac)
-		be_cmd_pmac_del(adapter, adapter->if_handle,
-				pmac_id, 0);
-done:
+	/* The MAC change did not happen, either due to lack of privilege
+	 * or PF didn't pre-provision.
+	 */
+	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+		status = -EPERM;
+		goto err;
+	}
+
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	dev_info(dev, "MAC address changed to %pM\n", mac);
 	return 0;
 err:
-	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
+	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
 	return status;
 }
 
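The rewritten be_mac_addr_set() above trusts a MAC change only after reading the active MAC back from firmware: program the candidate, delete the old filter, query what is actually active, and commit only on a match. A standalone sketch of that verify-after-write pattern follows; the fw_* helpers are stand-ins for the firmware commands, not driver code.

/* Verify-after-write MAC change, with stubbed "firmware" state. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define ETH_ALEN 6

static unsigned char fw_active_mac[ETH_ALEN];	/* pretend FW filter state */

static int fw_program_mac(const unsigned char *mac)
{
	/* a PF policy check could reject this; the sketch always accepts */
	memcpy(fw_active_mac, mac, ETH_ALEN);
	return 0;
}

static int fw_query_active_mac(unsigned char *mac)
{
	memcpy(mac, fw_active_mac, ETH_ALEN);
	return 0;
}

static int set_mac(unsigned char *dev_addr, const unsigned char *new_mac)
{
	unsigned char cur[ETH_ALEN];

	/* programming may silently fail or be overridden, so the result
	 * is trusted only after reading the active MAC back */
	fw_program_mac(new_mac);
	if (fw_query_active_mac(cur))
		return -EIO;
	if (memcmp(new_mac, cur, ETH_ALEN))
		return -EPERM;	/* the change did not take effect */

	memcpy(dev_addr, new_mac, ETH_ALEN);
	return 0;
}

int main(void)
{
	unsigned char dev_addr[ETH_ALEN] = {0};
	const unsigned char mac[ETH_ALEN] = {0x02, 0, 0, 0, 0, 1};

	printf("set_mac: %d\n", set_mac(dev_addr, mac));
	return 0;
}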
@@ -472,7 +473,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
 	ACCESS_ONCE(*acc) = newacc;
 }
 
-void populate_erx_stats(struct be_adapter *adapter,
-			struct be_rx_obj *rxo,
-			u32 erx_stat)
+static void populate_erx_stats(struct be_adapter *adapter,
+			       struct be_rx_obj *rxo,
+			       u32 erx_stat)
 {
@@ -1001,7 +1002,7 @@ static int be_vid_config(struct be_adapter *adapter)
 	if (adapter->promiscuous)
 		return 0;
 
-	if (adapter->vlans_added > adapter->max_vlans)
+	if (adapter->vlans_added > be_max_vlans(adapter))
 		goto set_vlan_promisc;
 
 	/* Construct VLAN Table to give to HW */
@@ -1042,7 +1043,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 		goto ret;
 
 	adapter->vlan_tag[vid] = 1;
-	if (adapter->vlans_added <= (adapter->max_vlans + 1))
+	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
 		status = be_vid_config(adapter);
 
 	if (!status)
@@ -1068,7 +1069,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
 		goto ret;
 
 	adapter->vlan_tag[vid] = 0;
-	if (adapter->vlans_added <= adapter->max_vlans)
+	if (adapter->vlans_added <= be_max_vlans(adapter))
 		status = be_vid_config(adapter);
 
 	if (!status)
@@ -1101,7 +1102,7 @@ static void be_set_rx_mode(struct net_device *netdev)
 
 	/* Enable multicast promisc if num configured exceeds what we support */
 	if (netdev->flags & IFF_ALLMULTI ||
-	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
+	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
 		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
 		goto done;
 	}
@@ -1115,7 +1116,7 @@ static void be_set_rx_mode(struct net_device *netdev)
					adapter->pmac_id[i], 0);
 	}
 
-	if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
+	if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
 		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
 		adapter->promiscuous = true;
 		goto done;
@@ -1146,9 +1147,6 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
 	int status;
-	bool active_mac = false;
-	u32 pmac_id;
-	u8 old_mac[ETH_ALEN];
 
 	if (!sriov_enabled(adapter))
 		return -EPERM;
@@ -1156,20 +1154,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
 		return -EINVAL;
 
-	if (lancer_chip(adapter)) {
-		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
-						  &pmac_id, vf + 1);
-		if (!status && active_mac)
-			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
-					pmac_id, vf + 1);
-
-		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
-	} else {
-		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
-					 vf_cfg->pmac_id, vf + 1);
+	if (BEx_chip(adapter)) {
+		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
+				vf + 1);
 
 		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
+	} else {
+		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+					vf + 1);
 	}
 
 	if (status)
@@ -1220,14 +1213,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
			adapter->vf_cfg[vf].vlan_tag = vlan;
 
			status = be_cmd_set_hsw_config(adapter, vlan,
-				vf + 1, adapter->vf_cfg[vf].if_handle);
+				vf + 1, adapter->vf_cfg[vf].if_handle, 0);
 		}
 	} else {
 		/* Reset Transparent Vlan Tagging. */
 		adapter->vf_cfg[vf].vlan_tag = 0;
 		vlan = adapter->vf_cfg[vf].def_vid;
 		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
-					       adapter->vf_cfg[vf].if_handle);
+					       adapter->vf_cfg[vf].if_handle, 0);
 	}
 
 
@@ -1490,8 +1483,9 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
 }
 
 /* Process the RX completion indicated by rxcp when GRO is enabled */
-void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
-			     struct be_rx_compl_info *rxcp)
+static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
+				    struct napi_struct *napi,
+				    struct be_rx_compl_info *rxcp)
 {
 	struct be_adapter *adapter = rxo->adapter;
 	struct be_rx_page_info *page_info;
@@ -1920,6 +1914,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
 		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+			netif_napi_del(&eqo->napi);
 		}
 		be_queue_free(adapter, &eqo->q);
 	}
@@ -1931,9 +1926,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 	struct be_eq_obj *eqo;
 	int i, rc;
 
-	adapter->num_evt_qs = num_irqs(adapter);
+	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
+				    adapter->cfg_num_qs);
 
 	for_all_evt_queues(adapter, eqo, i) {
+		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+			       BE_NAPI_WEIGHT);
 		eqo->adapter = adapter;
 		eqo->tx_budget = BE_TX_BUDGET;
 		eqo->idx = i;
@@ -1946,7 +1944,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 		if (rc)
			return rc;
 
-		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
+		rc = be_cmd_eq_create(adapter, eqo);
 		if (rc)
			return rc;
 	}
@@ -2020,31 +2018,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
 	}
 }
 
-static int be_num_txqs_want(struct be_adapter *adapter)
-{
-	if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
-	    be_is_mc(adapter) ||
-	    (!lancer_chip(adapter) && !be_physfn(adapter)) ||
-	    BE2_chip(adapter))
-		return 1;
-	else
-		return adapter->max_tx_queues;
-}
-
-static int be_tx_cqs_create(struct be_adapter *adapter)
+static int be_tx_qs_create(struct be_adapter *adapter)
 {
 	struct be_queue_info *cq, *eq;
-	int status;
 	struct be_tx_obj *txo;
-	u8 i;
+	int status, i;
 
-	adapter->num_tx_qs = be_num_txqs_want(adapter);
-	if (adapter->num_tx_qs != MAX_TX_QS) {
-		rtnl_lock();
-		netif_set_real_num_tx_queues(adapter->netdev,
-					     adapter->num_tx_qs);
-		rtnl_unlock();
-	}
+	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
 
 	for_all_tx_queues(adapter, txo, i) {
 		cq = &txo->cq;
@@ -2060,16 +2040,7 @@ static int be_tx_cqs_create(struct be_adapter *adapter)
 		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
 		if (status)
			return status;
-	}
-	return 0;
-}
 
-static int be_tx_qs_create(struct be_adapter *adapter)
-{
-	struct be_tx_obj *txo;
-	int i, status;
-
-	for_all_tx_queues(adapter, txo, i) {
 		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
 		if (status)
@@ -2105,17 +2076,14 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
 	struct be_rx_obj *rxo;
 	int rc, i;
 
-	/* We'll create as many RSS rings as there are irqs.
-	 * But when there's only one irq there's no use creating RSS rings
+	/* We can create as many RSS rings as there are EQs. */
+	adapter->num_rx_qs = adapter->num_evt_qs;
+
+	/* We'll use RSS only if atleast 2 RSS rings are supported.
+	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
 	 */
-	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
-				num_irqs(adapter) + 1 : 1;
-	if (adapter->num_rx_qs != MAX_RX_QS) {
-		rtnl_lock();
-		netif_set_real_num_rx_queues(adapter->netdev,
-					     adapter->num_rx_qs);
-		rtnl_unlock();
-	}
+	if (adapter->num_rx_qs > 1)
+		adapter->num_rx_qs++;
 
 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
 	for_all_rx_queues(adapter, rxo, i) {
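The RX sizing rule introduced above is simple arithmetic: one RSS ring per event queue, plus one default RXQ for non-RSS traffic whenever more than one ring exists. A trivial standalone sketch of just that rule:

/* RXQ count derivation mirroring the new be_rx_cqs_create() logic. */
#include <stdio.h>

static int num_rx_qs(int num_evt_qs)
{
	int n = num_evt_qs;		/* one RSS ring per EQ */

	if (n > 1)
		n++;			/* default RXQ for non-RSS traffic */
	return n;
}

int main(void)
{
	int eqs;

	for (eqs = 1; eqs <= 4; eqs++)
		printf("%d EQ(s) -> %d RXQ(s)\n", eqs, num_rx_qs(eqs));
	return 0;
}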
@@ -2379,38 +2347,24 @@ static void be_msix_disable(struct be_adapter *adapter)
 	if (msix_enabled(adapter)) {
 		pci_disable_msix(adapter->pdev);
 		adapter->num_msix_vec = 0;
+		adapter->num_msix_roce_vec = 0;
 	}
 }
 
-static uint be_num_rss_want(struct be_adapter *adapter)
-{
-	u32 num = 0;
-
-	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-	    (lancer_chip(adapter) ||
-	     (!sriov_want(adapter) && be_physfn(adapter)))) {
-		num = adapter->max_rss_queues;
-		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
-	}
-	return num;
-}
-
 static int be_msix_enable(struct be_adapter *adapter)
 {
-#define BE_MIN_MSIX_VECTORS		1
-	int i, status, num_vec, num_roce_vec = 0;
+	int i, status, num_vec;
 	struct device *dev = &adapter->pdev->dev;
 
-	/* If RSS queues are not used, need a vec for default RX Q */
-	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
-	if (be_roce_supported(adapter)) {
-		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
-				     (num_online_cpus() + 1));
-		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
-		num_vec += num_roce_vec;
-		num_vec = min(num_vec, MAX_MSIX_VECTORS);
-	}
-	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
+	/* If RoCE is supported, program the max number of NIC vectors that
+	 * may be configured via set-channels, along with vectors needed for
+	 * RoCe. Else, just program the number we'll use initially.
+	 */
+	if (be_roce_supported(adapter))
+		num_vec = min_t(int, 2 * be_max_eqs(adapter),
+				2 * num_online_cpus());
+	else
+		num_vec = adapter->cfg_num_qs;
 
 	for (i = 0; i < num_vec; i++)
 		adapter->msix_entries[i].entry = i;
@@ -2418,7 +2372,7 @@ static int be_msix_enable(struct be_adapter *adapter)
 	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
 	if (status == 0) {
 		goto done;
-	} else if (status >= BE_MIN_MSIX_VECTORS) {
+	} else if (status >= MIN_MSIX_VECTORS) {
 		num_vec = status;
 		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
@@ -2427,30 +2381,29 @@ static int be_msix_enable(struct be_adapter *adapter)
 	}
 
 	dev_warn(dev, "MSIx enable failed\n");
+
 	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
 	if (!be_physfn(adapter))
 		return status;
 	return 0;
 done:
-	if (be_roce_supported(adapter)) {
-		if (num_vec > num_roce_vec) {
-			adapter->num_msix_vec = num_vec - num_roce_vec;
-			adapter->num_msix_roce_vec =
-				num_vec - adapter->num_msix_vec;
-		} else {
-			adapter->num_msix_vec = num_vec;
-			adapter->num_msix_roce_vec = 0;
-		}
-	} else
-		adapter->num_msix_vec = num_vec;
-	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
+	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
+		adapter->num_msix_roce_vec = num_vec / 2;
+		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
+			 adapter->num_msix_roce_vec);
+	}
+
+	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
+
+	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
+		 adapter->num_msix_vec);
 	return 0;
 }
 
 static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
 {
-	return adapter->msix_entries[eqo->idx].vector;
+	return adapter->msix_entries[eqo->msix_idx].vector;
 }
 
 static int be_msix_register(struct be_adapter *adapter)
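After pci_enable_msix() possibly grants fewer vectors than requested, the new be_msix_enable() splits whatever was granted: when RoCE is supported and more than the minimum remains, RoCE takes half and the NIC keeps the rest. A sketch of just that split follows; MIN_MSIX_VECTORS mirrors the driver-side constant and is assumed to be 1 here.

/* NIC/RoCE vector split as done at the end of be_msix_enable(). */
#include <stdio.h>
#include <stdbool.h>

#define MIN_MSIX_VECTORS 1	/* assumed value of the driver constant */

static void split_vectors(int granted, bool roce, int *nic, int *roce_vec)
{
	*roce_vec = (roce && granted > MIN_MSIX_VECTORS) ? granted / 2 : 0;
	*nic = granted - *roce_vec;
}

int main(void)
{
	int nic, roce;

	split_vectors(8, true, &nic, &roce);
	printf("granted 8 with RoCE: nic=%d roce=%d\n", nic, roce);
	split_vectors(1, true, &nic, &roce);
	printf("granted 1 with RoCE: nic=%d roce=%d\n", nic, roce);
	return 0;
}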
@@ -2690,8 +2643,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
 	memset(mac, 0, ETH_ALEN);
 
 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				    GFP_KERNEL | __GFP_ZERO);
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_KERNEL);
 	if (cmd.va == NULL)
 		return -1;
 
@@ -2735,13 +2688,13 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
 	be_vf_eth_addr_generate(adapter, mac);
 
 	for_all_vfs(adapter, vf_cfg, vf) {
-		if (lancer_chip(adapter)) {
-			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
-		} else {
+		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
-		}
+		else
+			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+						vf + 1);
 
 		if (status)
			dev_err(&adapter->pdev->dev,
@@ -2759,7 +2712,7 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
 	int status, vf;
 	u8 mac[ETH_ALEN];
 	struct be_vf_cfg *vf_cfg;
-	bool active;
+	bool active = false;
 
 	for_all_vfs(adapter, vf_cfg, vf) {
 		be_cmd_get_mac_from_list(adapter, mac, &active,
@@ -2788,11 +2741,12 @@ static void be_vf_clear(struct be_adapter *adapter)
 	pci_disable_sriov(adapter->pdev);
 
 	for_all_vfs(adapter, vf_cfg, vf) {
-		if (lancer_chip(adapter))
-			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
-		else
+		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
+		else
+			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
+				       vf + 1);
 
 		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
 	}
@@ -2801,28 +2755,40 @@ done:
 	adapter->num_vfs = 0;
 }
 
-static int be_clear(struct be_adapter *adapter)
+static void be_clear_queues(struct be_adapter *adapter)
 {
-	int i = 1;
+	be_mcc_queues_destroy(adapter);
+	be_rx_cqs_destroy(adapter);
+	be_tx_queues_destroy(adapter);
+	be_evt_queues_destroy(adapter);
+}
 
+static void be_cancel_worker(struct be_adapter *adapter)
+{
 	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
 		cancel_delayed_work_sync(&adapter->work);
 		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
 	}
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
+	int i;
+
+	be_cancel_worker(adapter);
 
 	if (sriov_enabled(adapter))
 		be_vf_clear(adapter);
 
-	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+	/* delete the primary mac along with the uc-mac list */
+	for (i = 0; i < (adapter->uc_macs + 1); i++)
 		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
+	adapter->uc_macs = 0;
 
 	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 
-	be_mcc_queues_destroy(adapter);
-	be_rx_cqs_destroy(adapter);
-	be_tx_queues_destroy(adapter);
-	be_evt_queues_destroy(adapter);
+	be_clear_queues(adapter);
 
 	kfree(adapter->pmac_id);
 	adapter->pmac_id = NULL;
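Worth noting in the be_clear() change above: the removed loop began at i = 1, so it only released pmac_id[1..uc_macs] and left the primary MAC in slot 0 behind; the new loop runs over slots 0..uc_macs, deleting the primary along with the unicast list. A trivial sketch of the corrected iteration:

/* Cleanup loop covering the primary MAC (slot 0) plus the uc-mac list. */
#include <stdio.h>

int main(void)
{
	unsigned int pmac_id[4] = {10, 11, 12, 13};
	int uc_macs = 2;	/* extra unicast MACs beyond the primary */
	int i;

	for (i = 0; i < uc_macs + 1; i++)	/* slots 0..2, not 1..2 */
		printf("pmac_del(id=%u)\n", pmac_id[i]);
	return 0;
}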
@@ -2833,6 +2799,7 @@ static int be_clear(struct be_adapter *adapter)
 
 static int be_vfs_if_create(struct be_adapter *adapter)
 {
+	struct be_resources res = {0};
 	struct be_vf_cfg *vf_cfg;
 	u32 cap_flags, en_flags, vf;
 	int status;
@@ -2841,9 +2808,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
		    BE_IF_FLAGS_MULTICAST;
 
 	for_all_vfs(adapter, vf_cfg, vf) {
-		if (!BE3_chip(adapter))
-			be_cmd_get_profile_config(adapter, &cap_flags,
-						  NULL, vf + 1);
+		if (!BE3_chip(adapter)) {
+			status = be_cmd_get_profile_config(adapter, &res,
+							   vf + 1);
+			if (!status)
+				cap_flags = res.if_cap_flags;
+		}
 
 		/* If a FW profile exists, then cap_flags are updated */
 		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@ -2880,6 +2850,7 @@ static int be_vf_setup(struct be_adapter *adapter)
 	u16 def_vlan, lnk_speed;
 	int status, old_vfs, vf;
 	struct device *dev = &adapter->pdev->dev;
+	u32 privileges;
 
 	old_vfs = pci_num_vf(adapter->pdev);
 	if (old_vfs) {
@@ -2888,10 +2859,10 @@ static int be_vf_setup(struct be_adapter *adapter)
 		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
 		adapter->num_vfs = old_vfs;
 	} else {
-		if (num_vfs > adapter->dev_num_vfs)
+		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
-				 adapter->dev_num_vfs, num_vfs);
-		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
+				 be_max_vfs(adapter), num_vfs);
+		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
 		if (!adapter->num_vfs)
			return 0;
 	}
@@ -2923,6 +2894,18 @@ static int be_vf_setup(struct be_adapter *adapter)
 	}
 
 	for_all_vfs(adapter, vf_cfg, vf) {
+		/* Allow VFs to programs MAC/VLAN filters */
+		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
+		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+			status = be_cmd_set_fn_privileges(adapter,
+							  privileges |
+							  BE_PRIV_FILTMGMT,
+							  vf + 1);
+			if (!status)
+				dev_info(dev, "VF%d has FILTMGMT privilege\n",
+					 vf);
+		}
+
 		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
@@ -2935,7 +2918,7 @@ static int be_vf_setup(struct be_adapter *adapter)
			vf_cfg->tx_rate = lnk_speed;
 
 		status = be_cmd_get_hsw_config(adapter, &def_vlan,
-					       vf + 1, vf_cfg->if_handle);
+					       vf + 1, vf_cfg->if_handle, NULL);
 		if (status)
			goto err;
 		vf_cfg->def_vid = def_vlan;
@@ -2958,6 +2941,51 @@ err:
 	return status;
 }
 
+/* On BE2/BE3 FW does not suggest the supported limits */
+static void BEx_get_resources(struct be_adapter *adapter,
+			      struct be_resources *res)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	bool use_sriov = false;
+
+	if (BE3_chip(adapter) && be_physfn(adapter)) {
+		int max_vfs;
+
+		max_vfs = pci_sriov_get_totalvfs(pdev);
+		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+		use_sriov = res->max_vfs && num_vfs;
+	}
+
+	if (be_physfn(adapter))
+		res->max_uc_mac = BE_UC_PMAC_COUNT;
+	else
+		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
+
+	if (adapter->function_mode & FLEX10_MODE)
+		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+	else
+		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+	res->max_mcast_mac = BE_MAX_MC;
+
+	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
+	    !be_physfn(adapter))
+		res->max_tx_qs = 1;
+	else
+		res->max_tx_qs = BE3_MAX_TX_QS;
+
+	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+	    !use_sriov && be_physfn(adapter))
+		res->max_rss_qs = (adapter->be3_native) ?
+					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+	res->max_rx_qs = res->max_rss_qs + 1;
+
+	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+
+	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
+	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
+		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
+}
+
 static void be_setup_init(struct be_adapter *adapter)
 {
 	adapter->vlan_prio_bmap = 0xff;
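Since BE2/BE3 firmware does not report limits, BEx_get_resources() above derives them from chip type, function role and SR-IOV state. A simplified standalone sketch of a few of those rules follows; the constants are placeholders for the driver's, and the multi-channel (be_is_mc) condition is folded away for brevity.

/* Simplified BE2/BE3 fallback-limit derivation; constants are placeholders. */
#include <stdio.h>
#include <stdbool.h>

#define BE3_MAX_TX_QS	16
#define BE3_MAX_RSS_QS	16
#define BE2_MAX_RSS_QS	4

struct limits {
	int max_tx_qs;
	int max_rss_qs;
	int max_rx_qs;
};

static struct limits bex_limits(bool be2, bool physfn, bool use_sriov,
				bool rss_cap, bool be3_native)
{
	struct limits res = {0};

	/* BE2, SR-IOV use and VFs are all limited to a single TXQ */
	res.max_tx_qs = (be2 || use_sriov || !physfn) ? 1 : BE3_MAX_TX_QS;

	if (rss_cap && !use_sriov && physfn)
		res.max_rss_qs = be3_native ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res.max_rx_qs = res.max_rss_qs + 1;	/* plus the default RXQ */
	return res;
}

int main(void)
{
	struct limits l = bex_limits(false, true, false, true, true);

	printf("txqs=%d rssqs=%d rxqs=%d\n",
	       l.max_tx_qs, l.max_rss_qs, l.max_rx_qs);
	return 0;
}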
@@ -2971,118 +2999,56 @@ static void be_setup_init(struct be_adapter *adapter)
2971 adapter->cmd_privileges = MIN_PRIVILEGES; 2999 adapter->cmd_privileges = MIN_PRIVILEGES;
2972} 3000}
2973 3001
-static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
-			   bool *active_mac, u32 *pmac_id)
-{
-	int status = 0;
-
-	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
-		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
-		if (!lancer_chip(adapter) && !be_physfn(adapter))
-			*active_mac = true;
-		else
-			*active_mac = false;
-
-		return status;
-	}
-
-	if (lancer_chip(adapter)) {
-		status = be_cmd_get_mac_from_list(adapter, mac,
-						  active_mac, pmac_id, 0);
-		if (*active_mac) {
-			status = be_cmd_mac_addr_query(adapter, mac, false,
-						       if_handle, *pmac_id);
-		}
-	} else if (be_physfn(adapter)) {
-		/* For BE3, for PF get permanent MAC */
-		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
-		*active_mac = false;
-	} else {
-		/* For BE3, for VF get soft MAC assigned by PF*/
-		status = be_cmd_mac_addr_query(adapter, mac, false,
-					       if_handle, 0);
-		*active_mac = true;
-	}
-	return status;
-}
-
-static void be_get_resources(struct be_adapter *adapter)
+static int be_get_resources(struct be_adapter *adapter)
 {
-	u16 dev_num_vfs;
-	int pos, status;
-	bool profile_present = false;
-	u16 txq_count = 0;
+	struct device *dev = &adapter->pdev->dev;
+	struct be_resources res = {0};
+	int status;
 
+	if (BEx_chip(adapter)) {
+		BEx_get_resources(adapter, &res);
+		adapter->res = res;
+	}
+
+	/* For BE3 only check if FW suggests a different max-txqs value */
+	if (BE3_chip(adapter)) {
+		status = be_cmd_get_profile_config(adapter, &res, 0);
+		if (!status && res.max_tx_qs)
+			adapter->res.max_tx_qs =
+				min(adapter->res.max_tx_qs, res.max_tx_qs);
+	}
+
+	/* For Lancer, SH etc read per-function resource limits from FW.
+	 * GET_FUNC_CONFIG returns per function guaranteed limits.
+	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
+	 */
 	if (!BEx_chip(adapter)) {
-		status = be_cmd_get_func_config(adapter);
-		if (!status)
-			profile_present = true;
-	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
-		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
-	}
-
-	if (profile_present) {
-		/* Sanity fixes for Lancer */
-		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
-					      BE_UC_PMAC_COUNT);
-		adapter->max_vlans = min_t(u16, adapter->max_vlans,
-					   BE_NUM_VLANS_SUPPORTED);
-		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
-					       BE_MAX_MC);
-		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
-					       MAX_TX_QS);
-		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
-						BE3_MAX_RSS_QS);
-		adapter->max_event_queues = min_t(u16,
-						  adapter->max_event_queues,
-						  BE3_MAX_RSS_QS);
-
-		if (adapter->max_rss_queues &&
-		    adapter->max_rss_queues == adapter->max_rx_queues)
-			adapter->max_rss_queues -= 1;
-
-		if (adapter->max_event_queues < adapter->max_rss_queues)
-			adapter->max_rss_queues = adapter->max_event_queues;
-
-	} else {
-		if (be_physfn(adapter))
-			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
-		else
-			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
-
-		if (adapter->function_mode & FLEX10_MODE)
-			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
-		else
-			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
-
-		adapter->max_mcast_mac = BE_MAX_MC;
-		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
-		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
-					       MAX_TX_QS);
-		adapter->max_rss_queues = (adapter->be3_native) ?
-					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
-		adapter->max_event_queues = BE3_MAX_RSS_QS;
-
-		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
-					BE_IF_FLAGS_BROADCAST |
-					BE_IF_FLAGS_MULTICAST |
-					BE_IF_FLAGS_PASS_L3L4_ERRORS |
-					BE_IF_FLAGS_MCAST_PROMISCUOUS |
-					BE_IF_FLAGS_VLAN_PROMISCUOUS |
-					BE_IF_FLAGS_PROMISCUOUS;
-
-		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
-			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
-	}
-
-	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
-	if (pos) {
-		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
-				     &dev_num_vfs);
-		if (BE3_chip(adapter))
-			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
-		adapter->dev_num_vfs = dev_num_vfs;
-	}
-}
+		status = be_cmd_get_func_config(adapter, &res);
+		if (status)
+			return status;
+
+		/* If RoCE may be enabled stash away half the EQs for RoCE */
+		if (be_roce_supported(adapter))
+			res.max_evt_qs /= 2;
+		adapter->res = res;
+
+		if (be_physfn(adapter)) {
+			status = be_cmd_get_profile_config(adapter, &res, 0);
+			if (status)
+				return status;
+			adapter->res.max_vfs = res.max_vfs;
+		}
+
+		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
+			 be_max_txqs(adapter), be_max_rxqs(adapter),
+			 be_max_rss(adapter), be_max_eqs(adapter),
+			 be_max_vfs(adapter));
+		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
+			 be_max_uc(adapter), be_max_mc(adapter),
+			 be_max_vlans(adapter));
+	}
+
+	return 0;
+}
 
 /* Routine to query per function resource limits */
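The hunk above collapses field-by-field min_t() clamping into a single struct be_resources snapshot that is clamped against chip limits and then committed to adapter->res in one assignment. A minimal, self-contained sketch of that clamp-then-commit idea (all names and numbers here are hypothetical, not the driver's):

    #include <stdio.h>

    /* Firmware reports per-function limits; the driver clamps them against
     * chip-wide maximums before committing the snapshot. */
    struct res_limits {
            unsigned int max_tx_qs;
            unsigned int max_evt_qs;
    };

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            struct res_limits fw = { .max_tx_qs = 24, .max_evt_qs = 32 };
            struct res_limits chip = { .max_tx_qs = 16, .max_evt_qs = 16 };
            struct res_limits res;

            res.max_tx_qs = min_u(fw.max_tx_qs, chip.max_tx_qs);
            res.max_evt_qs = min_u(fw.max_evt_qs, chip.max_evt_qs);
            /* stash away half the event queues when RoCE may claim them */
            res.max_evt_qs /= 2;

            printf("txqs %u, eqs %u\n", res.max_tx_qs, res.max_evt_qs);
            return 0;
    }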
@@ -3095,100 +3061,171 @@ static int be_get_config(struct be_adapter *adapter)
 				     &adapter->function_caps,
 				     &adapter->asic_rev);
 	if (status)
-		goto err;
+		return status;
 
-	be_get_resources(adapter);
+	status = be_get_resources(adapter);
+	if (status)
+		return status;
 
 	/* primary mac needs 1 pmac entry */
-	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
-				   sizeof(u32), GFP_KERNEL);
-	if (!adapter->pmac_id) {
-		status = -ENOMEM;
-		goto err;
-	}
+	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
+				   GFP_KERNEL);
+	if (!adapter->pmac_id)
+		return -ENOMEM;
 
-err:
-	return status;
+	/* Sanitize cfg_num_qs based on HW and platform limits */
+	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
+
+	return 0;
 }
 
-static int be_setup(struct be_adapter *adapter)
+static int be_mac_setup(struct be_adapter *adapter)
 {
-	struct device *dev = &adapter->pdev->dev;
-	u32 en_flags;
-	u32 tx_fc, rx_fc;
-	int status;
 	u8 mac[ETH_ALEN];
-	bool active_mac;
+	int status;
 
-	be_setup_init(adapter);
+	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+		status = be_cmd_get_perm_mac(adapter, mac);
+		if (status)
+			return status;
 
-	if (!lancer_chip(adapter))
-		be_cmd_req_native_mode(adapter);
+		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+	} else {
+		/* Maybe the HW was reset; dev_addr must be re-programmed */
+		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+	}
 
-	status = be_get_config(adapter);
+	/* On BE3 VFs this cmd may fail due to lack of privilege.
+	 * Ignore the failure as in this case pmac_id is fetched
+	 * in the IFACE_CREATE cmd.
+	 */
+	be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+			&adapter->pmac_id[0], 0);
+	return 0;
+}
+
+static void be_schedule_worker(struct be_adapter *adapter)
+{
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+}
+
+static int be_setup_queues(struct be_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int status;
+
+	status = be_evt_queues_create(adapter);
 	if (status)
 		goto err;
 
-	status = be_msix_enable(adapter);
+	status = be_tx_qs_create(adapter);
 	if (status)
 		goto err;
 
-	status = be_evt_queues_create(adapter);
+	status = be_rx_cqs_create(adapter);
 	if (status)
 		goto err;
 
-	status = be_tx_cqs_create(adapter);
+	status = be_mcc_queues_create(adapter);
 	if (status)
 		goto err;
 
-	status = be_rx_cqs_create(adapter);
+	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
 	if (status)
 		goto err;
 
-	status = be_mcc_queues_create(adapter);
+	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
 	if (status)
 		goto err;
 
-	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
-	/* In UMC mode FW does not return right privileges.
-	 * Override with correct privilege equivalent to PF.
+	return 0;
+err:
+	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
+	return status;
+}
+
+int be_update_queues(struct be_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int status;
+
+	if (netif_running(netdev))
+		be_close(netdev);
+
+	be_cancel_worker(adapter);
+
+	/* If any vectors have been shared with RoCE we cannot re-program
+	 * the MSIx table.
 	 */
-	if (be_is_mc(adapter))
-		adapter->cmd_privileges = MAX_PRIVILEGES;
+	if (!adapter->num_msix_roce_vec)
+		be_msix_disable(adapter);
 
-	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+	be_clear_queues(adapter);
 
-	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
-		en_flags |= BE_IF_FLAGS_RSS;
+	if (!msix_enabled(adapter)) {
+		status = be_msix_enable(adapter);
+		if (status)
+			return status;
+	}
 
-	en_flags = en_flags & adapter->if_cap_flags;
+	status = be_setup_queues(adapter);
+	if (status)
+		return status;
 
-	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
-				  &adapter->if_handle, 0);
-	if (status != 0)
+	be_schedule_worker(adapter);
+
+	if (netif_running(netdev))
+		status = be_open(netdev);
+
+	return status;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+	struct device *dev = &adapter->pdev->dev;
+	u32 tx_fc, rx_fc, en_flags;
+	int status;
+
+	be_setup_init(adapter);
+
+	if (!lancer_chip(adapter))
+		be_cmd_req_native_mode(adapter);
+
+	status = be_get_config(adapter);
+	if (status)
 		goto err;
 
-	memset(mac, 0, ETH_ALEN);
-	active_mac = false;
-	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
-				 &active_mac, &adapter->pmac_id[0]);
-	if (status != 0)
+	status = be_msix_enable(adapter);
+	if (status)
 		goto err;
 
-	if (!active_mac) {
-		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
-					 &adapter->pmac_id[0], 0);
-		if (status != 0)
-			goto err;
-	}
+	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
+		en_flags |= BE_IF_FLAGS_RSS;
+	en_flags = en_flags & be_if_cap_flags(adapter);
+	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+				  &adapter->if_handle, 0);
+	if (status)
+		goto err;
 
-	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
-		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
-		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-	}
+	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+	rtnl_lock();
+	status = be_setup_queues(adapter);
+	rtnl_unlock();
+	if (status)
+		goto err;
 
-	status = be_tx_qs_create(adapter);
+	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
+	/* In UMC mode FW does not return right privileges.
+	 * Override with correct privilege equivalent to PF.
+	 */
+	if (be_is_mc(adapter))
+		adapter->cmd_privileges = MAX_PRIVILEGES;
+
+	status = be_mac_setup(adapter);
 	if (status)
 		goto err;
 
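be_setup() now brings the queues up via be_setup_queues() inside rtnl_lock()/rtnl_unlock(), because netif_set_real_num_tx_queues() and netif_set_real_num_rx_queues() must be called with the RTNL held; be_update_queues() skips the locking because its callers already hold it. A schematic of that locking rule, assuming a hypothetical my_setup_queues() helper:

    /* Sketch only: netif_set_real_num_{tx,rx}_queues() must run under the
     * RTNL, so a setup path that does not already hold it takes the lock
     * just around the queue bring-up.
     */
    static int my_bring_up(struct net_device *netdev)
    {
            int err;

            rtnl_lock();
            err = my_setup_queues(netdev); /* calls netif_set_real_num_*_queues() */
            rtnl_unlock();

            return err;
    }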
@@ -3205,8 +3242,8 @@ static int be_setup(struct be_adapter *adapter)
 		be_cmd_set_flow_control(adapter, adapter->tx_fc,
 					adapter->rx_fc);
 
-	if (be_physfn(adapter)) {
-		if (adapter->dev_num_vfs)
+	if (be_physfn(adapter) && num_vfs) {
+		if (be_max_vfs(adapter))
 			be_vf_setup(adapter);
 		else
 			dev_warn(dev, "device doesn't support SRIOV\n");
@@ -3216,8 +3253,7 @@ static int be_setup(struct be_adapter *adapter)
 	if (!status && be_pause_supported(adapter))
 		adapter->phy.fc_autoneg = 1;
 
-	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+	be_schedule_worker(adapter);
 	return 0;
 err:
 	be_clear(adapter);
@@ -3241,7 +3277,7 @@ static void be_netpoll(struct net_device *netdev)
 #endif
 
 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
-char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
 
 static bool be_flash_redboot(struct be_adapter *adapter,
 			     const u8 *p, u32 img_start, int image_size,
@@ -3298,7 +3334,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
 
 }
 
-struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
+static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
 					 int header_size,
 					 const struct firmware *fw)
 {
@@ -3760,6 +3796,74 @@ fw_exit:
 	return status;
 }
 
+static int be_ndo_bridge_setlink(struct net_device *dev,
+				 struct nlmsghdr *nlh)
+{
+	struct be_adapter *adapter = netdev_priv(dev);
+	struct nlattr *attr, *br_spec;
+	int rem;
+	int status = 0;
+	u16 mode = 0;
+
+	if (!sriov_enabled(adapter))
+		return -EOPNOTSUPP;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		mode = nla_get_u16(attr);
+		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+			return -EINVAL;
+
+		status = be_cmd_set_hsw_config(adapter, 0, 0,
+					       adapter->if_handle,
+					       mode == BRIDGE_MODE_VEPA ?
+					       PORT_FWD_TYPE_VEPA :
+					       PORT_FWD_TYPE_VEB);
+		if (status)
+			goto err;
+
+		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
+			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+		return status;
+	}
+err:
+	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
+		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+	return status;
+}
+
+static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				 struct net_device *dev,
+				 u32 filter_mask)
+{
+	struct be_adapter *adapter = netdev_priv(dev);
+	int status = 0;
+	u8 hsw_mode;
+
+	if (!sriov_enabled(adapter))
+		return 0;
+
+	/* BE and Lancer chips support VEB mode only */
+	if (BEx_chip(adapter) || lancer_chip(adapter)) {
+		hsw_mode = PORT_FWD_TYPE_VEB;
+	} else {
+		status = be_cmd_get_hsw_config(adapter, NULL, 0,
+					       adapter->if_handle, &hsw_mode);
+		if (status)
+			return 0;
+	}
+
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+				       hsw_mode == PORT_FWD_TYPE_VEPA ?
+				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
+}
+
 static const struct net_device_ops be_netdev_ops = {
 	.ndo_open		= be_open,
 	.ndo_stop		= be_close,
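The two new ndo hooks translate IFLA_BRIDGE_MODE netlink attributes into the NIC's VEB/VEPA port-forwarding setting. From user space they are normally exercised with iproute2's bridge tool; for example (assuming an SR-IOV-enabled interface named eth0, and iproute2 syntax of this era):

    bridge link set dev eth0 hwmode vepa
    bridge link show dev eth0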
@@ -3778,13 +3882,13 @@ static const struct net_device_ops be_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= be_netpoll,
 #endif
+	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
+	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
 };
 
 static void be_netdev_init(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_eq_obj *eqo;
-	int i;
 
 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
@@ -3807,9 +3911,6 @@ static void be_netdev_init(struct net_device *netdev)
 	netdev->netdev_ops = &be_netdev_ops;
 
 	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
-
-	for_all_evt_queues(adapter, eqo, i)
-		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
 }
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -3916,9 +4017,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
-	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
-					   &rx_filter->dma,
-					   GFP_KERNEL | __GFP_ZERO);
+	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
+					    rx_filter->size, &rx_filter->dma,
+					    GFP_KERNEL);
 	if (rx_filter->va == NULL) {
 		status = -ENOMEM;
 		goto free_mbox;
@@ -3964,8 +4065,8 @@ static int be_stats_init(struct be_adapter *adapter)
 		/* BE3 and Skyhawk */
 		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
 
-	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
-				     GFP_KERNEL | __GFP_ZERO);
+	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+				      GFP_KERNEL);
 	if (cmd->va == NULL)
 		return -1;
 	return 0;
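Both allocations above move from dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) to the dma_zalloc_coherent() wrapper. The wrapper is a thin convenience, defined in include/linux/dma-mapping.h of this era roughly as:

    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flag)
    {
            return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
    }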
@@ -4072,6 +4173,7 @@ static int be_get_initial_config(struct be_adapter *adapter)
 	level = be_get_fw_log_level(adapter);
 	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
 
+	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
 	return 0;
 }
 
@@ -4164,7 +4266,8 @@ static void be_worker(struct work_struct *work)
 		be_cmd_get_stats(adapter, &adapter->stats_cmd);
 	}
 
-	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+	if (be_physfn(adapter) &&
+	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
 		be_cmd_get_die_temperature(adapter);
 
 	for_all_rx_queues(adapter, rxo, i) {
@@ -4253,7 +4356,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
 	status = pci_enable_pcie_error_reporting(pdev);
 	if (status)
-		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
+		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
 
 	status = be_ctrl_init(adapter);
 	if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index f3d126dcc104..9cd5415fe017 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -60,7 +60,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
 	 */
 	num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
 	dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
-	dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS);
+	dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS);
 	/* provide start index of the vector,
 	 * so in case of linear usage,
 	 * it can use the base as starting point.
@@ -93,7 +93,7 @@ void be_roce_dev_add(struct be_adapter *adapter)
 	}
 }
 
-void _be_roce_dev_remove(struct be_adapter *adapter)
+static void _be_roce_dev_remove(struct be_adapter *adapter)
 {
 	if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
 		ocrdma_drv->remove(adapter->ocrdma_dev);
@@ -110,7 +110,7 @@ void be_roce_dev_remove(struct be_adapter *adapter)
 	}
 }
 
-void _be_roce_dev_open(struct be_adapter *adapter)
+static void _be_roce_dev_open(struct be_adapter *adapter)
 {
 	if (ocrdma_drv && adapter->ocrdma_dev &&
 	    ocrdma_drv->state_change_handler)
@@ -126,7 +126,7 @@ void be_roce_dev_open(struct be_adapter *adapter)
 	}
 }
 
-void _be_roce_dev_close(struct be_adapter *adapter)
+static void _be_roce_dev_close(struct be_adapter *adapter)
 {
 	if (ocrdma_drv && adapter->ocrdma_dev &&
 	    ocrdma_drv->state_change_handler)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 276572998463..2cd1129e19af 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -29,7 +29,7 @@ enum be_interrupt_mode {
 	BE_INTERRUPT_MODE_MSI	= 2,
 };
 
-#define MAX_ROCE_MSIX_VECTORS	16
+#define MAX_MSIX_VECTORS	32
 struct be_dev_info {
 	u8 __iomem *db;
 	u64 unmapped_db;
@@ -45,7 +45,7 @@ struct be_dev_info {
 	struct {
 		int num_vectors;
 		int start_vector;
-		u32 vector_list[MAX_ROCE_MSIX_VECTORS];
+		u32 vector_list[MAX_MSIX_VECTORS];
 	} msix;
 };
 
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index cf579fb39bc5..4de8cfd149cf 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1030,8 +1030,8 @@ static int ethoc_probe(struct platform_device *pdev)
 	}
 
 	/* Allow the platform setup code to pass in a MAC address. */
-	if (pdev->dev.platform_data) {
-		struct ethoc_platform_data *pdata = pdev->dev.platform_data;
+	if (dev_get_platdata(&pdev->dev)) {
+		struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
 		memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
 		priv->phy_id = pdata->phy_id;
 	} else {
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 934e1ae279f0..212f44b3a773 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -778,10 +778,9 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
 {
 	int i;
 
-	priv->descs = dma_alloc_coherent(priv->dev,
-					 sizeof(struct ftgmac100_descs),
-					 &priv->descs_dma_addr,
-					 GFP_KERNEL | __GFP_ZERO);
+	priv->descs = dma_zalloc_coherent(priv->dev,
+					  sizeof(struct ftgmac100_descs),
+					  &priv->descs_dma_addr, GFP_KERNEL);
 	if (!priv->descs)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 4658f4cc1969..8be5b40c0a12 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,10 +732,10 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
 {
 	int i;
 
-	priv->descs = dma_alloc_coherent(priv->dev,
-					 sizeof(struct ftmac100_descs),
-					 &priv->descs_dma_addr,
-					 GFP_KERNEL | __GFP_ZERO);
+	priv->descs = dma_zalloc_coherent(priv->dev,
+					  sizeof(struct ftmac100_descs),
+					  &priv->descs_dma_addr,
+					  GFP_KERNEL);
 	if (!priv->descs)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ae236009f1a8..0120217a16dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,9 @@ struct fec_enet_private {
 	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;
 
+	unsigned short tx_ring_size;
+	unsigned short rx_ring_size;
+
 	struct	platform_device *pdev;
 
 	int	opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c610a2716be4..f9aacf5d8523 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -69,7 +69,6 @@ static void set_multicast_list(struct net_device *ndev);
 #endif
 
 #define DRIVER_NAME	"fec"
-#define FEC_NAPI_WEIGHT	64
 
 /* Pause frame feild and FIFO threshold */
 #define FEC_ENET_FCE	(1 << 5)
@@ -239,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 static int mii_cnt;
 
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex + 1);
+	struct bufdesc *new_bd = bdp + 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+			ex_base : ex_new_bd);
 	else
-		return bdp + 1;
+		return (new_bd >= (base + ring_size)) ?
+			base : new_bd;
 }
 
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex - 1);
+	struct bufdesc *new_bd = bdp - 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd < ex_base) ?
+			(ex_new_bd + ring_size) : ex_new_bd);
 	else
-		return bdp - 1;
+		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
 }
 
 static void *swap_buffer(void *bufaddr, int len)
@@ -380,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}
 
-	bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
 	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
 	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
 		fep->delay_work.trig_tx = true;
@@ -389,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	if (status & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
 
 	fep->cur_tx = bdp;
 
@@ -417,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
 	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		if (bdp->cbd_bufaddr)
 			bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		else
 			bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	fep->cur_rx = fep->rx_bd_base;
@@ -436,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
 	fep->cur_tx = bdp;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		bdp->cbd_sc = 0;
@@ -445,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
 			fep->tx_skbuff[i] = NULL;
 		}
 		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 	fep->dirty_tx = bdp;
 }
@@ -510,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
 	if (fep->bufdesc_ex)
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 	else
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
-			* RX_RING_SIZE,	fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 
 
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -727,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
 	bdp = fep->dirty_tx;
 
 	/* get next bdp of dirty_tx */
-	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
 
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
 
@@ -800,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
 		fep->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
-		if (status & BD_ENET_TX_WRAP)
-			bdp = fep->tx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
@@ -993,10 +1018,8 @@ rx_processing_done:
 		}
 
 		/* Update BD pointer to next entry */
-		if (status & BD_ENET_RX_WRAP)
-			bdp = fep->rx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
+
 		/* Doing this here will keep the FEC running while we process
 		 * incoming frames.  On a heavily loaded network, we should be
 		 * able to keep up at the expense of system resources.
@@ -1059,7 +1082,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 static void fec_get_mac(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
 	unsigned char *iap, tmpaddr[ETH_ALEN];
 
 	/*
@@ -1099,10 +1122,10 @@ static void fec_get_mac(struct net_device *ndev)
 	 * 4) FEC mac registers set by bootloader
 	 */
 	if (!is_valid_ether_addr(iap)) {
-		*((unsigned long *) &tmpaddr[0]) =
-			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
-		*((unsigned short *) &tmpaddr[4]) =
-			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+		*((__be32 *) &tmpaddr[0]) =
+			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
+		*((__be16 *) &tmpaddr[4]) =
+			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
 		iap = &tmpaddr[0];
 	}
 
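The old code cast a 6-byte array through unsigned long/unsigned short and converted in the wrong direction with be*_to_cpu(); the fix stores __be32/__be16 values via cpu_to_be32()/cpu_to_be16(), so the MAC bytes land in network order on any host. A self-contained sketch of the intended byte layout (the register values are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Store a 32-bit value into a byte array in big-endian order, which is
     * what a __be32 store via cpu_to_be32() achieves on any host CPU. */
    static void put_be32(uint8_t *p, uint32_t v)
    {
            p[0] = v >> 24;
            p[1] = v >> 16;
            p[2] = v >> 8;
            p[3] = v;
    }

    int main(void)
    {
            uint8_t mac[6];
            uint32_t addr_low = 0x00045abc;  /* hypothetical FEC_ADDR_LOW */
            uint16_t addr_high = 0xde01;     /* hypothetical FEC_ADDR_HIGH >> 16 */

            put_be32(&mac[0], addr_low);
            mac[4] = addr_high >> 8;
            mac[5] = addr_high & 0xff;

            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
            return 0;
    }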
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	struct bufdesc	*bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = fep->rx_skbuff[i];
 
 		if (bdp->cbd_bufaddr)
@@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (skb)
 			dev_kfree_skb(skb);
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++)
+	for (i = 0; i < fep->tx_ring_size; i++)
 		kfree(fep->tx_bounce[i]);
 }
 
@@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	struct bufdesc	*bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
@@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_RX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
 
 		bdp->cbd_sc = 0;
@@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	return 0;
@@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Get the Ethernet address */
 	fec_get_mac(ndev);
 
+	/* init the tx & rx ring size */
+	fep->tx_ring_size = TX_RING_SIZE;
+	fep->rx_ring_size = RX_RING_SIZE;
+
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
 	if (fep->bufdesc_ex)
 		fep->tx_bd_base = (struct bufdesc *)
-			(((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
 	else
-		fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+		fep->tx_bd_base = cbd_base + fep->rx_ring_size;
 
 	/* The FEC Ethernet specific entries in the device structure */
 	ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1980,7 +2007,7 @@ static int fec_enet_init(struct net_device *ndev)
 	ndev->ethtool_ops = &fec_enet_ethtool_ops;
 
 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
-	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
 
 	if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
 		/* enable hw VLAN support */
@@ -2055,10 +2082,6 @@ fec_probe(struct platform_device *pdev)
 	if (of_id)
 		pdev->id_entry = of_id->data;
 
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r)
-		return -ENXIO;
-
 	/* Init network device */
 	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
 	if (!ndev)
@@ -2076,6 +2099,7 @@ fec_probe(struct platform_device *pdev)
 	fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
 #endif
 
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
 	if (IS_ERR(fep->hwp)) {
 		ret = PTR_ERR(fep->hwp);
@@ -2091,7 +2115,7 @@ fec_probe(struct platform_device *pdev)
 
 	ret = of_get_phy_mode(pdev->dev.of_node);
 	if (ret < 0) {
-		pdata = pdev->dev.platform_data;
+		pdata = dev_get_platdata(&pdev->dev);
 		if (pdata)
 			fep->phy_interface = pdata->phy;
 		else
@@ -2125,10 +2149,25 @@ fec_probe(struct platform_device *pdev)
 		fep->bufdesc_ex = 0;
 	}
 
-	clk_prepare_enable(fep->clk_ahb);
-	clk_prepare_enable(fep->clk_ipg);
-	clk_prepare_enable(fep->clk_enet_out);
-	clk_prepare_enable(fep->clk_ptp);
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		goto failed_clk;
+
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	if (fep->clk_enet_out) {
+		ret = clk_prepare_enable(fep->clk_enet_out);
+		if (ret)
+			goto failed_clk_enet_out;
+	}
+
+	if (fep->clk_ptp) {
+		ret = clk_prepare_enable(fep->clk_ptp);
+		if (ret)
+			goto failed_clk_ptp;
+	}
 
 	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
 	if (!IS_ERR(fep->reg_phy)) {
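clk_prepare_enable() can fail, so the probe path now checks each call and unwinds the clocks already enabled through a ladder of goto labels, touching the optional enet_out/ptp clocks only when they exist. A condensed sketch of the enable/unwind ladder (two clocks instead of four; not the driver's literal code):

    static int my_enable_clocks(struct clk *clk_a, struct clk *clk_b)
    {
            int ret;

            ret = clk_prepare_enable(clk_a);
            if (ret)
                    return ret;

            ret = clk_prepare_enable(clk_b);
            if (ret)
                    goto err_disable_a;     /* undo in reverse order */

            return 0;

    err_disable_a:
            clk_disable_unprepare(clk_a);
            return ret;
    }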
@@ -2159,14 +2198,10 @@ fec_probe(struct platform_device *pdev)
 			ret = irq;
 			goto failed_irq;
 		}
-		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
-		if (ret) {
-			while (--i >= 0) {
-				irq = platform_get_irq(pdev, i);
-				free_irq(irq, ndev);
-			}
+		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
+				       IRQF_DISABLED, pdev->name, ndev);
+		if (ret)
 			goto failed_irq;
-		}
 	}
 
 	ret = fec_enet_mii_init(pdev);
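Switching to devm_request_irq() ties the IRQ to the device's lifetime, which is why the manual free_irq() loops disappear from the error path and from fec_drv_remove() in the hunks that follow. A minimal sketch of the managed pattern (my_isr is a hypothetical handler):

    static int my_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;

            /* No free_irq() needed in remove() or on error paths; the IRQ
             * is released automatically when the device is unbound. */
            return devm_request_irq(&pdev->dev, irq, my_isr, 0,
                                    dev_name(&pdev->dev), pdev);
    }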
@@ -2190,19 +2225,19 @@ failed_register:
 	fec_enet_mii_remove(fep);
 failed_mii_init:
 failed_irq:
-	for (i = 0; i < FEC_IRQ_NUM; i++) {
-		irq = platform_get_irq(pdev, i);
-		if (irq > 0)
-			free_irq(irq, ndev);
-	}
 failed_init:
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
 failed_regulator:
-	clk_disable_unprepare(fep->clk_ahb);
+	if (fep->clk_ptp)
+		clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ptp:
+	if (fep->clk_enet_out)
+		clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
 	clk_disable_unprepare(fep->clk_ipg);
-	clk_disable_unprepare(fep->clk_enet_out);
-	clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
 failed_clk:
 failed_ioremap:
 	free_netdev(ndev);
@@ -2215,25 +2250,21 @@ fec_drv_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
 
 	cancel_delayed_work_sync(&(fep->delay_work.delay_work));
 	unregister_netdev(ndev);
 	fec_enet_mii_remove(fep);
 	del_timer_sync(&fep->time_keep);
-	for (i = 0; i < FEC_IRQ_NUM; i++) {
-		int irq = platform_get_irq(pdev, i);
-		if (irq > 0)
-			free_irq(irq, ndev);
-	}
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
-	clk_disable_unprepare(fep->clk_ptp);
+	if (fep->clk_ptp)
+		clk_disable_unprepare(fep->clk_ptp);
 	if (fep->ptp_clock)
 		ptp_clock_unregister(fep->ptp_clock);
-	clk_disable_unprepare(fep->clk_enet_out);
-	clk_disable_unprepare(fep->clk_ahb);
+	if (fep->clk_enet_out)
+		clk_disable_unprepare(fep->clk_enet_out);
 	clk_disable_unprepare(fep->clk_ipg);
+	clk_disable_unprepare(fep->clk_ahb);
 	free_netdev(ndev);
 
 	return 0;
@@ -2250,9 +2281,12 @@ fec_suspend(struct device *dev)
 		fec_stop(ndev);
 		netif_device_detach(ndev);
 	}
-	clk_disable_unprepare(fep->clk_enet_out);
-	clk_disable_unprepare(fep->clk_ahb);
+	if (fep->clk_ptp)
+		clk_disable_unprepare(fep->clk_ptp);
+	if (fep->clk_enet_out)
+		clk_disable_unprepare(fep->clk_enet_out);
 	clk_disable_unprepare(fep->clk_ipg);
+	clk_disable_unprepare(fep->clk_ahb);
 
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
@@ -2273,15 +2307,44 @@ fec_resume(struct device *dev)
 		return ret;
 	}
 
-	clk_prepare_enable(fep->clk_enet_out);
-	clk_prepare_enable(fep->clk_ahb);
-	clk_prepare_enable(fep->clk_ipg);
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		goto failed_clk_ahb;
+
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	if (fep->clk_enet_out) {
+		ret = clk_prepare_enable(fep->clk_enet_out);
+		if (ret)
+			goto failed_clk_enet_out;
+	}
+
+	if (fep->clk_ptp) {
+		ret = clk_prepare_enable(fep->clk_ptp);
+		if (ret)
+			goto failed_clk_ptp;
+	}
+
 	if (netif_running(ndev)) {
 		fec_restart(ndev, fep->full_duplex);
 		netif_device_attach(ndev);
 	}
 
 	return 0;
+
+failed_clk_ptp:
+	if (fep->clk_enet_out)
+		clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+	clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+	if (fep->reg_phy)
+		regulator_disable(fep->reg_phy);
+	return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
 
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 360a578c2bb7..e0528900db02 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -123,12 +123,10 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
 
 static int mpc52xx_fec_mdio_remove(struct platform_device *of)
 {
-	struct device *dev = &of->dev;
-	struct mii_bus *bus = dev_get_drvdata(dev);
+	struct mii_bus *bus = platform_get_drvdata(of);
 	struct mpc52xx_fec_mdio_priv *priv = bus->priv;
 
 	mdiobus_unregister(bus);
-	dev_set_drvdata(dev, NULL);
 	iounmap(priv->regs);
 	kfree(priv);
 	mdiobus_free(bus);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 8de53a14a6f4..6b60582ce8cf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -583,7 +583,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
 					       struct sk_buff *skb)
 {
 	struct sk_buff *new_skb;
-	struct fs_enet_private *fep = netdev_priv(dev);
 
 	/* Alloc new skb */
 	new_skb = netdev_alloc_skb(dev, skb->len + 4);
@@ -1000,6 +999,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 	struct fs_enet_private *fep;
 	struct fs_platform_info *fpi;
 	const u32 *data;
+	struct clk *clk;
+	int err;
 	const u8 *mac_addr;
 	const char *phy_connection_type;
 	int privsize, len, ret = -ENODEV;
@@ -1037,6 +1038,20 @@ static int fs_enet_probe(struct platform_device *ofdev)
 		fpi->use_rmii = 1;
 	}
 
+	/* make clock lookup non-fatal (the driver is shared among platforms),
+	 * but require enable to succeed when a clock was specified/found,
+	 * keep a reference to the clock upon successful acquisition
+	 */
+	clk = devm_clk_get(&ofdev->dev, "per");
+	if (!IS_ERR(clk)) {
+		err = clk_prepare_enable(clk);
+		if (err) {
+			ret = err;
+			goto out_free_fpi;
+		}
+		fpi->clk_per = clk;
+	}
+
 	privsize = sizeof(*fep) +
 		   sizeof(struct sk_buff **) *
 		   (fpi->rx_ring + fpi->tx_ring);
@@ -1108,6 +1123,8 @@ out_free_dev:
 	free_netdev(ndev);
 out_put:
 	of_node_put(fpi->phy_node);
+	if (fpi->clk_per)
+		clk_disable_unprepare(fpi->clk_per);
 out_free_fpi:
 	kfree(fpi);
 	return ret;
@@ -1124,6 +1141,8 @@ static int fs_enet_remove(struct platform_device *ofdev)
 	fep->ops->cleanup_data(ndev);
 	dev_set_drvdata(fep->dev, NULL);
 	of_node_put(fep->fpi->phy_node);
+	if (fep->fpi->clk_per)
+		clk_disable_unprepare(fep->fpi->clk_per);
 	free_netdev(ndev);
 	return 0;
 }
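The probe change treats the "per" clock as optional: a failed devm_clk_get() is tolerated because the driver runs on platforms without that clock, but once a clock is found, clk_prepare_enable() must succeed and the reference is stashed in fpi->clk_per for the disable paths. A condensed sketch of that optional-clock pattern (function and variable names are hypothetical):

    static struct clk *my_get_optional_clk(struct device *dev, const char *id)
    {
            struct clk *clk;
            int err;

            clk = devm_clk_get(dev, id);
            if (IS_ERR(clk))
                    return NULL;            /* clock absent: not an error */

            err = clk_prepare_enable(clk);
            if (err)
                    return ERR_PTR(err);    /* present but unusable: fatal */

            return clk;                     /* caller keeps it for disable */
    }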
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c93a05654b46..c4f65067cf7c 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -409,7 +409,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 	priv->regs = priv->map + data->mii_offset;
 
 	new_bus->parent = &pdev->dev;
-	dev_set_drvdata(&pdev->dev, new_bus);
+	platform_set_drvdata(pdev, new_bus);
 
 	if (data->get_tbipa) {
 		for_each_child_of_node(np, tbi) {
@@ -468,8 +468,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
 
 	mdiobus_unregister(bus);
 
-	dev_set_drvdata(device, NULL);
-
 	iounmap(priv->map);
 	mdiobus_free(bus);
 
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 8d2db7b808b7..c4eaadeb572f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -593,7 +593,6 @@ static int gfar_parse_group(struct device_node *np,
 		return -EINVAL;
 	}
 
-	grp->grp_id = priv->num_grps;
 	grp->priv = priv;
 	spin_lock_init(&grp->grplock);
 	if (priv->mode == MQ_MG_MODE) {
@@ -1017,7 +1016,14 @@ static int gfar_probe(struct platform_device *ofdev)
 	/* We need to delay at least 3 TX clocks */
 	udelay(2);
 
-	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+	tempval = 0;
+	if (!priv->pause_aneg_en && priv->tx_pause_en)
+		tempval |= MACCFG1_TX_FLOW;
+	if (!priv->pause_aneg_en && priv->rx_pause_en)
+		tempval |= MACCFG1_RX_FLOW;
+	/* the soft reset bit is not self-resetting, so we need to
+	 * clear it before resuming normal operation
+	 */
 	gfar_write(&regs->maccfg1, tempval);
 
 	/* Initialize MACCFG2. */
@@ -1461,7 +1467,7 @@ static int init_phy(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	uint gigabit_support =
 		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
-		SUPPORTED_1000baseT_Full : 0;
+		GFAR_SUPPORTED_GBIT : 0;
 	phy_interface_t interface;
 
 	priv->oldlink = 0;
@@ -2052,6 +2058,24 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
 	return skip_txbd(bdp, 1, base, ring_size);
 }
 
+/* eTSEC12: csum generation not supported for some fcb offsets */
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+				       unsigned long fcb_addr)
+{
+	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+	       (fcb_addr % 0x20) > 0x18);
+}
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+				       unsigned int len)
+{
+	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+	       (len > 2500));
+}
+
 /* This is called by the kernel when a frame is ready for transmission.
  * It is pointed to by the dev->hard_start_xmit function pointer
  */
@@ -2064,23 +2088,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
 	u32 lstatus;
-	int i, rq = 0, do_tstamp = 0;
+	int i, rq = 0;
+	int do_tstamp, do_csum, do_vlan;
 	u32 bufaddr;
 	unsigned long flags;
-	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
-
-	/* TOE=1 frames larger than 2500 bytes may see excess delays
-	 * before start of transmission.
-	 */
-	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
-		     skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb->len > 2500)) {
-		int ret;
-
-		ret = skb_checksum_help(skb);
-		if (ret)
-			return ret;
-	}
+	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
 
 	rq = skb->queue_mapping;
 	tx_queue = priv->tx_queue[rq];
@@ -2088,21 +2100,23 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	base = tx_queue->tx_bd_base;
 	regs = tx_queue->grp->regs;
 
+	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+	do_vlan = vlan_tx_tag_present(skb);
+	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		    priv->hwts_tx_en;
+
+	if (do_csum || do_vlan)
+		fcb_len = GMAC_FCB_LEN;
+
 	/* check if time stamp should be generated */
-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-		     priv->hwts_tx_en)) {
-		do_tstamp = 1;
-		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
-	}
+	if (unlikely(do_tstamp))
+		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
 
 	/* make space for additional header when fcb is needed */
-	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-	     vlan_tx_tag_present(skb) ||
-	     unlikely(do_tstamp)) &&
-	    (skb_headroom(skb) < fcb_length)) {
+	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
 		struct sk_buff *skb_new;
 
-		skb_new = skb_realloc_headroom(skb, fcb_length);
+		skb_new = skb_realloc_headroom(skb, fcb_len);
 		if (!skb_new) {
 			dev->stats.tx_errors++;
 			kfree_skb(skb);
@@ -2133,7 +2147,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Update transmit stats */
-	tx_queue->stats.tx_bytes += skb->len;
+	bytes_sent = skb->len;
+	tx_queue->stats.tx_bytes += bytes_sent;
+	/* keep Tx bytes on wire for BQL accounting */
+	GFAR_CB(skb)->bytes_sent = bytes_sent;
 	tx_queue->stats.tx_packets++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
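GFAR_CB(skb)->bytes_sent snapshots the on-wire length at enqueue time because the FCB headroom manipulation can change skb->len before completion, and byte queue limits (BQL) require the enqueue-time and completion-time byte counts to match. A schematic of the required pairing (not this driver's literal code):

    /* In ndo_start_xmit(), after queuing the frame: */
            netdev_tx_sent_queue(txq, bytes_sent);

    /* In the TX completion handler, after reclaiming descriptors: */
            netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);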
@@ -2153,12 +2170,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else {
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
+			unsigned int frag_len;
 			/* Point at the next BD, wrapping as needed */
 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
-			length = skb_shinfo(skb)->frags[i].size;
+			frag_len = skb_shinfo(skb)->frags[i].size;
 
-			lstatus = txbdp->lstatus | length |
+			lstatus = txbdp->lstatus | frag_len |
 				  BD_LFLAG(TXBD_READY);
 
 			/* Handle the last BD specially */
@@ -2168,7 +2186,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			bufaddr = skb_frag_dma_map(priv->dev,
 						   &skb_shinfo(skb)->frags[i],
 						   0,
-						   length,
+						   frag_len,
 						   DMA_TO_DEVICE);
 
 			/* set the TxBD length and buffer pointer */
@@ -2185,36 +2203,38 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		memset(skb->data, 0, GMAC_TXPAL_LEN);
 	}
 
-	/* Set up checksumming */
-	if (CHECKSUM_PARTIAL == skb->ip_summed) {
+	/* Add TxFCB if required */
+	if (fcb_len) {
 		fcb = gfar_add_fcb(skb);
-		/* as specified by errata */
-		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
-			     ((unsigned long)fcb % 0x20) > 0x18)) {
+		lstatus |= BD_LFLAG(TXBD_TOE);
+	}
+
+	/* Set up checksumming */
+	if (do_csum) {
+		gfar_tx_checksum(skb, fcb, fcb_len);
+
+		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
 			__skb_pull(skb, GMAC_FCB_LEN);
 			skb_checksum_help(skb);
-		} else {
-			lstatus |= BD_LFLAG(TXBD_TOE);
-			gfar_tx_checksum(skb, fcb, fcb_length);
+			if (do_vlan || do_tstamp) {
+				/* put back a new fcb for vlan/tstamp TOE */
+				fcb = gfar_add_fcb(skb);
+			} else {
+				/* Tx TOE not used */
+				lstatus &= ~(BD_LFLAG(TXBD_TOE));
+				fcb = NULL;
+			}
 		}
 	}
 
-	if (vlan_tx_tag_present(skb)) {
-		if (unlikely(NULL == fcb)) {
-			fcb = gfar_add_fcb(skb);
-			lstatus |= BD_LFLAG(TXBD_TOE);
-		}
-
+	if (do_vlan)
 		gfar_tx_vlan(skb, fcb);
-	}
 
 	/* Setup tx hardware time stamping if requested */
 	if (unlikely(do_tstamp)) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		if (fcb == NULL)
-			fcb = gfar_add_fcb(skb);
 		fcb->ptp = 1;
-		lstatus |= BD_LFLAG(TXBD_TOE);
 	}
 
 	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
@@ -2226,15 +2246,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * the full frame length.
 	 */
 	if (unlikely(do_tstamp)) {
-		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
+		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-					 (skb_headlen(skb) - fcb_length);
+					 (skb_headlen(skb) - fcb_len);
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
 	} else {
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 	}
 
-	netdev_tx_sent_queue(txq, skb->len);
+	netdev_tx_sent_queue(txq, bytes_sent);
 
 	/* We can work in parallel with gfar_clean_tx_ring(), except
 	 * when modifying num_txbdfree. Note that we didn't grab the lock
@@ -2554,7 +2574,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 			bdp = next_txbd(bdp, base, tx_ring_size);
 		}
 
-		bytes_sent += skb->len;
+		bytes_sent += GFAR_CB(skb)->bytes_sent;
 
 		dev_kfree_skb_any(skb);
 
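The bytes_sent bookkeeping above is what keeps the driver's BQL (Byte Queue Limits) accounting symmetric: the on-wire byte count is recorded in the skb control buffer at transmit time and replayed at clean-up, so the value reported to netdev_tx_completed_queue() matches what was fed to netdev_tx_sent_queue() even after the FCB header has been pulled off the skb. A rough, driver-independent sketch of that pairing follows; the netdev_tx_* and netdev_get_tx_queue() helpers are the real kernel API, while the surrounding function names are illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative xmit path: snapshot the byte count before headers change */
static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb->queue_mapping);
	unsigned int bytes = skb->len;	/* bytes-on-wire snapshot */

	/* ... set up descriptors and hand the frame to hardware ... */

	netdev_tx_sent_queue(txq, bytes);
	return NETDEV_TX_OK;
}

/* Illustrative completion path: replay the exact same byte count */
static void sketch_clean(struct net_device *dev, unsigned int queue,
			 unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	netdev_tx_completed_queue(txq, pkts, bytes);
}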
@@ -3014,6 +3034,41 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
 	return IRQ_HANDLED;
 }
 
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+	struct phy_device *phydev = priv->phydev;
+	u32 val = 0;
+
+	if (!phydev->duplex)
+		return val;
+
+	if (!priv->pause_aneg_en) {
+		if (priv->tx_pause_en)
+			val |= MACCFG1_TX_FLOW;
+		if (priv->rx_pause_en)
+			val |= MACCFG1_RX_FLOW;
+	} else {
+		u16 lcl_adv, rmt_adv;
+		u8 flowctrl;
+		/* get link partner capabilities */
+		rmt_adv = 0;
+		if (phydev->pause)
+			rmt_adv = LPA_PAUSE_CAP;
+		if (phydev->asym_pause)
+			rmt_adv |= LPA_PAUSE_ASYM;
+
+		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+		if (flowctrl & FLOW_CTRL_TX)
+			val |= MACCFG1_TX_FLOW;
+		if (flowctrl & FLOW_CTRL_RX)
+			val |= MACCFG1_RX_FLOW;
+	}
+
+	return val;
+}
+
 /* Called every time the controller might need to be made
  * aware of new link state. The PHY code conveys this
  * information through variables in the phydev structure, and this
@@ -3032,6 +3087,7 @@ static void adjust_link(struct net_device *dev)
 	lock_tx_qs(priv);
 
 	if (phydev->link) {
+		u32 tempval1 = gfar_read(&regs->maccfg1);
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
 
@@ -3080,6 +3136,10 @@ static void adjust_link(struct net_device *dev)
 			priv->oldspeed = phydev->speed;
 		}
 
+		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+		tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+		gfar_write(&regs->maccfg1, tempval1);
 		gfar_write(&regs->maccfg2, tempval);
 		gfar_write(&regs->ecntrl, ecntrl);
 
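gfar_get_flowctrl_cfg() above delegates the actual pause resolution to the generic MII helpers. For reference, mii_resolve_flowctrl_fdx() in include/linux/mii.h implements, approximately, the standard 802.3 pause resolution table (ADVERTISE_PAUSE_* and LPA_PAUSE_* occupy the same bit positions, which is what makes the direct masking of local against remote bits work):

/* Approximate body of mii_resolve_flowctrl_fdx() from include/linux/mii.h */
static inline u8 resolve_flowctrl_fdx_sketch(u16 lcl_adv, u16 rmt_adv)
{
	u8 cap = 0;

	if (lcl_adv & rmt_adv & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;	/* symmetric pause */
	} else if (lcl_adv & rmt_adv & ADVERTISE_PAUSE_ASYM) {
		if (lcl_adv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;	/* we may pause the partner */
		else if (rmt_adv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;	/* the partner may pause us */
	}

	return cap;
}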
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04b552cd419d..04112b98ff5d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -146,6 +146,10 @@ extern const char gfar_driver_version[];
 			     | SUPPORTED_Autoneg \
 			     | SUPPORTED_MII)
 
+#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
+			     | SUPPORTED_Pause \
+			     | SUPPORTED_Asym_Pause)
+
 /* TBI register addresses */
 #define MII_TBICON		0x11
 
@@ -571,7 +575,7 @@ struct rxfcb {
 };
 
 struct gianfar_skb_cb {
-	int alignamount;
+	unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */
 };
 
 #define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
@@ -1009,7 +1013,6 @@ struct gfar_irqinfo {
  * @napi: the napi poll function
  * @priv: back pointer to the priv structure
  * @regs: the ioremapped register space for this group
- * @grp_id: group id for this group
  * @irqinfo: TX/RX/ER irq data for this group
  */
 
@@ -1018,11 +1021,10 @@ struct gfar_priv_grp {
 	struct napi_struct napi;
 	struct gfar_private *priv;
 	struct gfar __iomem *regs;
-	unsigned int grp_id;
+	unsigned int rstat;
 	unsigned long num_rx_queues;
 	unsigned long rx_bit_map;
 	/* cacheline 3 */
-	unsigned int rstat;
 	unsigned int tstat;
 	unsigned long num_tx_queues;
 	unsigned long tx_bit_map;
@@ -1102,7 +1104,11 @@ struct gfar_private {
 		/* Wake-on-LAN enabled */
 		wol_en:1,
 		/* Enable priorty based Tx scheduling in Hw */
-		prio_sched_en:1;
+		prio_sched_en:1,
+		/* Flow control flags */
+		pause_aneg_en:1,
+		tx_pause_en:1,
+		rx_pause_en:1;
 
 	/* The total tx and rx ring size for the enabled queues */
 	unsigned int total_tx_ring_size;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 21cd88124ca9..d3d7ede27ef1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -535,6 +535,78 @@ static int gfar_sringparam(struct net_device *dev,
 	return err;
 }
 
+static void gfar_gpauseparam(struct net_device *dev,
+			     struct ethtool_pauseparam *epause)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	epause->autoneg = !!priv->pause_aneg_en;
+	epause->rx_pause = !!priv->rx_pause_en;
+	epause->tx_pause = !!priv->tx_pause_en;
+}
+
+static int gfar_spauseparam(struct net_device *dev,
+			    struct ethtool_pauseparam *epause)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phydev;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 oldadv, newadv;
+
+	if (!(phydev->supported & SUPPORTED_Pause) ||
+	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+	     (epause->rx_pause != epause->tx_pause)))
+		return -EINVAL;
+
+	priv->rx_pause_en = priv->tx_pause_en = 0;
+	if (epause->rx_pause) {
+		priv->rx_pause_en = 1;
+
+		if (epause->tx_pause) {
+			priv->tx_pause_en = 1;
+			/* FLOW_CTRL_RX & TX */
+			newadv = ADVERTISED_Pause;
+		} else  /* FLOW_CTLR_RX */
+			newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+	} else if (epause->tx_pause) {
+		priv->tx_pause_en = 1;
+		/* FLOW_CTLR_TX */
+		newadv = ADVERTISED_Asym_Pause;
+	} else
+		newadv = 0;
+
+	if (epause->autoneg)
+		priv->pause_aneg_en = 1;
+	else
+		priv->pause_aneg_en = 0;
+
+	oldadv = phydev->advertising &
+		(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+	if (oldadv != newadv) {
+		phydev->advertising &=
+			~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+		phydev->advertising |= newadv;
+		if (phydev->autoneg)
+			/* inform link partner of our
+			 * new flow ctrl settings
+			 */
+			return phy_start_aneg(phydev);
+
+		if (!epause->autoneg) {
+			u32 tempval;
+			tempval = gfar_read(&regs->maccfg1);
+			tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+			if (priv->tx_pause_en)
+				tempval |= MACCFG1_TX_FLOW;
+			if (priv->rx_pause_en)
+				tempval |= MACCFG1_RX_FLOW;
+			gfar_write(&regs->maccfg1, tempval);
+		}
+	}
+
+	return 0;
+}
+
 int gfar_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct gfar_private *priv = netdev_priv(dev);
@@ -1806,6 +1878,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
 	.set_coalesce = gfar_scoalesce,
 	.get_ringparam = gfar_gringparam,
 	.set_ringparam = gfar_sringparam,
+	.get_pauseparam = gfar_gpauseparam,
+	.set_pauseparam = gfar_spauseparam,
 	.get_strings = gfar_gstrings,
 	.get_sset_count = gfar_sset_count,
 	.get_ethtool_stats = gfar_fill_stats,
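With the two ops wired into gfar_ethtool_ops, the standard ethtool flow-control interface should now work against gianfar: `ethtool -a ethX` reads the pause state back through gfar_gpauseparam(), and `ethtool -A ethX autoneg on rx on tx on` goes through gfar_spauseparam(), which restarts autonegotiation whenever the advertised pause bits change.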
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3c43dac894ec..5930c39672db 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3911,14 +3911,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
 
 static int ucc_geth_remove(struct platform_device* ofdev)
 {
-	struct device *device = &ofdev->dev;
-	struct net_device *dev = dev_get_drvdata(device);
+	struct net_device *dev = platform_get_drvdata(ofdev);
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
 	unregister_netdev(dev);
 	free_netdev(dev);
 	ucc_geth_memclean(ugeth);
-	dev_set_drvdata(device, NULL);
 
 	return 0;
 }
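This conversion (and the ibm/emac one below) swaps open-coded dev_get_drvdata(&ofdev->dev) for the platform_device helpers and drops the manual clearing of drvdata on remove, which the driver core already takes care of on unbind in kernels of this vintage. The helpers themselves are thin wrappers, roughly:

/* Approximate definitions from include/linux/platform_device.h */
static inline void *platform_get_drvdata_sketch(const struct platform_device *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

static inline void platform_set_drvdata_sketch(struct platform_device *pdev,
					       void *data)
{
	dev_set_drvdata(&pdev->dev, data);
}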
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 93346f00486b..79aef681ac85 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -133,8 +133,8 @@ struct rfd_struct
 	unsigned char  last;		/* Bit15,Last Frame on List / Bit14,suspend */
 	unsigned short next;		/* linkoffset to next RFD */
 	unsigned short rbd_offset;	/* pointeroffset to RBD-buffer */
-	unsigned char  dest[6];		/* ethernet-address, destination */
-	unsigned char  source[6];	/* ethernet-address, source */
+	unsigned char  dest[ETH_ALEN];	/* ethernet-address, destination */
+	unsigned char  source[ETH_ALEN];	/* ethernet-address, source */
 	unsigned short length;		/* 802.3 frame-length */
 	unsigned short zero_dummy;	/* dummy */
 };
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index d300a0c0eafc..6b5c7222342c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2312,7 +2312,7 @@ static int emac_check_deps(struct emac_instance *dev,
 		if (deps[i].ofdev == NULL)
 			continue;
 		if (deps[i].drvdata == NULL)
-			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
+			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
 		if (deps[i].drvdata != NULL)
 			there++;
 	}
@@ -2799,9 +2799,9 @@ static int emac_probe(struct platform_device *ofdev)
 		/* display more info about what's missing ? */
 		goto err_reg_unmap;
 	}
-	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
+	dev->mal = platform_get_drvdata(dev->mal_dev);
 	if (dev->mdio_dev != NULL)
-		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
+		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
 
 	/* Register with MAL */
 	dev->commac.ops = &emac_commac_ops;
@@ -2892,7 +2892,7 @@ static int emac_probe(struct platform_device *ofdev)
 	 * fully initialized
 	 */
 	wmb();
-	dev_set_drvdata(&ofdev->dev, dev);
+	platform_set_drvdata(ofdev, dev);
 
 	/* There's a new kid in town ! Let's tell everybody */
 	wake_up_all(&emac_probe_wait);
@@ -2951,12 +2951,10 @@ static int emac_probe(struct platform_device *ofdev)
 
 static int emac_remove(struct platform_device *ofdev)
 {
-	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
+	struct emac_instance *dev = platform_get_drvdata(ofdev);
 
 	DBG(dev, "remove" NL);
 
-	dev_set_drvdata(&ofdev->dev, NULL);
-
 	unregister_netdev(dev->ndev);
 
 	cancel_work_sync(&dev->reset_work);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 856ea66c9223..dac564c25440 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,8 +637,8 @@ static int mal_probe(struct platform_device *ofdev)
 	bd_size = sizeof(struct mal_descriptor) *
 		(NUM_TX_BUFF * mal->num_tx_chans +
 		 NUM_RX_BUFF * mal->num_rx_chans);
-	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
-					  GFP_KERNEL | __GFP_ZERO);
+	mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+					   GFP_KERNEL);
 	if (mal->bd_virt == NULL) {
 		err = -ENOMEM;
 		goto fail_unmap;
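dma_zalloc_coherent() is the zeroing variant of dma_alloc_coherent(); at the time of this merge it was a thin inline in include/linux/dma-mapping.h, approximately:

/* Approximate helper from include/linux/dma-mapping.h */
static inline void *dma_zalloc_coherent_sketch(struct device *dev, size_t size,
					       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

So the conversions here (and in e1000_ethtool.c below) are cosmetic: the explicit __GFP_ZERO simply moves into the helper.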
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 70fd55968844..5d41aee69d16 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -106,7 +106,7 @@ struct ibmveth_stat ibmveth_stats[] = {
 /* simple methods of getting data from the current rxq entry */
 static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
 {
-	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
 }
 
 static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
@@ -132,7 +132,7 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
 
 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
 {
-	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
+	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
 }
 
 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 43a794fab9ff..84066bafe057 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -164,14 +164,26 @@ struct ibmveth_adapter {
 	u64 tx_send_failed;
 };
 
+/*
+ * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
+ * so we don't need to byteswap the two elements. However since we use
+ * a union (ibmveth_buf_desc) to convert from the struct to a u64 we
+ * do end up with endian specific ordering of the elements and that
+ * needs correcting.
+ */
 struct ibmveth_buf_desc_fields {
+#ifdef __BIG_ENDIAN
+	u32 flags_len;
+	u32 address;
+#else
+	u32 address;
 	u32 flags_len;
+#endif
 #define IBMVETH_BUF_VALID	0x80000000
 #define IBMVETH_BUF_TOGGLE	0x40000000
 #define IBMVETH_BUF_NO_CSUM	0x02000000
 #define IBMVETH_BUF_CSUM_GOOD	0x01000000
 #define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
-	u32 address;
 };
 
 union ibmveth_buf_desc {
@@ -180,7 +192,7 @@ union ibmveth_buf_desc {
 };
 
 struct ibmveth_rx_q_entry {
-	u32 flags_off;
+	__be32 flags_off;
 #define IBMVETH_RXQ_TOGGLE		0x80000000
 #define IBMVETH_RXQ_TOGGLE_SHIFT	31
 #define IBMVETH_RXQ_VALID		0x40000000
@@ -188,7 +200,8 @@ struct ibmveth_rx_q_entry {
 #define IBMVETH_RXQ_CSUM_GOOD		0x01000000
 #define IBMVETH_RXQ_OFF_MASK		0x0000FFFF
 
-	u32 length;
+	__be32 length;
+	/* correlator is only used by the OS, no need to byte swap */
 	u64 correlator;
 };
 
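The field reordering matters because ibmveth converts the two u32s into a single u64 hypervisor argument through the ibmveth_buf_desc union, so in-memory field order decides which half of the u64 each value lands in. A hedged sketch of the effect (the union and the h_add_logical_lan_buffer() hcall wrapper are the driver's own; the len and dma_addr values are made up):

union ibmveth_buf_desc desc;

desc.fields.flags_len = IBMVETH_BUF_VALID | len;	/* must be the high word */
desc.fields.address = dma_addr;				/* must be the low word */

/* With the #ifdef __BIG_ENDIAN ordering above, desc.desc carries
 * flags_len in bits 63:32 and address in bits 31:0 on both LE and BE
 * hosts, which is the layout the hypervisor expects.
 */
h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);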
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 1fde90b96685..bdf5023724e7 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -1004,7 +1004,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
 	/* Check to see if the NIC has been initialized via nic_open,
 	 * before trying to read statistic registers.
 	 */
-	if (!test_bit(__LINK_STATE_START, &dev->state))
+	if (!netif_running(dev))
 		return &sp->stats;
 
 	sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
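The icplus fix replaces a hand-rolled probe of __LINK_STATE_START with the canonical helper; netif_running() in include/linux/netdevice.h is essentially that exact bit test:

/* Paraphrased from include/linux/netdevice.h */
static inline bool netif_running_sketch(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}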
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5115ae76a5d1..ada6e210279f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1175,15 +1175,12 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 		config->rx_discard_short_frames = 0x0;  /* 1=discard, 0=save */
 	}
 
-	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
-	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
-	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
+		     c + 0);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
+		     c + 8);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
+		     c + 16);
 	return 0;
 }
 
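%*ph is the printk extension that hex-dumps a small buffer, which is what lets the three eight-argument netif_printk() calls above collapse to a single pointer argument each. A minimal sketch with a made-up buffer:

u8 cfg[8] = { 0x16, 0x08, 0x00, 0x01, 0x00, 0x00, 0x32, 0x07 };

/* prints: cfg[00-07]=16 08 00 01 00 00 32 07 */
pr_debug("cfg[00-07]=%8ph\n", cfg);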
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 82a967c95598..73a8aeefb92a 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1019,8 +1019,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
-	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
-					GFP_KERNEL | __GFP_ZERO);
+	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					 GFP_KERNEL);
 	if (!txdr->desc) {
 		ret_val = 2;
 		goto err_nomem;
@@ -1077,8 +1077,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	}
 
 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-					GFP_KERNEL | __GFP_ZERO);
+	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					 GFP_KERNEL);
 	if (!rxdr->desc) {
 		ret_val = 6;
 		goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 4c303e2a7cb3..8fed74e3fa53 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1011,6 +1011,11 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 
 	/* Must release MDIO ownership and mutex after MAC reset. */
 	switch (hw->mac.type) {
+	case e1000_82573:
+		/* Release mutex only if the hw semaphore is acquired */
+		if (!ret_val)
+			e1000_put_hw_semaphore_82573(hw);
+		break;
 	case e1000_82574:
 	case e1000_82583:
 		/* Release mutex only if the hw semaphore is acquired */
@@ -2057,6 +2062,7 @@ const struct e1000_info e1000_82583_info = {
 				  | FLAG_HAS_JUMBO_FRAMES
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
 	.flags2			= FLAG2_DISABLE_ASPM_L0S
+				  | FLAG2_DISABLE_ASPM_L1
 				  | FLAG2_NO_DISABLE_RX,
 	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ffbc08f56c40..ad0edd11015d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -90,9 +90,6 @@ struct e1000_info;
 
 #define E1000_MNG_VLAN_NONE	(-1)
 
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS		(MAX_PS_BUFFERS - 1)
-
 #define DEFAULT_JUMBO		9234
 
 /* Time to wait before putting the device into D3 if there's no link (in ms). */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 59c22bf18701..a8633b8f0ac5 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -173,7 +173,7 @@ static int e1000_get_settings(struct net_device *netdev,
 			speed = adapter->link_speed;
 			ecmd->duplex = adapter->link_duplex - 1;
 		}
-	} else {
+	} else if (!pm_runtime_suspended(netdev->dev.parent)) {
 		u32 status = er32(STATUS);
 		if (status & E1000_STATUS_LU) {
 			if (status & E1000_STATUS_SPEED_1000)
@@ -264,6 +264,9 @@ static int e1000_set_settings(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	int ret_val = 0;
+
+	pm_runtime_get_sync(netdev->dev.parent);
 
 	/* When SoL/IDER sessions are active, autoneg/speed/duplex
 	 * cannot be changed
@@ -271,7 +274,8 @@ static int e1000_set_settings(struct net_device *netdev,
 	if (hw->phy.ops.check_reset_block &&
 	    hw->phy.ops.check_reset_block(hw)) {
 		e_err("Cannot change link characteristics when SoL/IDER is active.\n");
-		return -EINVAL;
+		ret_val = -EINVAL;
+		goto out;
 	}
 
 	/* MDI setting is only allowed when autoneg enabled because
@@ -279,13 +283,16 @@ static int e1000_set_settings(struct net_device *netdev,
 	 * duplex is forced.
 	 */
 	if (ecmd->eth_tp_mdix_ctrl) {
-		if (hw->phy.media_type != e1000_media_type_copper)
-			return -EOPNOTSUPP;
+		if (hw->phy.media_type != e1000_media_type_copper) {
+			ret_val = -EOPNOTSUPP;
+			goto out;
+		}
 
 		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
 		    (ecmd->autoneg != AUTONEG_ENABLE)) {
 			e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
-			return -EINVAL;
+			ret_val = -EINVAL;
+			goto out;
 		}
 	}
 
@@ -307,8 +314,8 @@ static int e1000_set_settings(struct net_device *netdev,
 		u32 speed = ethtool_cmd_speed(ecmd);
 		/* calling this overrides forced MDI setting */
 		if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
-			clear_bit(__E1000_RESETTING, &adapter->state);
-			return -EINVAL;
+			ret_val = -EINVAL;
+			goto out;
 		}
 	}
 
@@ -331,8 +338,10 @@ static int e1000_set_settings(struct net_device *netdev,
 		e1000e_reset(adapter);
 	}
 
+out:
+	pm_runtime_put_sync(netdev->dev.parent);
 	clear_bit(__E1000_RESETTING, &adapter->state);
-	return 0;
+	return ret_val;
 }
 
 static void e1000_get_pauseparam(struct net_device *netdev,
@@ -366,6 +375,8 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 		usleep_range(1000, 2000);
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
 		hw->fc.requested_mode = e1000_fc_default;
 		if (netif_running(adapter->netdev)) {
@@ -398,6 +409,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 	}
 
 out:
+	pm_runtime_put_sync(netdev->dev.parent);
 	clear_bit(__E1000_RESETTING, &adapter->state);
 	return retval;
 }
@@ -428,6 +440,8 @@ static void e1000_get_regs(struct net_device *netdev,
 	u32 *regs_buff = p;
 	u16 phy_data;
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
 	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
@@ -472,6 +486,8 @@ static void e1000_get_regs(struct net_device *netdev,
 	e1e_rphy(hw, MII_STAT1000, &phy_data);
 	regs_buff[24] = (u32)phy_data;	/* phy local receiver status */
 	regs_buff[25] = regs_buff[24];	/* phy remote receiver status */
+
+	pm_runtime_put_sync(netdev->dev.parent);
 }
 
 static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -504,6 +520,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
 	if (!eeprom_buff)
 		return -ENOMEM;
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
 		ret_val = e1000_read_nvm(hw, first_word,
 					 last_word - first_word + 1,
@@ -517,6 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
 		}
 	}
 
+	pm_runtime_put_sync(netdev->dev.parent);
+
 	if (ret_val) {
 		/* a read error occurred, throw away the result */
 		memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -566,6 +586,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
 
 	ptr = (void *)eeprom_buff;
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	if (eeprom->offset & 1) {
 		/* need read/modify/write of first changed EEPROM word */
 		/* only the second byte of the word is being modified */
@@ -606,6 +628,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
 	ret_val = e1000e_update_nvm_checksum(hw);
 
 out:
+	pm_runtime_put_sync(netdev->dev.parent);
 	kfree(eeprom_buff);
 	return ret_val;
 }
@@ -701,6 +724,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
 		}
 	}
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	e1000e_down(adapter);
 
 	/* We can't just free everything and then setup again, because the
@@ -739,6 +764,7 @@ err_setup_rx:
 	e1000e_free_tx_resources(temp_tx);
 err_setup:
 	e1000e_up(adapter);
+	pm_runtime_put_sync(netdev->dev.parent);
 free_temp:
 	vfree(temp_tx);
 	vfree(temp_rx);
@@ -1639,7 +1665,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			ret_val = 13;	/* ret_val is the same as mis-compare */
 			break;
 		}
-		if (jiffies >= (time + 20)) {
+		if (time_after(jiffies, time + 20)) {
 			ret_val = 14;	/* error code for time out error */
 			break;
 		}
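The loopback-test change swaps a raw jiffies comparison, which misbehaves when jiffies wraps around, for time_after() from include/linux/jiffies.h. Stripped of its typecheck() guards, the macro is approximately:

/* Wrap-safe "did a happen after b?" comparison for jiffies values */
#define time_after_sketch(a, b)	((long)((b) - (a)) < 0)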
@@ -1732,6 +1758,8 @@ static void e1000_diag_test(struct net_device *netdev,
 	u8 autoneg;
 	bool if_running = netif_running(netdev);
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	set_bit(__E1000_TESTING, &adapter->state);
 
 	if (!if_running) {
@@ -1817,6 +1845,8 @@ static void e1000_diag_test(struct net_device *netdev,
 	}
 
 	msleep_interruptible(4 * 1000);
+
+	pm_runtime_put_sync(netdev->dev.parent);
 }
 
 static void e1000_get_wol(struct net_device *netdev,
@@ -1891,6 +1921,8 @@ static int e1000_set_phys_id(struct net_device *netdev,
 
 	switch (state) {
 	case ETHTOOL_ID_ACTIVE:
+		pm_runtime_get_sync(netdev->dev.parent);
+
 		if (!hw->mac.ops.blink_led)
 			return 2;	/* cycle on/off twice per second */
 
@@ -1902,6 +1934,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
 		e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
 		hw->mac.ops.led_off(hw);
 		hw->mac.ops.cleanup_led(hw);
+		pm_runtime_put_sync(netdev->dev.parent);
 		break;
 
 	case ETHTOOL_ID_ON:
@@ -1912,6 +1945,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
 		hw->mac.ops.led_off(hw);
 		break;
 	}
+
 	return 0;
 }
 
@@ -1950,11 +1984,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
 		adapter->itr_setting = adapter->itr & ~3;
 	}
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	if (adapter->itr_setting != 0)
 		e1000e_write_itr(adapter, adapter->itr);
 	else
 		e1000e_write_itr(adapter, 0);
 
+	pm_runtime_put_sync(netdev->dev.parent);
+
 	return 0;
 }
 
@@ -1968,7 +2006,9 @@ static int e1000_nway_reset(struct net_device *netdev)
 	if (!adapter->hw.mac.autoneg)
 		return -EINVAL;
 
+	pm_runtime_get_sync(netdev->dev.parent);
 	e1000e_reinit_locked(adapter);
+	pm_runtime_put_sync(netdev->dev.parent);
 
 	return 0;
 }
@@ -1982,7 +2022,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 	int i;
 	char *p = NULL;
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	e1000e_get_stats64(netdev, &net_stats);
+
+	pm_runtime_put_sync(netdev->dev.parent);
+
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		switch (e1000_gstrings_stats[i].type) {
 		case NETDEV_STATS:
@@ -2033,7 +2078,11 @@ static int e1000_get_rxnfc(struct net_device *netdev,
 	case ETHTOOL_GRXFH: {
 		struct e1000_adapter *adapter = netdev_priv(netdev);
 		struct e1000_hw *hw = &adapter->hw;
-		u32 mrqc = er32(MRQC);
+		u32 mrqc;
+
+		pm_runtime_get_sync(netdev->dev.parent);
+		mrqc = er32(MRQC);
+		pm_runtime_put_sync(netdev->dev.parent);
 
 		if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
 			return 0;
@@ -2096,9 +2145,13 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 		return -EOPNOTSUPP;
 	}
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	ret_val = hw->phy.ops.acquire(hw);
-	if (ret_val)
+	if (ret_val) {
+		pm_runtime_put_sync(netdev->dev.parent);
 		return -EBUSY;
+	}
 
 	/* EEE Capability */
 	ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2117,14 +2170,11 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 
 	/* EEE PCS Status */
 	ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+	if (ret_val)
+		goto release;
 	if (hw->phy.type == e1000_phy_82579)
 		phy_data <<= 8;
 
-release:
-	hw->phy.ops.release(hw);
-	if (ret_val)
-		return -ENODATA;
-
 	/* Result of the EEE auto negotiation - there is no register that
 	 * has the status of the EEE negotiation so do a best-guess based
 	 * on whether Tx or Rx LPI indications have been received.
@@ -2136,7 +2186,14 @@ release:
 		edata->tx_lpi_enabled = true;
 	edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
 
-	return 0;
+release:
+	hw->phy.ops.release(hw);
+	if (ret_val)
+		ret_val = -ENODATA;
+
+	pm_runtime_put_sync(netdev->dev.parent);
+
+	return ret_val;
 }
 
 static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
@@ -2169,12 +2226,16 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
 
 	hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
 
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	/* reset the link */
 	if (netif_running(netdev))
 		e1000e_reinit_locked(adapter);
 	else
 		e1000e_reset(adapter);
 
+	pm_runtime_put_sync(netdev->dev.parent);
+
 	return 0;
 }
 
@@ -2212,19 +2273,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
 	return 0;
 }
 
-static int e1000e_ethtool_begin(struct net_device *netdev)
-{
-	return pm_runtime_get_sync(netdev->dev.parent);
-}
-
-static void e1000e_ethtool_complete(struct net_device *netdev)
-{
-	pm_runtime_put_sync(netdev->dev.parent);
-}
-
 static const struct ethtool_ops e1000_ethtool_ops = {
-	.begin			= e1000e_ethtool_begin,
-	.complete		= e1000e_ethtool_complete,
 	.get_settings		= e1000_get_settings,
 	.set_settings		= e1000_set_settings,
 	.get_drvinfo		= e1000_get_drvinfo,
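The e1000e ethtool rework above drops the blanket .begin/.complete runtime-PM hooks in favour of explicit pm_runtime_get_sync()/pm_runtime_put_sync() pairs inside each handler that actually touches hardware, so registers are never read from a runtime-suspended device and early-exit paths still drop their reference. Reduced to its essentials, the shape of the pattern is (the validation check and error value here are illustrative):

#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

static bool hw_state_ok(void)	/* illustrative stand-in for any check */
{
	return true;
}

static int sketch_ethtool_op(struct net_device *netdev)
{
	int ret = 0;

	pm_runtime_get_sync(netdev->dev.parent);	/* wake the device */

	if (!hw_state_ok()) {
		ret = -EINVAL;
		goto out;	/* error paths must still put */
	}

	/* ... MMIO/PHY access is safe against runtime suspend here ... */

out:
	pm_runtime_put_sync(netdev->dev.parent);
	return ret;
}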
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index a6f903a9b773..b7f38435d1fd 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -90,6 +90,10 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_LPT_I217_V		0x153B
 #define E1000_DEV_ID_PCH_LPTLP_I218_LM		0x155A
 #define E1000_DEV_ID_PCH_LPTLP_I218_V		0x1559
+#define E1000_DEV_ID_PCH_I218_LM2		0x15A0
+#define E1000_DEV_ID_PCH_I218_V2		0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3		0x15A2	/* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3		0x15A3	/* Wildcat Point PCH */
 
 #define E1000_REVISION_4	4
 
@@ -227,6 +231,10 @@ union e1000_rx_desc_extended {
 };
 
 #define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS	(MAX_PS_BUFFERS - 1)
+
 /* Receive Descriptor - Packet Split */
 union e1000_rx_desc_packet_split {
 	struct {
@@ -251,7 +259,8 @@ union e1000_rx_desc_packet_split {
 		} middle;
 		struct {
 			__le16 header_status;
-			__le16 length[3];	/* length of buffers 1-3 */
+			/* length of buffers 1-3 */
+			__le16 length[PS_PAGE_BUFFERS];
 		} upper;
 		__le64 reserved;
 	} wb;	/* writeback */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9dde390f7e71..af08188d7e62 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -185,6 +185,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 	u32 phy_id = 0;
 	s32 ret_val;
 	u16 retry_count;
+	u32 mac_reg = 0;
 
 	for (retry_count = 0; retry_count < 2; retry_count++) {
 		ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
@@ -203,11 +204,11 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 
 	if (hw->phy.id) {
 		if (hw->phy.id == phy_id)
-			return true;
+			goto out;
 	} else if (phy_id) {
 		hw->phy.id = phy_id;
 		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
-		return true;
+		goto out;
 	}
 
 	/* In case the PHY needs to be in mdio slow mode,
@@ -219,7 +220,22 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 	ret_val = e1000e_get_phy_id(hw);
 	hw->phy.ops.acquire(hw);
 
-	return !ret_val;
+	if (ret_val)
+		return false;
+out:
+	if (hw->mac.type == e1000_pch_lpt) {
+		/* Unforce SMBus mode in PHY */
+		e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+		e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
+		/* Unforce SMBus mode in MAC */
+		mac_reg = er32(CTRL_EXT);
+		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+		ew32(CTRL_EXT, mac_reg);
+	}
+
+	return true;
 }
 
 /**
@@ -233,7 +249,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 {
 	u32 mac_reg, fwsm = er32(FWSM);
 	s32 ret_val;
-	u16 phy_reg;
 
 	/* Gate automatic PHY configuration by hardware on managed and
 	 * non-managed 82579 and newer adapters.
@@ -262,22 +277,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
 		ew32(CTRL_EXT, mac_reg);
 
+		/* Wait 50 milliseconds for MAC to finish any retries
+		 * that it might be trying to perform from previous
+		 * attempts to acknowledge any phy read requests.
+		 */
+		msleep(50);
+
 		/* fall-through */
 	case e1000_pch2lan:
-		if (e1000_phy_is_accessible_pchlan(hw)) {
-			if (hw->mac.type == e1000_pch_lpt) {
-				/* Unforce SMBus mode in PHY */
-				e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
-				phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
-				e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
-
-				/* Unforce SMBus mode in MAC */
-				mac_reg = er32(CTRL_EXT);
-				mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
-				ew32(CTRL_EXT, mac_reg);
-			}
+		if (e1000_phy_is_accessible_pchlan(hw))
 			break;
-		}
 
 		/* fall-through */
 	case e1000_pchlan:
@@ -287,6 +296,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 
 		if (hw->phy.ops.check_reset_block(hw)) {
 			e_dbg("Required LANPHYPC toggle blocked by ME\n");
+			ret_val = -E1000_ERR_PHY;
 			break;
 		}
 
@@ -298,15 +308,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
 		ew32(FEXTNVM3, mac_reg);
 
-		if (hw->mac.type == e1000_pch_lpt) {
-			/* Toggling LANPHYPC brings the PHY out of SMBus mode
-			 * So ensure that the MAC is also out of SMBus mode
-			 */
-			mac_reg = er32(CTRL_EXT);
-			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
-			ew32(CTRL_EXT, mac_reg);
-		}
-
 		/* Toggle LANPHYPC Value bit */
 		mac_reg = er32(CTRL);
 		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -325,6 +326,21 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 				usleep_range(5000, 10000);
 			} while (!(er32(CTRL_EXT) &
 				   E1000_CTRL_EXT_LPCD) && count--);
+			usleep_range(30000, 60000);
+			if (e1000_phy_is_accessible_pchlan(hw))
+				break;
+
+			/* Toggling LANPHYPC brings the PHY out of SMBus mode
+			 * so ensure that the MAC is also out of SMBus mode
+			 */
+			mac_reg = er32(CTRL_EXT);
+			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+			ew32(CTRL_EXT, mac_reg);
+
+			if (e1000_phy_is_accessible_pchlan(hw))
+				break;
+
+			ret_val = -E1000_ERR_PHY;
 		}
 		break;
 	default:
@@ -332,13 +348,14 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 	}
 
 	hw->phy.ops.release(hw);
-
-	/* Reset the PHY before any access to it. Doing so, ensures
-	 * that the PHY is in a known good state before we read/write
-	 * PHY registers. The generic reset is sufficient here,
-	 * because we haven't determined the PHY type yet.
-	 */
-	ret_val = e1000e_phy_hw_reset_generic(hw);
+	if (!ret_val) {
+		/* Reset the PHY before any access to it. Doing so, ensures
+		 * that the PHY is in a known good state before we read/write
+		 * PHY registers. The generic reset is sufficient here,
+		 * because we haven't determined the PHY type yet.
+		 */
+		ret_val = e1000e_phy_hw_reset_generic(hw);
+	}
 
 out:
 	/* Ungate automatic PHY configuration on non-managed 82579 */
@@ -793,29 +810,31 @@ release:
  * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
  * preventing further DMA write requests. Workaround the issue by disabling
  * the de-assertion of the clock request when in 1Gpbs mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
  **/
 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
 {
 	u32 fextnvm6 = er32(FEXTNVM6);
+	u32 status = er32(STATUS);
 	s32 ret_val = 0;
+	u16 reg;
 
-	if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
-		u16 kmrn_reg;
-
+	if (link && (status & E1000_STATUS_SPEED_1000)) {
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			return ret_val;
 
 		ret_val =
 		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
-						&kmrn_reg);
+						&reg);
 		if (ret_val)
 			goto release;
 
 		ret_val =
 		    e1000e_write_kmrn_reg_locked(hw,
 						 E1000_KMRNCTRLSTA_K1_CONFIG,
-						 kmrn_reg &
+						 reg &
 						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
 		if (ret_val)
 			goto release;
@@ -827,12 +846,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
 		ret_val =
 		    e1000e_write_kmrn_reg_locked(hw,
 						 E1000_KMRNCTRLSTA_K1_CONFIG,
-						 kmrn_reg);
+						 reg);
 release:
 		hw->phy.ops.release(hw);
 	} else {
 		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
-		ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+		if (!link || ((status & E1000_STATUS_SPEED_100) &&
+			      (status & E1000_STATUS_FD)))
+			goto update_fextnvm6;
+
+		ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
+		if (ret_val)
+			return ret_val;
+
+		/* Clear link status transmit timeout */
+		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+		if (status & E1000_STATUS_SPEED_100) {
+			/* Set inband Tx timeout to 5x10us for 100Half */
+			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+			/* Do not extend the K1 entry latency for 100Half */
+			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+		} else {
+			/* Set inband Tx timeout to 50x10us for 10Full/Half */
+			reg |= 50 <<
+			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+			/* Extend the K1 entry latency for 10 Mbps */
+			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+		}
+
+		ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
+		if (ret_val)
+			return ret_val;
+
+update_fextnvm6:
+		ew32(FEXTNVM6, fextnvm6);
 	}
 
 	return ret_val;
@@ -993,7 +1045,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 
 	/* Work-around I218 hang issue */
 	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
-	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
+	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
 		if (ret_val)
 			return ret_val;
@@ -4168,7 +4222,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 	u16 phy_reg, device_id = hw->adapter->pdev->device;
 
 	if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
-	    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+	    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+	    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+	    (device_id == E1000_DEV_ID_PCH_I218_V3)) {
 		u32 fextnvm6 = er32(FEXTNVM6);
 
 		ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 80034a2b297c..59865695b282 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -93,6 +93,7 @@
93#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 93#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
94 94
95#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 95#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
96#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
96 97
97#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 98#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
98 99
@@ -197,6 +198,11 @@
197 198
198#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ 199#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
199 200
201/* Inband Control */
202#define I217_INBAND_CTRL PHY_REG(770, 18)
203#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00
204#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8
205
200/* PHY Low Power Idle Control */ 206/* PHY Low Power Idle Control */
201#define I82579_LPI_CTRL PHY_REG(772, 20) 207#define I82579_LPI_CTRL PHY_REG(772, 20)
202#define I82579_LPI_CTRL_100_ENABLE 0x2000 208#define I82579_LPI_CTRL_100_ENABLE 0x2000
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 77f81cbb601a..e87e9b01f404 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -64,8 +64,6 @@ static int debug = -1;
64module_param(debug, int, 0); 64module_param(debug, int, 0);
65MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 65MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
66 66
67static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
68
69static const struct e1000_info *e1000_info_tbl[] = { 67static const struct e1000_info *e1000_info_tbl[] = {
70 [board_82571] = &e1000_82571_info, 68 [board_82571] = &e1000_82571_info,
71 [board_82572] = &e1000_82572_info, 69 [board_82572] = &e1000_82572_info,
@@ -2979,17 +2977,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2979 u32 pages = 0; 2977 u32 pages = 0;
2980 2978
2981 /* Workaround Si errata on PCHx - configure jumbo frame flow */ 2979 /* Workaround Si errata on PCHx - configure jumbo frame flow */
2982 if (hw->mac.type >= e1000_pch2lan) { 2980 if ((hw->mac.type >= e1000_pch2lan) &&
2983 s32 ret_val; 2981 (adapter->netdev->mtu > ETH_DATA_LEN) &&
2984 2982 e1000_lv_jumbo_workaround_ich8lan(hw, true))
2985 if (adapter->netdev->mtu > ETH_DATA_LEN) 2983 e_dbg("failed to enable jumbo frame workaround mode\n");
2986 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2987 else
2988 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2989
2990 if (ret_val)
2991 e_dbg("failed to enable jumbo frame workaround mode\n");
2992 }
2993 2984
2994 /* Program MC offset vector base */ 2985 /* Program MC offset vector base */
2995 rctl = er32(RCTL); 2986 rctl = er32(RCTL);
@@ -3826,6 +3817,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
3826 break; 3817 break;
3827 } 3818 }
3828 3819
3820 pba = 14;
3821 ew32(PBA, pba);
3829 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; 3822 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
3830 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; 3823 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
3831 break; 3824 break;
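
The pch_lpt leg above pins the packet buffer allocation at 14 KB before
deriving the flow-control watermarks at 90% and 80% of it. A stand-alone
check of that arithmetic; the 0xFFF8 masks stand in for E1000_FCRTH_RTH and
E1000_FCRTL_RTL (8-byte granularity) and should be verified against the
driver's defines:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pba = 14;              /* KB, as written to PBA above */
            uint32_t bytes = pba << 10;     /* 14336 bytes */
            uint32_t high = (bytes * 9 / 10) & 0xFFF8; /* assumed RTH mask */
            uint32_t low  = (bytes * 8 / 10) & 0xFFF8; /* assumed RTL mask */

            printf("high_water = %u\n", high);  /* 12896 */
            printf("low_water  = %u\n", low);   /* 11464 */
            return 0;
    }
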
@@ -4034,6 +4027,12 @@ void e1000e_down(struct e1000_adapter *adapter)
4034 adapter->link_speed = 0; 4027 adapter->link_speed = 0;
4035 adapter->link_duplex = 0; 4028 adapter->link_duplex = 0;
4036 4029
4030 /* Disable Si errata workaround on PCHx for jumbo frame flow */
4031 if ((hw->mac.type >= e1000_pch2lan) &&
4032 (adapter->netdev->mtu > ETH_DATA_LEN) &&
4033 e1000_lv_jumbo_workaround_ich8lan(hw, false))
4034 e_dbg("failed to disable jumbo frame workaround mode\n");
4035
4037 if (!pci_channel_offline(adapter->pdev)) 4036 if (!pci_channel_offline(adapter->pdev))
4038 e1000e_reset(adapter); 4037 e1000e_reset(adapter);
4039 4038
@@ -4683,11 +4682,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4683 struct e1000_hw *hw = &adapter->hw; 4682 struct e1000_hw *hw = &adapter->hw;
4684 struct e1000_phy_regs *phy = &adapter->phy_regs; 4683 struct e1000_phy_regs *phy = &adapter->phy_regs;
4685 4684
4686 if ((er32(STATUS) & E1000_STATUS_LU) && 4685 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
4686 (er32(STATUS) & E1000_STATUS_LU) &&
4687 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4687 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4688 int ret_val; 4688 int ret_val;
4689 4689
4690 pm_runtime_get_sync(&adapter->pdev->dev);
4691 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); 4690 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4692 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); 4691 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4693 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); 4692 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4698,7 +4697,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4698 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); 4697 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
4699 if (ret_val) 4698 if (ret_val)
4700 e_warn("Error reading PHY register\n"); 4699 e_warn("Error reading PHY register\n");
4701 pm_runtime_put_sync(&adapter->pdev->dev);
4702 } else { 4700 } else {
4703 /* Do not read PHY registers if link is not up 4701 /* Do not read PHY registers if link is not up
4704 * Set values to typical power-on defaults 4702 * Set values to typical power-on defaults
@@ -5995,15 +5993,24 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5995 */ 5993 */
5996 e1000e_release_hw_control(adapter); 5994 e1000e_release_hw_control(adapter);
5997 5995
5996 pci_clear_master(pdev);
5997
5998 /* The pci-e switch on some quad port adapters will report a 5998 /* The pci-e switch on some quad port adapters will report a
5999 * correctable error when the MAC transitions from D0 to D3. To 5999 * correctable error when the MAC transitions from D0 to D3. To
6000 * prevent this we need to mask off the correctable errors on the 6000 * prevent this we need to mask off the correctable errors on the
6001 * downstream port of the pci-e switch. 6001 * downstream port of the pci-e switch.
6002 *
6003 * We don't have the associated upstream bridge while assigning
6004 * the PCI device into guest. For example, the KVM on power is
6005 * one of the cases.
6002 */ 6006 */
6003 if (adapter->flags & FLAG_IS_QUAD_PORT) { 6007 if (adapter->flags & FLAG_IS_QUAD_PORT) {
6004 struct pci_dev *us_dev = pdev->bus->self; 6008 struct pci_dev *us_dev = pdev->bus->self;
6005 u16 devctl; 6009 u16 devctl;
6006 6010
6011 if (!us_dev)
6012 return 0;
6013
6007 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); 6014 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6008 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, 6015 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6009 (devctl & ~PCI_EXP_DEVCTL_CERE)); 6016 (devctl & ~PCI_EXP_DEVCTL_CERE));
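
The added us_dev NULL check covers direct device assignment: a NIC passed
through to a guest (the KVM-on-Power case the new comment mentions) can sit
on a virtual root bus with no upstream PCIe switch port to fix up, so
pdev->bus->self is NULL and the quad-port errata write must be skipped. A
hedged sketch of the guard, with the PCI structures reduced to stubs:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for struct pci_bus / struct pci_dev. */
    struct pci_dev;
    struct pci_bus { struct pci_dev *self; };
    struct pci_dev { struct pci_bus *bus; };

    static int mask_bridge_correctable_errors(struct pci_dev *pdev)
    {
            struct pci_dev *us_dev = pdev->bus->self;

            if (!us_dev)
                    return 0;  /* no visible upstream bridge: nothing to mask */

            /* real driver: pcie_capability_read/write_word(us_dev, ...) */
            return 1;
    }

    int main(void)
    {
            struct pci_bus root = { .self = NULL };
            struct pci_dev nic = { .bus = &root };

            printf("masked: %d\n", mask_bridge_correctable_errors(&nic));
            return 0;
    }
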
@@ -6017,38 +6024,73 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
6017 return 0; 6024 return 0;
6018} 6025}
6019 6026
6020#ifdef CONFIG_PCIEASPM 6027/**
6021static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 6028 * e1000e_disable_aspm - Disable ASPM states
6029 * @pdev: pointer to PCI device struct
6030 * @state: bit-mask of ASPM states to disable
6031 *
6032 * Some devices *must* have certain ASPM states disabled per hardware errata.
6033 **/
6034static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6022{ 6035{
6036 struct pci_dev *parent = pdev->bus->self;
6037 u16 aspm_dis_mask = 0;
6038 u16 pdev_aspmc, parent_aspmc;
6039
6040 switch (state) {
6041 case PCIE_LINK_STATE_L0S:
6042 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
6043 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
6044 /* fall-through - can't have L1 without L0s */
6045 case PCIE_LINK_STATE_L1:
6046 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
6047 break;
6048 default:
6049 return;
6050 }
6051
6052 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6053 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6054
6055 if (parent) {
6056 pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
6057 &parent_aspmc);
6058 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6059 }
6060
6061 /* Nothing to do if the ASPM states to be disabled already are */
6062 if (!(pdev_aspmc & aspm_dis_mask) &&
6063 (!parent || !(parent_aspmc & aspm_dis_mask)))
6064 return;
6065
6066 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6067 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
6068 "L0s" : "",
6069 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
6070 "L1" : "");
6071
6072#ifdef CONFIG_PCIEASPM
6023 pci_disable_link_state_locked(pdev, state); 6073 pci_disable_link_state_locked(pdev, state);
6024}
6025#else
6026static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6027{
6028 u16 aspm_ctl = 0;
6029 6074
6030 if (state & PCIE_LINK_STATE_L0S) 6075 /* Double-check ASPM control. If not disabled by the above, the
6031 aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S; 6076 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
6032 if (state & PCIE_LINK_STATE_L1) 6077 * not enabled); override by writing PCI config space directly.
6033 aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1; 6078 */
6079 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6080 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6081
6082 if (!(aspm_dis_mask & pdev_aspmc))
6083 return;
6084#endif
6034 6085
6035 /* Both device and parent should have the same ASPM setting. 6086 /* Both device and parent should have the same ASPM setting.
6036 * Disable ASPM in downstream component first and then upstream. 6087 * Disable ASPM in downstream component first and then upstream.
6037 */ 6088 */
6038 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl); 6089 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
6039 6090
6040 if (pdev->bus->self) 6091 if (parent)
6041 pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL, 6092 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
6042 aspm_ctl); 6093 aspm_dis_mask);
6043}
6044#endif
6045static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6046{
6047 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6048 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
6049 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
6050
6051 __e1000e_disable_aspm(pdev, state);
6052} 6094}
6053 6095
6054#ifdef CONFIG_PM 6096#ifdef CONFIG_PM
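
The rewritten e1000e_disable_aspm() above works on the raw LNKCTL ASPM
control bits instead of branching on CONFIG_PCIEASPM twice: it derives a
disable mask from the requested link states, skips the work when both the
device and its parent already have those states off, and falls back to
clearing the bits in config space when pci_disable_link_state_locked() is
unavailable or vetoed by BIOS. A compilable model of just the mask
derivation; the numeric values follow the standard PCI defines, and the
capability reads are simulated:

    #include <stdint.h>
    #include <stdio.h>

    #define PCIE_LINK_STATE_L0S     1
    #define PCIE_LINK_STATE_L1      2
    #define PCI_EXP_LNKCTL_ASPM_L0S 0x0001
    #define PCI_EXP_LNKCTL_ASPM_L1  0x0002
    #define PCI_EXP_LNKCTL_ASPMC    0x0003

    /* Requested link states -> LNKCTL bits to clear (note fall-through). */
    static uint16_t aspm_dis_mask(uint16_t state)
    {
            uint16_t mask = 0;

            switch (state) {
            case PCIE_LINK_STATE_L0S:
            case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
                    mask |= PCI_EXP_LNKCTL_ASPM_L0S;
                    /* fall through - can't have L1 without L0s */
            case PCIE_LINK_STATE_L1:
                    mask |= PCI_EXP_LNKCTL_ASPM_L1;
                    break;
            default:
                    return 0;
            }
            return mask;
    }

    int main(void)
    {
            uint16_t pdev_aspmc = 0x0002 & PCI_EXP_LNKCTL_ASPMC; /* L1 on */
            uint16_t mask = aspm_dis_mask(PCIE_LINK_STATE_L1);

            if (!(pdev_aspmc & mask))
                    printf("ASPM already disabled, nothing to do\n");
            else
                    printf("clear LNKCTL bits 0x%04x\n", mask);
            return 0;
    }
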
@@ -6723,10 +6765,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6723 adapter->hw.fc.current_mode = e1000_fc_default; 6765 adapter->hw.fc.current_mode = e1000_fc_default;
6724 adapter->hw.phy.autoneg_advertised = 0x2f; 6766 adapter->hw.phy.autoneg_advertised = 0x2f;
6725 6767
6726 /* ring size defaults */
6727 adapter->rx_ring->count = E1000_DEFAULT_RXD;
6728 adapter->tx_ring->count = E1000_DEFAULT_TXD;
6729
6730 /* Initial Wake on LAN setting - If APM wake is enabled in 6768 /* Initial Wake on LAN setting - If APM wake is enabled in
6731 * the EEPROM, enable the ACPI Magic Packet filter 6769 * the EEPROM, enable the ACPI Magic Packet filter
6732 */ 6770 */
@@ -6976,6 +7014,10 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6976 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, 7014 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
6977 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, 7015 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
6978 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, 7016 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
7017 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
7018 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
7019 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
7020 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
6979 7021
6980 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 7022 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
6981}; 7023};
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index f21a91a299a2..79b58353d849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -176,7 +176,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
176 176
177 /* Verify phy id and set remaining function pointers */ 177 /* Verify phy id and set remaining function pointers */
178 switch (phy->id) { 178 switch (phy->id) {
179 case M88E1545_E_PHY_ID: 179 case M88E1543_E_PHY_ID:
180 case I347AT4_E_PHY_ID: 180 case I347AT4_E_PHY_ID:
181 case M88E1112_E_PHY_ID: 181 case M88E1112_E_PHY_ID:
182 case M88E1111_I_PHY_ID: 182 case M88E1111_I_PHY_ID:
@@ -238,6 +238,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
238 238
239 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 239 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
240 E1000_EECD_SIZE_EX_SHIFT); 240 E1000_EECD_SIZE_EX_SHIFT);
241
241 /* Added to a constant, "size" becomes the left-shift value 242 /* Added to a constant, "size" becomes the left-shift value
242 * for setting word_size. 243 * for setting word_size.
243 */ 244 */
@@ -250,86 +251,52 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
250 size = 15; 251 size = 15;
251 252
252 nvm->word_size = 1 << size; 253 nvm->word_size = 1 << size;
253 if (hw->mac.type < e1000_i210) { 254 nvm->opcode_bits = 8;
254 nvm->opcode_bits = 8; 255 nvm->delay_usec = 1;
255 nvm->delay_usec = 1;
256
257 switch (nvm->override) {
258 case e1000_nvm_override_spi_large:
259 nvm->page_size = 32;
260 nvm->address_bits = 16;
261 break;
262 case e1000_nvm_override_spi_small:
263 nvm->page_size = 8;
264 nvm->address_bits = 8;
265 break;
266 default:
267 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
268 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
269 16 : 8;
270 break;
271 }
272 if (nvm->word_size == (1 << 15))
273 nvm->page_size = 128;
274 256
275 nvm->type = e1000_nvm_eeprom_spi; 257 switch (nvm->override) {
276 } else { 258 case e1000_nvm_override_spi_large:
277 nvm->type = e1000_nvm_flash_hw; 259 nvm->page_size = 32;
260 nvm->address_bits = 16;
261 break;
262 case e1000_nvm_override_spi_small:
263 nvm->page_size = 8;
264 nvm->address_bits = 8;
265 break;
266 default:
267 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
268 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
269 16 : 8;
270 break;
278 } 271 }
272 if (nvm->word_size == (1 << 15))
273 nvm->page_size = 128;
274
275 nvm->type = e1000_nvm_eeprom_spi;
279 276
280 /* NVM Function Pointers */ 277 /* NVM Function Pointers */
278 nvm->ops.acquire = igb_acquire_nvm_82575;
279 nvm->ops.release = igb_release_nvm_82575;
280 nvm->ops.write = igb_write_nvm_spi;
281 nvm->ops.validate = igb_validate_nvm_checksum;
282 nvm->ops.update = igb_update_nvm_checksum;
283 if (nvm->word_size < (1 << 15))
284 nvm->ops.read = igb_read_nvm_eerd;
285 else
286 nvm->ops.read = igb_read_nvm_spi;
287
288 /* override generic family function pointers for specific descendants */
281 switch (hw->mac.type) { 289 switch (hw->mac.type) {
282 case e1000_82580: 290 case e1000_82580:
283 nvm->ops.validate = igb_validate_nvm_checksum_82580; 291 nvm->ops.validate = igb_validate_nvm_checksum_82580;
284 nvm->ops.update = igb_update_nvm_checksum_82580; 292 nvm->ops.update = igb_update_nvm_checksum_82580;
285 nvm->ops.acquire = igb_acquire_nvm_82575;
286 nvm->ops.release = igb_release_nvm_82575;
287 if (nvm->word_size < (1 << 15))
288 nvm->ops.read = igb_read_nvm_eerd;
289 else
290 nvm->ops.read = igb_read_nvm_spi;
291 nvm->ops.write = igb_write_nvm_spi;
292 break; 293 break;
293 case e1000_i354: 294 case e1000_i354:
294 case e1000_i350: 295 case e1000_i350:
295 nvm->ops.validate = igb_validate_nvm_checksum_i350; 296 nvm->ops.validate = igb_validate_nvm_checksum_i350;
296 nvm->ops.update = igb_update_nvm_checksum_i350; 297 nvm->ops.update = igb_update_nvm_checksum_i350;
297 nvm->ops.acquire = igb_acquire_nvm_82575;
298 nvm->ops.release = igb_release_nvm_82575;
299 if (nvm->word_size < (1 << 15))
300 nvm->ops.read = igb_read_nvm_eerd;
301 else
302 nvm->ops.read = igb_read_nvm_spi;
303 nvm->ops.write = igb_write_nvm_spi;
304 break;
305 case e1000_i210:
306 nvm->ops.validate = igb_validate_nvm_checksum_i210;
307 nvm->ops.update = igb_update_nvm_checksum_i210;
308 nvm->ops.acquire = igb_acquire_nvm_i210;
309 nvm->ops.release = igb_release_nvm_i210;
310 nvm->ops.read = igb_read_nvm_srrd_i210;
311 nvm->ops.write = igb_write_nvm_srwr_i210;
312 nvm->ops.valid_led_default = igb_valid_led_default_i210;
313 break;
314 case e1000_i211:
315 nvm->ops.acquire = igb_acquire_nvm_i210;
316 nvm->ops.release = igb_release_nvm_i210;
317 nvm->ops.read = igb_read_nvm_i211;
318 nvm->ops.valid_led_default = igb_valid_led_default_i210;
319 nvm->ops.validate = NULL;
320 nvm->ops.update = NULL;
321 nvm->ops.write = NULL;
322 break; 298 break;
323 default: 299 default:
324 nvm->ops.validate = igb_validate_nvm_checksum;
325 nvm->ops.update = igb_update_nvm_checksum;
326 nvm->ops.acquire = igb_acquire_nvm_82575;
327 nvm->ops.release = igb_release_nvm_82575;
328 if (nvm->word_size < (1 << 15))
329 nvm->ops.read = igb_read_nvm_eerd;
330 else
331 nvm->ops.read = igb_read_nvm_spi;
332 nvm->ops.write = igb_write_nvm_spi;
333 break; 300 break;
334 } 301 }
335 302
@@ -516,6 +483,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
516 case E1000_DEV_ID_I210_FIBER: 483 case E1000_DEV_ID_I210_FIBER:
517 case E1000_DEV_ID_I210_SERDES: 484 case E1000_DEV_ID_I210_SERDES:
518 case E1000_DEV_ID_I210_SGMII: 485 case E1000_DEV_ID_I210_SGMII:
486 case E1000_DEV_ID_I210_COPPER_FLASHLESS:
487 case E1000_DEV_ID_I210_SERDES_FLASHLESS:
519 mac->type = e1000_i210; 488 mac->type = e1000_i210;
520 break; 489 break;
521 case E1000_DEV_ID_I211_COPPER: 490 case E1000_DEV_ID_I211_COPPER:
@@ -601,6 +570,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
601 570
602 /* NVM initialization */ 571 /* NVM initialization */
603 ret_val = igb_init_nvm_params_82575(hw); 572 ret_val = igb_init_nvm_params_82575(hw);
573 switch (hw->mac.type) {
574 case e1000_i210:
575 case e1000_i211:
576 ret_val = igb_init_nvm_params_i210(hw);
577 break;
578 default:
579 break;
580 }
581
604 if (ret_val) 582 if (ret_val)
605 goto out; 583 goto out;
606 584
@@ -1163,6 +1141,31 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1163} 1141}
1164 1142
1165/** 1143/**
1144 * igb_get_link_up_info_82575 - Get link speed/duplex info
1145 * @hw: pointer to the HW structure
1146 * @speed: stores the current speed
1147 * @duplex: stores the current duplex
1148 *
1149 * This is a wrapper function, if using the serial gigabit media independent
1150 * interface, use PCS to retrieve the link speed and duplex information.
1151 * Otherwise, use the generic function to get the link speed and duplex info.
1152 **/
1153static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
1154 u16 *duplex)
1155{
1156 s32 ret_val;
1157
1158 if (hw->phy.media_type != e1000_media_type_copper)
1159 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
1160 duplex);
1161 else
1162 ret_val = igb_get_speed_and_duplex_copper(hw, speed,
1163 duplex);
1164
1165 return ret_val;
1166}
1167
1168/**
1166 * igb_check_for_link_82575 - Check for link 1169 * igb_check_for_link_82575 - Check for link
1167 * @hw: pointer to the HW structure 1170 * @hw: pointer to the HW structure
1168 * 1171 *
@@ -1239,7 +1242,7 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1239 u16 *duplex) 1242 u16 *duplex)
1240{ 1243{
1241 struct e1000_mac_info *mac = &hw->mac; 1244 struct e1000_mac_info *mac = &hw->mac;
1242 u32 pcs; 1245 u32 pcs, status;
1243 1246
1244 /* Set up defaults for the return values of this function */ 1247 /* Set up defaults for the return values of this function */
1245 mac->serdes_has_link = false; 1248 mac->serdes_has_link = false;
@@ -1260,20 +1263,31 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1260 mac->serdes_has_link = true; 1263 mac->serdes_has_link = true;
1261 1264
1262 /* Detect and store PCS speed */ 1265 /* Detect and store PCS speed */
1263 if (pcs & E1000_PCS_LSTS_SPEED_1000) { 1266 if (pcs & E1000_PCS_LSTS_SPEED_1000)
1264 *speed = SPEED_1000; 1267 *speed = SPEED_1000;
1265 } else if (pcs & E1000_PCS_LSTS_SPEED_100) { 1268 else if (pcs & E1000_PCS_LSTS_SPEED_100)
1266 *speed = SPEED_100; 1269 *speed = SPEED_100;
1267 } else { 1270 else
1268 *speed = SPEED_10; 1271 *speed = SPEED_10;
1269 }
1270 1272
1271 /* Detect and store PCS duplex */ 1273 /* Detect and store PCS duplex */
1272 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { 1274 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
1273 *duplex = FULL_DUPLEX; 1275 *duplex = FULL_DUPLEX;
1274 } else { 1276 else
1275 *duplex = HALF_DUPLEX; 1277 *duplex = HALF_DUPLEX;
1278
1279 /* Check if it is an I354 2.5Gb backplane connection. */
1280 if (mac->type == e1000_i354) {
1281 status = rd32(E1000_STATUS);
1282 if ((status & E1000_STATUS_2P5_SKU) &&
1283 !(status & E1000_STATUS_2P5_SKU_OVER)) {
1284 *speed = SPEED_2500;
1285 *duplex = FULL_DUPLEX;
1286 hw_dbg("2500 Mbs, ");
1287 hw_dbg("Full Duplex\n");
1288 }
1276 } 1289 }
1290
1277 } 1291 }
1278 1292
1279 return 0; 1293 return 0;
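
The 2.5 Gb/s detection that this series removes from
igb_get_speed_and_duplex_copper() (see the e1000_mac.c hunk below) now runs
here in the PCS path: on i354, a STATUS register reporting the 2.5G SKU
without the override bit forces 2500 Mb/s full duplex over whatever the PCS
speed bits decoded. A small model of that override, with the two SKU flags
treated as opaque booleans since their exact bit positions live in
e1000_defines.h:

    #include <stdbool.h>
    #include <stdio.h>

    struct link_info { unsigned int speed; bool full_duplex; };

    /* Apply the i354 2.5G backplane SKU override on top of the PCS decode. */
    static void apply_2p5_sku(struct link_info *li, bool sku_2p5,
                              bool sku_override)
    {
            if (sku_2p5 && !sku_override) {
                    li->speed = 2500;
                    li->full_duplex = true;
            }
    }

    int main(void)
    {
            struct link_info li = { .speed = 1000, .full_duplex = true };

            apply_2p5_sku(&li, true, false);
            printf("%u Mb/s, %s duplex\n", li.speed,
                   li.full_duplex ? "full" : "half");
            return 0;
    }
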
@@ -1320,7 +1334,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1320 **/ 1334 **/
1321static s32 igb_reset_hw_82575(struct e1000_hw *hw) 1335static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1322{ 1336{
1323 u32 ctrl, icr; 1337 u32 ctrl;
1324 s32 ret_val; 1338 s32 ret_val;
1325 1339
1326 /* Prevent the PCI-E bus from sticking if there is no TLP connection 1340 /* Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -1365,7 +1379,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1365 1379
1366 /* Clear any pending interrupt events. */ 1380 /* Clear any pending interrupt events. */
1367 wr32(E1000_IMC, 0xffffffff); 1381 wr32(E1000_IMC, 0xffffffff);
1368 icr = rd32(E1000_ICR); 1382 rd32(E1000_ICR);
1369 1383
1370 /* Install any alternate MAC address into RAR0 */ 1384 /* Install any alternate MAC address into RAR0 */
1371 ret_val = igb_check_alt_mac_addr(hw); 1385 ret_val = igb_check_alt_mac_addr(hw);
@@ -1443,11 +1457,18 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1443 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1457 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1444 wr32(E1000_CTRL, ctrl); 1458 wr32(E1000_CTRL, ctrl);
1445 1459
1446 /* Clear Go Link Disconnect bit */ 1460 /* Clear Go Link Disconnect bit on supported devices */
1447 if (hw->mac.type >= e1000_82580) { 1461 switch (hw->mac.type) {
1462 case e1000_82580:
1463 case e1000_i350:
1464 case e1000_i210:
1465 case e1000_i211:
1448 phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); 1466 phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
1449 phpm_reg &= ~E1000_82580_PM_GO_LINKD; 1467 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1450 wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); 1468 wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
1469 break;
1470 default:
1471 break;
1451 } 1472 }
1452 1473
1453 ret_val = igb_setup_serdes_link_82575(hw); 1474 ret_val = igb_setup_serdes_link_82575(hw);
@@ -1470,7 +1491,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1470 switch (hw->phy.id) { 1491 switch (hw->phy.id) {
1471 case I347AT4_E_PHY_ID: 1492 case I347AT4_E_PHY_ID:
1472 case M88E1112_E_PHY_ID: 1493 case M88E1112_E_PHY_ID:
1473 case M88E1545_E_PHY_ID: 1494 case M88E1543_E_PHY_ID:
1474 case I210_I_PHY_ID: 1495 case I210_I_PHY_ID:
1475 ret_val = igb_copper_link_setup_m88_gen2(hw); 1496 ret_val = igb_copper_link_setup_m88_gen2(hw);
1476 break; 1497 break;
@@ -2103,10 +2124,9 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2103 s32 ret_val = 0; 2124 s32 ret_val = 0;
2104 /* BH SW mailbox bit in SW_FW_SYNC */ 2125 /* BH SW mailbox bit in SW_FW_SYNC */
2105 u16 swmbsw_mask = E1000_SW_SYNCH_MB; 2126 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
2106 u32 ctrl, icr; 2127 u32 ctrl;
2107 bool global_device_reset = hw->dev_spec._82575.global_device_reset; 2128 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
2108 2129
2109
2110 hw->dev_spec._82575.global_device_reset = false; 2130 hw->dev_spec._82575.global_device_reset = false;
2111 2131
2112 /* due to hw errata, global device reset doesn't always 2132 /* due to hw errata, global device reset doesn't always
@@ -2165,7 +2185,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2165 2185
2166 /* Clear any pending interrupt events. */ 2186 /* Clear any pending interrupt events. */
2167 wr32(E1000_IMC, 0xffffffff); 2187 wr32(E1000_IMC, 0xffffffff);
2168 icr = rd32(E1000_ICR); 2188 rd32(E1000_ICR);
2169 2189
2170 ret_val = igb_reset_mdicnfg_82580(hw); 2190 ret_val = igb_reset_mdicnfg_82580(hw);
2171 if (ret_val) 2191 if (ret_val)
@@ -2500,28 +2520,28 @@ s32 igb_set_eee_i354(struct e1000_hw *hw)
2500 u16 phy_data; 2520 u16 phy_data;
2501 2521
2502 if ((hw->phy.media_type != e1000_media_type_copper) || 2522 if ((hw->phy.media_type != e1000_media_type_copper) ||
2503 (phy->id != M88E1545_E_PHY_ID)) 2523 (phy->id != M88E1543_E_PHY_ID))
2504 goto out; 2524 goto out;
2505 2525
2506 if (!hw->dev_spec._82575.eee_disable) { 2526 if (!hw->dev_spec._82575.eee_disable) {
2507 /* Switch to PHY page 18. */ 2527 /* Switch to PHY page 18. */
2508 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18); 2528 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
2509 if (ret_val) 2529 if (ret_val)
2510 goto out; 2530 goto out;
2511 2531
2512 ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1, 2532 ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2513 &phy_data); 2533 &phy_data);
2514 if (ret_val) 2534 if (ret_val)
2515 goto out; 2535 goto out;
2516 2536
2517 phy_data |= E1000_M88E1545_EEE_CTRL_1_MS; 2537 phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
2518 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1, 2538 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2519 phy_data); 2539 phy_data);
2520 if (ret_val) 2540 if (ret_val)
2521 goto out; 2541 goto out;
2522 2542
2523 /* Return the PHY to page 0. */ 2543 /* Return the PHY to page 0. */
2524 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0); 2544 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
2525 if (ret_val) 2545 if (ret_val)
2526 goto out; 2546 goto out;
2527 2547
@@ -2572,7 +2592,7 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2572 2592
2573 /* Check if EEE is supported on this device. */ 2593 /* Check if EEE is supported on this device. */
2574 if ((hw->phy.media_type != e1000_media_type_copper) || 2594 if ((hw->phy.media_type != e1000_media_type_copper) ||
2575 (phy->id != M88E1545_E_PHY_ID)) 2595 (phy->id != M88E1543_E_PHY_ID))
2576 goto out; 2596 goto out;
2577 2597
2578 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, 2598 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
@@ -2728,7 +2748,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
2728 .check_for_link = igb_check_for_link_82575, 2748 .check_for_link = igb_check_for_link_82575,
2729 .rar_set = igb_rar_set, 2749 .rar_set = igb_rar_set,
2730 .read_mac_addr = igb_read_mac_addr_82575, 2750 .read_mac_addr = igb_read_mac_addr_82575,
2731 .get_speed_and_duplex = igb_get_speed_and_duplex_copper, 2751 .get_speed_and_duplex = igb_get_link_up_info_82575,
2732#ifdef CONFIG_IGB_HWMON 2752#ifdef CONFIG_IGB_HWMON
2733 .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, 2753 .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
2734 .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, 2754 .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
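
The igb_init_nvm_params_82575() rewrite above collapses four near-identical
switch arms into one set of generic function-pointer defaults followed by
per-family overrides, and moves the i210/i211 wiring out to
igb_init_nvm_params_i210() (added in e1000_i210.c below). The shape of that
refactor, reduced to a sketch with hypothetical ops names:

    #include <stdio.h>

    typedef int (*read_fn)(void);

    static int read_eerd(void) { return 0; }  /* generic default */
    static int read_i210(void) { return 1; }  /* family override */

    struct nvm_ops { read_fn read; };

    enum mac_type { MAC_82580, MAC_I210 };

    static void init_nvm_params(struct nvm_ops *ops, enum mac_type type)
    {
            /* Generic family defaults first... */
            ops->read = read_eerd;

            /* ...then override only what a descendant actually changes. */
            switch (type) {
            case MAC_I210:
                    ops->read = read_i210;
                    break;
            default:
                    break;
            }
    }

    int main(void)
    {
            struct nvm_ops ops;

            init_nvm_params(&ops, MAC_I210);
            printf("override in effect: %d\n", ops.read());
            return 0;
    }

Keeping the defaults in one place means a new descendant only has to name its
deltas, which is exactly what the diff leaves behind for the 82580 and
i350/i354 checksum ops.
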
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index aa201abb8ad2..978eca31ceda 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -620,6 +620,7 @@
620#define E1000_EECD_SIZE_EX_SHIFT 11 620#define E1000_EECD_SIZE_EX_SHIFT 11
621#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ 621#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
622#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ 622#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
623#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
623#define E1000_FLUDONE_ATTEMPTS 20000 624#define E1000_FLUDONE_ATTEMPTS 20000
624#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ 625#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
625#define E1000_I210_FIFO_SEL_RX 0x00 626#define E1000_I210_FIFO_SEL_RX 0x00
@@ -627,6 +628,11 @@
627#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) 628#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
628#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 629#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
629#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 630#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
631#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
632/* Secure FLASH mode requires removing MSb */
633#define E1000_I210_FW_PTR_MASK 0x7FFF
634/* Firmware code revision field word offset*/
635#define E1000_I210_FW_VER_OFFSET 328
630#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ 636#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
631#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ 637#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
632#define E1000_FLUDONE_ATTEMPTS 20000 638#define E1000_FLUDONE_ATTEMPTS 20000
@@ -665,20 +671,26 @@
665#define NVM_INIT_CTRL_4 0x0013 671#define NVM_INIT_CTRL_4 0x0013
666#define NVM_LED_1_CFG 0x001C 672#define NVM_LED_1_CFG 0x001C
667#define NVM_LED_0_2_CFG 0x001F 673#define NVM_LED_0_2_CFG 0x001F
668
669/* NVM version defines */
670#define NVM_ETRACK_WORD 0x0042 674#define NVM_ETRACK_WORD 0x0042
675#define NVM_ETRACK_HIWORD 0x0043
671#define NVM_COMB_VER_OFF 0x0083 676#define NVM_COMB_VER_OFF 0x0083
672#define NVM_COMB_VER_PTR 0x003d 677#define NVM_COMB_VER_PTR 0x003d
673#define NVM_MAJOR_MASK 0xF000 678
674#define NVM_MINOR_MASK 0x0FF0 679/* NVM version defines */
675#define NVM_BUILD_MASK 0x000F 680#define NVM_MAJOR_MASK 0xF000
676#define NVM_COMB_VER_MASK 0x00FF 681#define NVM_MINOR_MASK 0x0FF0
677#define NVM_MAJOR_SHIFT 12 682#define NVM_IMAGE_ID_MASK 0x000F
678#define NVM_MINOR_SHIFT 4 683#define NVM_COMB_VER_MASK 0x00FF
679#define NVM_COMB_VER_SHFT 8 684#define NVM_MAJOR_SHIFT 12
680#define NVM_VER_INVALID 0xFFFF 685#define NVM_MINOR_SHIFT 4
681#define NVM_ETRACK_SHIFT 16 686#define NVM_COMB_VER_SHFT 8
687#define NVM_VER_INVALID 0xFFFF
688#define NVM_ETRACK_SHIFT 16
689#define NVM_ETRACK_VALID 0x8000
690#define NVM_NEW_DEC_MASK 0x0F00
691#define NVM_HEX_CONV 16
692#define NVM_HEX_TENS 10
693
682#define NVM_ETS_CFG 0x003E 694#define NVM_ETS_CFG 0x003E
683#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 695#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
684#define NVM_ETS_LTHRES_DELTA_SHIFT 6 696#define NVM_ETS_LTHRES_DELTA_SHIFT 6
@@ -775,7 +787,7 @@
775#define I350_I_PHY_ID 0x015403B0 787#define I350_I_PHY_ID 0x015403B0
776#define M88_VENDOR 0x0141 788#define M88_VENDOR 0x0141
777#define I210_I_PHY_ID 0x01410C00 789#define I210_I_PHY_ID 0x01410C00
778#define M88E1545_E_PHY_ID 0x01410EA0 790#define M88E1543_E_PHY_ID 0x01410EA0
779 791
780/* M88E1000 Specific Registers */ 792/* M88E1000 Specific Registers */
781#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 793#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -897,9 +909,9 @@
897#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ 909#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
898#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ 910#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
899#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ 911#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
900#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */ 912#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
901#define E1000_M88E1545_EEE_CTRL_1 0x0 913#define E1000_M88E1543_EEE_CTRL_1 0x0
902#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ 914#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
903#define E1000_EEE_ADV_DEV_I354 7 915#define E1000_EEE_ADV_DEV_I354 7
904#define E1000_EEE_ADV_ADDR_I354 60 916#define E1000_EEE_ADV_ADDR_I354 60
905#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ 917#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 94d7866b9c20..37a9c06a6c68 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -67,6 +67,8 @@ struct e1000_hw;
67#define E1000_DEV_ID_I210_FIBER 0x1536 67#define E1000_DEV_ID_I210_FIBER 0x1536
68#define E1000_DEV_ID_I210_SERDES 0x1537 68#define E1000_DEV_ID_I210_SERDES 0x1537
69#define E1000_DEV_ID_I210_SGMII 0x1538 69#define E1000_DEV_ID_I210_SGMII 0x1538
70#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
71#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
70#define E1000_DEV_ID_I211_COPPER 0x1539 72#define E1000_DEV_ID_I211_COPPER 0x1539
71#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 73#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
72#define E1000_DEV_ID_I354_SGMII 0x1F41 74#define E1000_DEV_ID_I354_SGMII 0x1F41
@@ -110,6 +112,7 @@ enum e1000_nvm_type {
110 e1000_nvm_none, 112 e1000_nvm_none,
111 e1000_nvm_eeprom_spi, 113 e1000_nvm_eeprom_spi,
112 e1000_nvm_flash_hw, 114 e1000_nvm_flash_hw,
115 e1000_nvm_invm,
113 e1000_nvm_flash_sw 116 e1000_nvm_flash_sw
114}; 117};
115 118
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index ddb3cf51b9b9..0c0393316a3a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -335,57 +335,101 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
335} 335}
336 336
337/** 337/**
338 * igb_read_nvm_i211 - Read NVM wrapper function for I211 338 * igb_read_invm_word_i210 - Reads OTP
339 * @hw: pointer to the HW structure
340 * @address: the word address (aka eeprom offset) to read
341 * @data: pointer to the data read
342 *
343 * Reads 16-bit words from the OTP. Return error when the word is not
344 * stored in OTP.
345 **/
346static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
347{
348 s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
349 u32 invm_dword;
350 u16 i;
351 u8 record_type, word_address;
352
353 for (i = 0; i < E1000_INVM_SIZE; i++) {
354 invm_dword = rd32(E1000_INVM_DATA_REG(i));
355 /* Get record type */
356 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
357 if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
358 break;
359 if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
360 i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
361 if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
362 i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
363 if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
364 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
365 if (word_address == address) {
366 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
367 hw_dbg("Read INVM Word 0x%02x = %x",
368 address, *data);
369 status = E1000_SUCCESS;
370 break;
371 }
372 }
373 }
374 if (status != E1000_SUCCESS)
375 hw_dbg("Requested word 0x%02x not found in OTP\n", address);
376 return status;
377}
378
379/**
380 * igb_read_invm_i210 - Read invm wrapper function for I210/I211
339 * @hw: pointer to the HW structure 381 * @hw: pointer to the HW structure
340 * @words: number of words to read 382 * @words: number of words to read
341 * @data: pointer to the data read 383 * @data: pointer to the data read
342 * 384 *
343 * Wrapper function to return data formerly found in the NVM. 385 * Wrapper function to return data formerly found in the NVM.
344 **/ 386 **/
345s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, 387static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
346 u16 *data) 388 u16 words __always_unused, u16 *data)
347{ 389{
348 s32 ret_val = E1000_SUCCESS; 390 s32 ret_val = E1000_SUCCESS;
349 391
350 /* Only the MAC addr is required to be present in the iNVM */ 392 /* Only the MAC addr is required to be present in the iNVM */
351 switch (offset) { 393 switch (offset) {
352 case NVM_MAC_ADDR: 394 case NVM_MAC_ADDR:
353 ret_val = igb_read_invm_i211(hw, offset, &data[0]); 395 ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
354 ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]); 396 ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
355 ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]); 397 &data[1]);
398 ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
399 &data[2]);
356 if (ret_val != E1000_SUCCESS) 400 if (ret_val != E1000_SUCCESS)
357 hw_dbg("MAC Addr not found in iNVM\n"); 401 hw_dbg("MAC Addr not found in iNVM\n");
358 break; 402 break;
359 case NVM_INIT_CTRL_2: 403 case NVM_INIT_CTRL_2:
360 ret_val = igb_read_invm_i211(hw, (u8)offset, data); 404 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
361 if (ret_val != E1000_SUCCESS) { 405 if (ret_val != E1000_SUCCESS) {
362 *data = NVM_INIT_CTRL_2_DEFAULT_I211; 406 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
363 ret_val = E1000_SUCCESS; 407 ret_val = E1000_SUCCESS;
364 } 408 }
365 break; 409 break;
366 case NVM_INIT_CTRL_4: 410 case NVM_INIT_CTRL_4:
367 ret_val = igb_read_invm_i211(hw, (u8)offset, data); 411 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
368 if (ret_val != E1000_SUCCESS) { 412 if (ret_val != E1000_SUCCESS) {
369 *data = NVM_INIT_CTRL_4_DEFAULT_I211; 413 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
370 ret_val = E1000_SUCCESS; 414 ret_val = E1000_SUCCESS;
371 } 415 }
372 break; 416 break;
373 case NVM_LED_1_CFG: 417 case NVM_LED_1_CFG:
374 ret_val = igb_read_invm_i211(hw, (u8)offset, data); 418 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
375 if (ret_val != E1000_SUCCESS) { 419 if (ret_val != E1000_SUCCESS) {
376 *data = NVM_LED_1_CFG_DEFAULT_I211; 420 *data = NVM_LED_1_CFG_DEFAULT_I211;
377 ret_val = E1000_SUCCESS; 421 ret_val = E1000_SUCCESS;
378 } 422 }
379 break; 423 break;
380 case NVM_LED_0_2_CFG: 424 case NVM_LED_0_2_CFG:
381 igb_read_invm_i211(hw, offset, data); 425 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
382 if (ret_val != E1000_SUCCESS) { 426 if (ret_val != E1000_SUCCESS) {
383 *data = NVM_LED_0_2_CFG_DEFAULT_I211; 427 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
384 ret_val = E1000_SUCCESS; 428 ret_val = E1000_SUCCESS;
385 } 429 }
386 break; 430 break;
387 case NVM_ID_LED_SETTINGS: 431 case NVM_ID_LED_SETTINGS:
388 ret_val = igb_read_invm_i211(hw, (u8)offset, data); 432 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
389 if (ret_val != E1000_SUCCESS) { 433 if (ret_val != E1000_SUCCESS) {
390 *data = ID_LED_RESERVED_FFFF; 434 *data = ID_LED_RESERVED_FFFF;
391 ret_val = E1000_SUCCESS; 435 ret_val = E1000_SUCCESS;
@@ -411,48 +455,6 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
411} 455}
412 456
413/** 457/**
414 * igb_read_invm_i211 - Reads OTP
415 * @hw: pointer to the HW structure
416 * @address: the word address (aka eeprom offset) to read
417 * @data: pointer to the data read
418 *
419 * Reads 16-bit words from the OTP. Return error when the word is not
420 * stored in OTP.
421 **/
422s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
423{
424 s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
425 u32 invm_dword;
426 u16 i;
427 u8 record_type, word_address;
428
429 for (i = 0; i < E1000_INVM_SIZE; i++) {
430 invm_dword = rd32(E1000_INVM_DATA_REG(i));
431 /* Get record type */
432 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
433 if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
434 break;
435 if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
436 i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
437 if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
438 i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
439 if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
440 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
441 if (word_address == (u8)address) {
442 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
443 hw_dbg("Read INVM Word 0x%02x = %x",
444 address, *data);
445 status = E1000_SUCCESS;
446 break;
447 }
448 }
449 }
450 if (status != E1000_SUCCESS)
451 hw_dbg("Requested word 0x%02x not found in OTP\n", address);
452 return status;
453}
454
455/**
456 * igb_read_invm_version - Reads iNVM version and image type 458 * igb_read_invm_version - Reads iNVM version and image type
457 * @hw: pointer to the HW structure 459 * @hw: pointer to the HW structure
458 * @invm_ver: version structure for the version read 460 * @invm_ver: version structure for the version read
@@ -661,6 +663,23 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
661} 663}
662 664
663/** 665/**
666 * igb_get_flash_presence_i210 - Check if flash device is detected.
667 * @hw: pointer to the HW structure
668 *
669 **/
670bool igb_get_flash_presence_i210(struct e1000_hw *hw)
671{
672 u32 eec = 0;
673 bool ret_val = false;
674
675 eec = rd32(E1000_EECD);
676 if (eec & E1000_EECD_FLASH_DETECTED_I210)
677 ret_val = true;
678
679 return ret_val;
680}
681
682/**
664 * igb_update_flash_i210 - Commit EEPROM to the flash 683 * igb_update_flash_i210 - Commit EEPROM to the flash
665 * @hw: pointer to the HW structure 684 * @hw: pointer to the HW structure
666 * 685 *
@@ -786,3 +805,33 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
786{ 805{
787 return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); 806 return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
788} 807}
808
809/**
810 * igb_init_nvm_params_i210 - Init NVM func ptrs.
811 * @hw: pointer to the HW structure
812 **/
813s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
814{
815 s32 ret_val = 0;
816 struct e1000_nvm_info *nvm = &hw->nvm;
817
818 nvm->ops.acquire = igb_acquire_nvm_i210;
819 nvm->ops.release = igb_release_nvm_i210;
820 nvm->ops.valid_led_default = igb_valid_led_default_i210;
821
822 /* NVM Function Pointers */
823 if (igb_get_flash_presence_i210(hw)) {
824 hw->nvm.type = e1000_nvm_flash_hw;
825 nvm->ops.read = igb_read_nvm_srrd_i210;
826 nvm->ops.write = igb_write_nvm_srwr_i210;
827 nvm->ops.validate = igb_validate_nvm_checksum_i210;
828 nvm->ops.update = igb_update_nvm_checksum_i210;
829 } else {
830 hw->nvm.type = e1000_nvm_invm;
831 nvm->ops.read = igb_read_invm_i210;
832 nvm->ops.write = NULL;
833 nvm->ops.validate = NULL;
834 nvm->ops.update = NULL;
835 }
836 return ret_val;
837}
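
igb_read_invm_word_i210() above walks the 64-dword OTP array, skipping the
payload of CSR-autoload and RSA-key records until it finds a word-autoload
record for the requested address. A user-space model of that walk; the field
encoding and the skip sizes here are assumptions standing in for the
INVM_DWORD_TO_* macros and E1000_INVM_*_SIZE_IN_DWORDS defines in
e1000_i210.h, so treat the layout as illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define INVM_SIZE 64

    /* Record types and field positions: assumed, not the datasheet values. */
    enum { REC_UNINIT = 0, REC_WORD = 1, REC_CSR = 2, REC_RSA = 3 };
    #define REC_TYPE(d) ((uint8_t)((d) & 0x7))
    #define REC_ADDR(d) ((uint8_t)(((d) >> 9) & 0x7F))
    #define REC_DATA(d) ((uint16_t)((d) >> 16))

    static int read_invm_word(const uint32_t *invm, uint8_t addr,
                              uint16_t *data)
    {
            for (unsigned int i = 0; i < INVM_SIZE; i++) {
                    uint8_t type = REC_TYPE(invm[i]);

                    if (type == REC_UNINIT)
                            break;          /* end of programmed records */
                    if (type == REC_CSR)
                            i += 1;         /* skip payload (size assumed) */
                    if (type == REC_RSA)
                            i += 8;         /* skip payload (size assumed) */
                    if (type == REC_WORD && REC_ADDR(invm[i]) == addr) {
                            *data = REC_DATA(invm[i]);
                            return 0;
                    }
            }
            return -1;                      /* word not stored in OTP */
    }

    int main(void)
    {
            uint32_t invm[INVM_SIZE] = {
                    [0] = (0xBEEFu << 16) | (0x1Cu << 9) | REC_WORD,
            };
            uint16_t v;

            if (!read_invm_word(invm, 0x1C, &v))
                    printf("word 0x1C = 0x%04x\n", v);
            return 0;
    }
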
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5caa332e7556..dde3c4b7ea99 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -35,20 +35,19 @@ extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
35 u16 words, u16 *data); 35 u16 words, u16 *data);
36extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, 36extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
37 u16 words, u16 *data); 37 u16 words, u16 *data);
38extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
39extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); 38extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
40extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); 39extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
41extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw); 40extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
42extern void igb_release_nvm_i210(struct e1000_hw *hw); 41extern void igb_release_nvm_i210(struct e1000_hw *hw);
43extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); 42extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
44extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
45 u16 *data);
46extern s32 igb_read_invm_version(struct e1000_hw *hw, 43extern s32 igb_read_invm_version(struct e1000_hw *hw,
47 struct e1000_fw_version *invm_ver); 44 struct e1000_fw_version *invm_ver);
48extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, 45extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
49 u16 *data); 46 u16 *data);
50extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, 47extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
51 u16 data); 48 u16 data);
49extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
50extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
52 51
53#define E1000_STM_OPCODE 0xDB00 52#define E1000_STM_OPCODE 0xDB00
54#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 53#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index bab556a47fcc..f0dfd41dd4bd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1171,17 +1171,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1171 hw_dbg("Half Duplex\n"); 1171 hw_dbg("Half Duplex\n");
1172 } 1172 }
1173 1173
1174 /* Check if it is an I354 2.5Gb backplane connection. */
1175 if (hw->mac.type == e1000_i354) {
1176 if ((status & E1000_STATUS_2P5_SKU) &&
1177 !(status & E1000_STATUS_2P5_SKU_OVER)) {
1178 *speed = SPEED_2500;
1179 *duplex = FULL_DUPLEX;
1180 hw_dbg("2500 Mbs, ");
1181 hw_dbg("Full Duplex\n");
1182 }
1183 }
1184
1185 return 0; 1174 return 0;
1186} 1175}
1187 1176
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 7f9cd7cbd353..a7db7f3db914 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -709,11 +709,16 @@ out:
709 **/ 709 **/
710void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) 710void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
711{ 711{
712 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; 712 u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
713 u16 fw_version; 713 u8 q, hval, rem, result;
714 u16 comb_verh, comb_verl, comb_offset;
714 715
715 memset(fw_vers, 0, sizeof(struct e1000_fw_version)); 716 memset(fw_vers, 0, sizeof(struct e1000_fw_version));
716 717
718 /* basic eeprom version numbers and bits used vary by part and by tool
719 * used to create the nvm images. Check which data format we have.
720 */
721 hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
717 switch (hw->mac.type) { 722 switch (hw->mac.type) {
718 case e1000_i211: 723 case e1000_i211:
719 igb_read_invm_version(hw, fw_vers); 724 igb_read_invm_version(hw, fw_vers);
@@ -721,30 +726,30 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
721 case e1000_82575: 726 case e1000_82575:
722 case e1000_82576: 727 case e1000_82576:
723 case e1000_82580: 728 case e1000_82580:
724 case e1000_i354: 729 /* Use this format, unless EETRACK ID exists,
725 case e1000_i350: 730 * then use alternate format
726 case e1000_i210: 731 */
732 if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
733 hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
734 fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
735 >> NVM_MAJOR_SHIFT;
736 fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
737 >> NVM_MINOR_SHIFT;
738 fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
739 goto etrack_id;
740 }
727 break; 741 break;
728 default:
729 return;
730 }
731 /* basic eeprom version numbers */
732 hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
733 fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
734 fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
735
736 /* etrack id */
737 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
738 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
739 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
740
741 switch (hw->mac.type) {
742 case e1000_i210: 742 case e1000_i210:
743 case e1000_i354: 743 if (!(igb_get_flash_presence_i210(hw))) {
744 igb_read_invm_version(hw, fw_vers);
745 return;
746 }
747 /* fall through */
744 case e1000_i350: 748 case e1000_i350:
745 /* find combo image version */ 749 /* find combo image version */
746 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); 750 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
747 if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) { 751 if ((comb_offset != 0x0) &&
752 (comb_offset != NVM_VER_INVALID)) {
748 753
749 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset 754 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
750 + 1), 1, &comb_verh); 755 + 1), 1, &comb_verh);
@@ -760,15 +765,42 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
760 fw_vers->or_major = 765 fw_vers->or_major =
761 comb_verl >> NVM_COMB_VER_SHFT; 766 comb_verl >> NVM_COMB_VER_SHFT;
762 fw_vers->or_build = 767 fw_vers->or_build =
763 ((comb_verl << NVM_COMB_VER_SHFT) 768 (comb_verl << NVM_COMB_VER_SHFT)
764 | (comb_verh >> NVM_COMB_VER_SHFT)); 769 | (comb_verh >> NVM_COMB_VER_SHFT);
765 fw_vers->or_patch = 770 fw_vers->or_patch =
766 comb_verh & NVM_COMB_VER_MASK; 771 comb_verh & NVM_COMB_VER_MASK;
767 } 772 }
768 } 773 }
769 break; 774 break;
770 default: 775 default:
771 break; 776 return;
777 }
778 hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
779 fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
780 >> NVM_MAJOR_SHIFT;
781
782 /* check for old style version format in newer images*/
783 if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
784 eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
785 } else {
786 eeprom_verl = (fw_version & NVM_MINOR_MASK)
787 >> NVM_MINOR_SHIFT;
788 }
789 /* Convert minor value to hex before assigning to output struct
790 * Val to be converted will not be higher than 99, per tool output
791 */
792 q = eeprom_verl / NVM_HEX_CONV;
793 hval = q * NVM_HEX_TENS;
794 rem = eeprom_verl % NVM_HEX_CONV;
795 result = hval + rem;
796 fw_vers->eep_minor = result;
797
798etrack_id:
799 if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
800 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
801 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
802 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
803 | eeprom_verl;
772 } 804 }
773 return; 805 return;
774} 806}
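
The new minor-version handling above treats the EEPROM minor field as two
decimal digits stored in hex nibbles, so a raw 0x23 must be reported as
decimal 23 rather than 35; that is what the q/hval/rem arithmetic with
NVM_HEX_CONV (16) and NVM_HEX_TENS (10) computes. A two-line standalone
check:

    #include <stdint.h>
    #include <stdio.h>

    /* (v / 16) recovers the tens digit, (v % 16) the ones digit. */
    static uint8_t bcd_minor(uint8_t v)
    {
            return (uint8_t)((v / 16) * 10 + (v % 16));
    }

    int main(void)
    {
            printf("0x23 -> %u\n", bcd_minor(0x23)); /* 23 */
            printf("0x09 -> %u\n", bcd_minor(0x09)); /*  9 */
            return 0;
    }
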
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 6bfc0c43aace..433b7419cb98 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -44,6 +44,7 @@ struct e1000_fw_version {
44 u32 etrack_id; 44 u32 etrack_id;
45 u16 eep_major; 45 u16 eep_major;
46 u16 eep_minor; 46 u16 eep_minor;
47 u16 eep_build;
47 48
48 u8 invm_major; 49 u8 invm_major;
49 u8 invm_minor; 50 u8 invm_minor;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 60461946f98c..e7266759a10b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -731,15 +731,13 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
731 s32 ret_val; 731 s32 ret_val;
732 u16 phy_data; 732 u16 phy_data;
733 733
734 if (phy->reset_disable) { 734 if (phy->reset_disable)
735 ret_val = 0; 735 return 0;
736 goto out;
737 }
738 736
739 /* Enable CRS on Tx. This must be set for half-duplex operation. */ 737 /* Enable CRS on Tx. This must be set for half-duplex operation. */
740 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 738 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
741 if (ret_val) 739 if (ret_val)
742 goto out; 740 return ret_val;
743 741
744 /* Options: 742 /* Options:
745 * MDI/MDI-X = 0 (default) 743 * MDI/MDI-X = 0 (default)
@@ -780,23 +778,36 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
780 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 778 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
781 779
782 /* Enable downshift and setting it to X6 */ 780 /* Enable downshift and setting it to X6 */
781 if (phy->id == M88E1543_E_PHY_ID) {
782 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
783 ret_val =
784 phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
785 if (ret_val)
786 return ret_val;
787
788 ret_val = igb_phy_sw_reset(hw);
789 if (ret_val) {
790 hw_dbg("Error committing the PHY changes\n");
791 return ret_val;
792 }
793 }
794
783 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; 795 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
784 phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; 796 phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
785 phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; 797 phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
786 798
787 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 799 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
788 if (ret_val) 800 if (ret_val)
789 goto out; 801 return ret_val;
790 802
791 /* Commit the changes. */ 803 /* Commit the changes. */
792 ret_val = igb_phy_sw_reset(hw); 804 ret_val = igb_phy_sw_reset(hw);
793 if (ret_val) { 805 if (ret_val) {
794 hw_dbg("Error committing the PHY changes\n"); 806 hw_dbg("Error committing the PHY changes\n");
795 goto out; 807 return ret_val;
796 } 808 }
797 809
798out: 810 return 0;
799 return ret_val;
800} 811}
801 812
802/** 813/**
@@ -1806,7 +1817,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1806 phy->max_cable_length = phy_data / (is_cm ? 100 : 1); 1817 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1807 phy->cable_length = phy_data / (is_cm ? 100 : 1); 1818 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1808 break; 1819 break;
1809 case M88E1545_E_PHY_ID: 1820 case M88E1543_E_PHY_ID:
1810 case I347AT4_E_PHY_ID: 1821 case I347AT4_E_PHY_ID:
1811 /* Remember the original page select and set it to 7 */ 1822 /* Remember the original page select and set it to 7 */
1812 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, 1823 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 15ea8dc9dad3..6807b098edae 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -343,6 +343,8 @@ struct hwmon_buff {
343 }; 343 };
344#endif 344#endif
345 345
346#define IGB_RETA_SIZE 128
347
346/* board specific private data structure */ 348/* board specific private data structure */
347struct igb_adapter { 349struct igb_adapter {
348 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 350 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -444,6 +446,10 @@ struct igb_adapter {
444 struct i2c_algo_bit_data i2c_algo; 446 struct i2c_algo_bit_data i2c_algo;
445 struct i2c_adapter i2c_adap; 447 struct i2c_adapter i2c_adap;
446 struct i2c_client *i2c_client; 448 struct i2c_client *i2c_client;
449 u32 rss_indir_tbl_init;
450 u8 rss_indir_tbl[IGB_RETA_SIZE];
451
452 unsigned long link_check_timeout;
447}; 453};
448 454
449#define IGB_FLAG_HAS_MSI (1 << 0) 455#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -455,6 +461,7 @@ struct igb_adapter {
455#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) 461#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
456#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) 462#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
457#define IGB_FLAG_WOL_SUPPORTED (1 << 8) 463#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
464#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
458 465
459/* DMA Coalescing defines */ 466/* DMA Coalescing defines */
460#define IGB_MIN_TXPBSIZE 20408 467#define IGB_MIN_TXPBSIZE 20408
@@ -480,6 +487,7 @@ extern int igb_up(struct igb_adapter *);
 extern void igb_down(struct igb_adapter *);
 extern void igb_reinit_locked(struct igb_adapter *);
 extern void igb_reset(struct igb_adapter *);
+extern void igb_write_rss_indir_tbl(struct igb_adapter *);
 extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
 extern int igb_setup_tx_resources(struct igb_ring *);
 extern int igb_setup_rx_resources(struct igb_ring *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 85fe7b52f435..48cbc833b051 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -172,10 +172,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 			  SUPPORTED_Autoneg |
 			  SUPPORTED_Pause);
 	ecmd->advertising = ADVERTISED_FIBRE;
-	if (hw->mac.type == e1000_i354) {
-		ecmd->supported |= SUPPORTED_2500baseX_Full;
-		ecmd->advertising |= ADVERTISED_2500baseX_Full;
-	}
+
 	if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
 		ecmd->supported |= SUPPORTED_1000baseT_Full;
 		ecmd->advertising |= ADVERTISED_1000baseT_Full;
@@ -209,16 +206,23 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	status = rd32(E1000_STATUS);
 
 	if (status & E1000_STATUS_LU) {
-		if ((hw->mac.type == e1000_i354) &&
-		    (status & E1000_STATUS_2P5_SKU) &&
-		    !(status & E1000_STATUS_2P5_SKU_OVER))
-			ecmd->speed = SPEED_2500;
-		else if (status & E1000_STATUS_SPEED_1000)
+		if (hw->mac.type == e1000_i354) {
+			if ((status & E1000_STATUS_2P5_SKU) &&
+			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
+				ecmd->supported = SUPPORTED_2500baseX_Full;
+				ecmd->advertising = ADVERTISED_2500baseX_Full;
+				ecmd->speed = SPEED_2500;
+			} else {
+				ecmd->supported = SUPPORTED_1000baseT_Full;
+				ecmd->advertising = ADVERTISED_1000baseT_Full;
+			}
+		} else if (status & E1000_STATUS_SPEED_1000) {
 			ecmd->speed = SPEED_1000;
-		else if (status & E1000_STATUS_SPEED_100)
+		} else if (status & E1000_STATUS_SPEED_100) {
 			ecmd->speed = SPEED_100;
-		else
+		} else {
 			ecmd->speed = SPEED_10;
+		}
 		if ((status & E1000_STATUS_FD) ||
 		    hw->phy.media_type != e1000_media_type_copper)
 			ecmd->duplex = DUPLEX_FULL;
@@ -1335,12 +1339,23 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 
 static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 {
+	struct e1000_hw *hw = &adapter->hw;
+
 	*data = 0;
 
-	/* Validate eeprom on all parts but i211 */
-	if (adapter->hw.mac.type != e1000_i211) {
+	/* Validate eeprom on all parts but flashless */
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		if (igb_get_flash_presence_i210(hw)) {
+			if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
+				*data = 2;
+		}
+		break;
+	default:
 		if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
 			*data = 2;
+		break;
 	}
 
 	return *data;
@@ -2672,7 +2687,9 @@ static int igb_set_eee(struct net_device *netdev,
 		igb_set_eee_i350(hw);
 
 		/* reset link */
-		if (!netif_running(netdev))
+		if (netif_running(netdev))
+			igb_reinit_locked(adapter);
+		else
 			igb_reset(adapter);
 	}
 
@@ -2771,6 +2788,90 @@ static void igb_ethtool_complete(struct net_device *netdev)
 	pm_runtime_put(&adapter->pdev->dev);
 }
 
+static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
+{
+	return IGB_RETA_SIZE;
+}
+
+static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	for (i = 0; i < IGB_RETA_SIZE; i++)
+		indir[i] = adapter->rss_indir_tbl[i];
+
+	return 0;
+}
+
+void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 reg = E1000_RETA(0);
+	u32 shift = 0;
+	int i = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82575:
+		shift = 6;
+		break;
+	case e1000_82576:
+		/* 82576 supports 2 RSS queues for SR-IOV */
+		if (adapter->vfs_allocated_count)
+			shift = 3;
+		break;
+	default:
+		break;
+	}
+
+	while (i < IGB_RETA_SIZE) {
+		u32 val = 0;
+		int j;
+
+		for (j = 3; j >= 0; j--) {
+			val <<= 8;
+			val |= adapter->rss_indir_tbl[i + j];
+		}
+
+		wr32(reg, val << shift);
+		reg += 4;
+		i += 4;
+	}
+}
+
+static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int i;
+	u32 num_queues;
+
+	num_queues = adapter->rss_queues;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		/* 82576 supports 2 RSS queues for SR-IOV */
+		if (adapter->vfs_allocated_count)
+			num_queues = 2;
+		break;
+	default:
+		break;
+	}
+
+	/* Verify user input. */
+	for (i = 0; i < IGB_RETA_SIZE; i++)
+		if (indir[i] >= num_queues)
+			return -EINVAL;
+
+
+	for (i = 0; i < IGB_RETA_SIZE; i++)
+		adapter->rss_indir_tbl[i] = indir[i];
+
+	igb_write_rss_indir_tbl(adapter);
+
+	return 0;
+}
+
 static const struct ethtool_ops igb_ethtool_ops = {
 	.get_settings		= igb_get_settings,
 	.set_settings		= igb_set_settings,
@@ -2804,6 +2905,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
 	.set_eee		= igb_set_eee,
 	.get_module_info	= igb_get_module_info,
 	.get_module_eeprom	= igb_get_module_eeprom,
+	.get_rxfh_indir_size	= igb_get_rxfh_indir_size,
+	.get_rxfh_indir		= igb_get_rxfh_indir,
+	.set_rxfh_indir		= igb_set_rxfh_indir,
 	.begin			= igb_ethtool_begin,
 	.complete		= igb_ethtool_complete,
 };
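The new igb_write_rss_indir_tbl() above packs four one-byte queue indices into each 32-bit RETA register, least-significant byte first, and then shifts the whole word by a device-dependent amount (6 on 82575, 3 on 82576 with SR-IOV); the shift stays safe because on those parts each index is small enough that its shifted bits remain inside their own byte. A minimal standalone sketch of the same packing, outside the driver; pack_reta() and the plain array stand in for the wr32() register writes:

#include <stdint.h>
#include <stdio.h>

#define RETA_SIZE 128

/* Pack a 128-entry, one-byte-per-entry indirection table into 32
 * little-endian 32-bit words, mirroring igb_write_rss_indir_tbl().
 * 'shift' mimics the device-dependent queue-index shift (0, 3 or 6);
 * with indices below 4, shifted bits never cross a byte boundary.
 */
static void pack_reta(const uint8_t tbl[RETA_SIZE],
		      uint32_t regs[RETA_SIZE / 4], unsigned int shift)
{
	for (int i = 0; i < RETA_SIZE; i += 4) {
		uint32_t val = 0;

		for (int j = 3; j >= 0; j--) {	/* byte 3 ends up most significant */
			val <<= 8;
			val |= tbl[i + j];
		}
		regs[i / 4] = val << shift;
	}
}

int main(void)
{
	uint8_t tbl[RETA_SIZE];
	uint32_t regs[RETA_SIZE / 4];

	for (int i = 0; i < RETA_SIZE; i++)
		tbl[i] = i % 4;			/* spread over 4 queues */

	pack_reta(tbl, regs, 0);
	printf("RETA(0) = 0x%08x\n", (unsigned)regs[0]);	/* 0x03020100 */
	return 0;
}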
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1d72c03cb59..8cf44f2a8ccd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -62,7 +62,7 @@
 
 #define MAJ 5
 #define MIN 0
-#define BUILD 3
+#define BUILD 5
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -85,6 +85,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
@@ -1013,7 +1015,7 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
 	adapter->q_vector[v_idx] = NULL;
 	netif_napi_del(&q_vector->napi);
 
-	/* ixgbe_get_stats64() might access the rings on this vector,
+	/* igb_get_stats64() might access the rings on this vector,
 	 * we must wait a grace period before freeing it.
 	 */
 	kfree_rcu(q_vector, rcu);
@@ -1669,6 +1671,8 @@ void igb_down(struct igb_adapter *adapter)
 
 	igb_irq_disable(adapter);
 
+	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
 	for (i = 0; i < adapter->num_q_vectors; i++) {
 		napi_synchronize(&(adapter->q_vector[i]->napi));
 		napi_disable(&(adapter->q_vector[i]->napi));
@@ -1929,12 +1933,17 @@ void igb_set_fw_version(struct igb_adapter *adapter)
 	igb_get_fw_version(hw, &fw);
 
 	switch (hw->mac.type) {
+	case e1000_i210:
 	case e1000_i211:
-		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
-			 "%2d.%2d-%d",
-			 fw.invm_major, fw.invm_minor, fw.invm_img_type);
-		break;
-
+		if (!(igb_get_flash_presence_i210(hw))) {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%2d.%2d-%d",
+				 fw.invm_major, fw.invm_minor,
+				 fw.invm_img_type);
+			break;
+		}
+		/* fall through */
 	default:
 		/* if option is rom valid, display its version too */
 		if (fw.or_valid) {
@@ -1944,11 +1953,16 @@ void igb_set_fw_version(struct igb_adapter *adapter)
 				 fw.eep_major, fw.eep_minor, fw.etrack_id,
 				 fw.or_major, fw.or_build, fw.or_patch);
 		/* no option rom */
-		} else {
+		} else if (fw.etrack_id != 0X0000) {
 			snprintf(adapter->fw_version,
 				 sizeof(adapter->fw_version),
 				 "%d.%d, 0x%08x",
 				 fw.eep_major, fw.eep_minor, fw.etrack_id);
+		} else {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%d.%d.%d",
+				 fw.eep_major, fw.eep_minor, fw.eep_build);
 		}
 		break;
 	}
@@ -2166,15 +2180,28 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	hw->mac.ops.reset_hw(hw);
 
-	/* make sure the NVM is good , i211 parts have special NVM that
-	 * doesn't contain a checksum
+	/* make sure the NVM is good , i211/i210 parts can have special NVM
+	 * that doesn't contain a checksum
 	 */
-	if (hw->mac.type != e1000_i211) {
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		if (igb_get_flash_presence_i210(hw)) {
+			if (hw->nvm.ops.validate(hw) < 0) {
+				dev_err(&pdev->dev,
+					"The NVM Checksum Is Not Valid\n");
+				err = -EIO;
+				goto err_eeprom;
+			}
+		}
+		break;
+	default:
 		if (hw->nvm.ops.validate(hw) < 0) {
 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
 			err = -EIO;
 			goto err_eeprom;
 		}
+		break;
 	}
 
 	/* copy the MAC address out of the NVM */
@@ -2342,7 +2369,14 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			 "Width x1" : "unknown"), netdev->dev_addr);
 	}
 
-	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+	if ((hw->mac.type >= e1000_i210 ||
+	     igb_get_flash_presence_i210(hw))) {
+		ret_val = igb_read_part_string(hw, part_str,
+					       E1000_PBANUM_LENGTH);
+	} else {
+		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	}
+
 	if (ret_val)
 		strcpy(part_str, "Unknown");
 	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
@@ -2436,6 +2470,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
 	int err = 0;
 	int i;
 
+	if (!adapter->msix_entries) {
+		err = -EPERM;
+		goto out;
+	}
+
 	if (!num_vfs)
 		goto out;
 	else if (old_vfs && old_vfs == num_vfs)
@@ -3096,7 +3135,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 mrqc, rxcsum;
-	u32 j, num_rx_queues, shift = 0;
+	u32 j, num_rx_queues;
 	static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
 					0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
 					0xA32DCB77, 0x0CF23080, 0x3BB7426A,
@@ -3109,35 +3148,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 	num_rx_queues = adapter->rss_queues;
 
 	switch (hw->mac.type) {
-	case e1000_82575:
-		shift = 6;
-		break;
 	case e1000_82576:
 		/* 82576 supports 2 RSS queues for SR-IOV */
-		if (adapter->vfs_allocated_count) {
-			shift = 3;
+		if (adapter->vfs_allocated_count)
 			num_rx_queues = 2;
-		}
 		break;
 	default:
 		break;
 	}
 
-	/* Populate the indirection table 4 entries at a time. To do this
-	 * we are generating the results for n and n+2 and then interleaving
-	 * those with the results with n+1 and n+3.
-	 */
-	for (j = 0; j < 32; j++) {
-		/* first pass generates n and n+2 */
-		u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
-		u32 reta = (base & 0x07800780) >> (7 - shift);
-
-		/* second pass generates n+1 and n+3 */
-		base += 0x00010001 * num_rx_queues;
-		reta |= (base & 0x07800780) << (1 + shift);
-
-		wr32(E1000_RETA(j), reta);
+	if (adapter->rss_indir_tbl_init != num_rx_queues) {
+		for (j = 0; j < IGB_RETA_SIZE; j++)
+			adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+		adapter->rss_indir_tbl_init = num_rx_queues;
 	}
+	igb_write_rss_indir_tbl(adapter);
 
 	/* Disable raw packet checksumming so that RSS hash is placed in
 	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
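The hunk above drops the interleaved bit-trick fill in favor of a plain even spread: entry j gets queue (j * num_rx_queues) / IGB_RETA_SIZE, which slices the 128 entries into num_rx_queues equal contiguous runs. A tiny standalone demonstration of that arithmetic:

#include <stdio.h>

#define RETA_SIZE 128

int main(void)
{
	unsigned int num_rx_queues = 4;

	/* Same formula as the new default fill in igb_setup_mrqc():
	 * with 4 queues, entries 0-31 map to queue 0, 32-63 to 1, etc.
	 */
	for (unsigned int j = 0; j < RETA_SIZE; j += 32)
		printf("entry %3u -> queue %u\n", j,
		       (j * num_rx_queues) / RETA_SIZE);
	return 0;
}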
@@ -3844,7 +3869,6 @@ bool igb_has_link(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	bool link_active = false;
-	s32 ret_val = 0;
 
 	/* get_link_status is set on LSC (link status) interrupt or
 	 * rx sequence error interrupt. get_link_status will stay
@@ -3853,22 +3877,28 @@ bool igb_has_link(struct igb_adapter *adapter)
 	 */
 	switch (hw->phy.media_type) {
 	case e1000_media_type_copper:
-		if (hw->mac.get_link_status) {
-			ret_val = hw->mac.ops.check_for_link(hw);
-			link_active = !hw->mac.get_link_status;
-		} else {
-			link_active = true;
-		}
-		break;
+		if (!hw->mac.get_link_status)
+			return true;
 	case e1000_media_type_internal_serdes:
-		ret_val = hw->mac.ops.check_for_link(hw);
-		link_active = hw->mac.serdes_has_link;
+		hw->mac.ops.check_for_link(hw);
+		link_active = !hw->mac.get_link_status;
 		break;
 	default:
 	case e1000_media_type_unknown:
 		break;
 	}
 
+	if (((hw->mac.type == e1000_i210) ||
+	     (hw->mac.type == e1000_i211)) &&
+	     (hw->phy.id == I210_I_PHY_ID)) {
+		if (!netif_carrier_ok(adapter->netdev)) {
+			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+			adapter->link_check_timeout = jiffies;
+		}
+	}
+
 	return link_active;
 }
 
@@ -3913,6 +3943,14 @@ static void igb_watchdog_task(struct work_struct *work)
 	int i;
 
 	link = igb_has_link(adapter);
+
+	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+		else
+			link = false;
+	}
+
 	if (link) {
 		/* Cancel scheduled suspend requests. */
 		pm_runtime_resume(netdev->dev.parent);
@@ -4037,9 +4075,14 @@ static void igb_watchdog_task(struct work_struct *work)
 	igb_ptp_rx_hang(adapter);
 
 	/* Reset the timer */
-	if (!test_bit(__IGB_DOWN, &adapter->state))
-		mod_timer(&adapter->watchdog_timer,
-			  round_jiffies(jiffies + 2 * HZ));
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + HZ));
+		else
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + 2 * HZ));
+	}
 }
 
 enum latency_range {
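The two watchdog hunks above implement a debounce for the I210/I211 internal PHY: once a suspect transition sets IGB_FLAG_NEED_LINK_UPDATE, the driver reports the link as down until the state has held for one second, and the watchdog reschedules at HZ instead of 2*HZ while the flag is pending. A simplified model of that logic under assumed 1000-tick seconds; all names here are illustrative, and time_after() mirrors the kernel macro for wrapping tick counters:

#include <stdbool.h>
#include <stdint.h>

#define HZ 1000U

static bool time_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;	/* true if a is later than b */
}

struct link_state {
	bool need_update;		/* models IGB_FLAG_NEED_LINK_UPDATE */
	uint32_t check_timeout;		/* models link_check_timeout */
};

/* Suppress a raw link-up report until one full second has elapsed
 * since the debounce flag was stamped.
 */
static bool debounced_link(struct link_state *s, bool raw_link, uint32_t now)
{
	if (s->need_update) {
		if (time_after(now, s->check_timeout + HZ))
			s->need_update = false;	/* held steady for 1s */
		else
			raw_link = false;	/* still suppressing */
	}
	return raw_link;
}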
@@ -4814,6 +4857,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EINVAL;
 	}
 
+	/* adjust max frame to be at least the size of a standard frame */
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
+
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
 		msleep(1);
 
@@ -4865,6 +4912,8 @@ void igb_update_stats(struct igb_adapter *adapter,
 
 	bytes = 0;
 	packets = 0;
+
+	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		u32 rqdpc = rd32(E1000_RQDPC(i));
 		struct igb_ring *ring = adapter->rx_ring[i];
@@ -4900,6 +4949,7 @@ void igb_update_stats(struct igb_adapter *adapter,
 	}
 	net_stats->tx_bytes = bytes;
 	net_stats->tx_packets = packets;
+	rcu_read_unlock();
 
 	/* read stats registers */
 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7e8c477b0ab9..5a54e3dc535d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -97,14 +97,14 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
 {
 	struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
 	struct e1000_hw *hw = &igb->hw;
+	u32 lo, hi;
 	u64 val;
-	u32 lo, hi, jk;
 
 	/* The timestamp latches on lowest register read. For the 82580
 	 * the lowest register is SYSTIMR instead of SYSTIML. However we only
 	 * need to provide nanosecond resolution, so we just ignore it.
 	 */
-	jk = rd32(E1000_SYSTIMR);
+	rd32(E1000_SYSTIMR);
 	lo = rd32(E1000_SYSTIML);
 	hi = rd32(E1000_SYSTIMH);
 
@@ -118,13 +118,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
 static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 sec, nsec, jk;
+	u32 sec, nsec;
 
 	/* The timestamp latches on lowest register read. For I210/I211, the
 	 * lowest register is SYSTIMR. Since we only need to provide nanosecond
 	 * resolution, we can ignore it.
 	 */
-	jk = rd32(E1000_SYSTIMR);
+	rd32(E1000_SYSTIMR);
 	nsec = rd32(E1000_SYSTIML);
 	sec = rd32(E1000_SYSTIMH);
 
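Both igb_ptp.c hunks drop the dummy jk variable but keep the read of SYSTIMR itself, because the hardware latches SYSTIML/SYSTIMH on the lowest-address read; the ordering matters, not the value. A self-contained sketch of the pattern; the struct and field names are illustrative stand-ins for the real MMIO registers:

#include <stdint.h>

/* Illustrative only: these fields stand in for E1000_SYSTIMR/L/H.
 * 'volatile' keeps the compiler from eliding the latching read whose
 * result is intentionally discarded.
 */
struct systim_regs {
	volatile uint32_t systimr;	/* sub-ns residue, latches on read */
	volatile uint32_t systiml;
	volatile uint32_t systimh;
};

static uint64_t read_systim(struct systim_regs *r)
{
	uint32_t lo, hi;

	(void)r->systimr;	/* latch the timestamp; value ignored */
	lo = r->systiml;
	hi = r->systimh;
	return ((uint64_t)hi << 32) | lo;
}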
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index fce3e92f9d11..9f6b236828e6 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -718,8 +718,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
 
-	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
-					GFP_KERNEL | __GFP_ZERO);
+	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					 GFP_KERNEL);
 	if (!txdr->desc) {
 		vfree(txdr->buffer_info);
 		return -ENOMEM;
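The ixgb change is behavior-neutral: it replaces the explicit GFP_KERNEL | __GFP_ZERO with the dma_zalloc_coherent() helper, which in kernels of this era was a thin inline wrapper that ORed __GFP_ZERO back in. Schematically, and assuming the usual linux/dma-mapping.h context rather than quoting it verbatim:

/* Schematic of the helper used above; not a verbatim copy. */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}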
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a6494e5daffe..0ac6b11c6e4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -618,9 +618,8 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	(u32)(1 << 7)
 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_ENABLED			(u32)(1 << 10)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 11)
-#define IXGBE_FLAG2_BRIDGE_MODE_VEB		(u32)(1 << 12)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 10)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB		(u32)(1 << 11)
 
 	/* Tx fast path data */
 	int num_tx_queues;
@@ -754,7 +753,7 @@ enum ixgbe_state_t {
 	__IXGBE_DOWN,
 	__IXGBE_SERVICE_SCHED,
 	__IXGBE_IN_SFP_INIT,
-	__IXGBE_READ_I2C,
+	__IXGBE_PTP_RUNNING,
 };
 
 struct ixgbe_cb {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4a5bfb6b3af0..a26f3fee4f35 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1018,8 +1018,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
 	u16 sfp_addr = 0;
 	u16 sfp_data = 0;
 	u16 sfp_stat = 0;
+	u16 gssr;
 	u32 i;
 
+	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+		gssr = IXGBE_GSSR_PHY1_SM;
+	else
+		gssr = IXGBE_GSSR_PHY0_SM;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+		return IXGBE_ERR_SWFW_SYNC;
+
 	if (hw->phy.type == ixgbe_phy_nl) {
 		/*
 		 * phy SDA/SCL registers are at addresses 0xC30A to
@@ -1028,17 +1037,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
 		 */
 		sfp_addr = (dev_addr << 8) + byte_offset;
 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
-		hw->phy.ops.write_reg(hw,
-				      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
-				      MDIO_MMD_PMAPMD,
-				      sfp_addr);
+		hw->phy.ops.write_reg_mdi(hw,
+					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+					  MDIO_MMD_PMAPMD,
+					  sfp_addr);
 
 		/* Poll status */
 		for (i = 0; i < 100; i++) {
-			hw->phy.ops.read_reg(hw,
-					     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
-					     MDIO_MMD_PMAPMD,
-					     &sfp_stat);
+			hw->phy.ops.read_reg_mdi(hw,
+						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+						MDIO_MMD_PMAPMD,
+						&sfp_stat);
 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
 				break;
@@ -1052,8 +1061,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
 		}
 
 		/* Read data */
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
-				     MDIO_MMD_PMAPMD, &sfp_data);
+		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+					MDIO_MMD_PMAPMD, &sfp_data);
 
 		*eeprom_data = (u8)(sfp_data >> 8);
 	} else {
@@ -1061,6 +1070,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
 	}
 
 out:
+	hw->mac.ops.release_swfw_sync(hw, gssr);
 	return status;
 }
 
@@ -1321,11 +1331,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
 
 static struct ixgbe_phy_operations phy_ops_82598 = {
 	.identify		= &ixgbe_identify_phy_generic,
-	.identify_sfp		= &ixgbe_identify_sfp_module_generic,
+	.identify_sfp		= &ixgbe_identify_module_generic,
 	.init			= &ixgbe_init_phy_ops_82598,
 	.reset			= &ixgbe_reset_phy_generic,
 	.read_reg		= &ixgbe_read_phy_reg_generic,
 	.write_reg		= &ixgbe_write_phy_reg_generic,
+	.read_reg_mdi		= &ixgbe_read_phy_reg_mdi,
+	.write_reg_mdi		= &ixgbe_write_phy_reg_mdi,
 	.setup_link		= &ixgbe_setup_phy_link_generic,
 	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic,
 	.read_i2c_sff8472	= &ixgbe_read_i2c_sff8472_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 0b82d38bc97d..007a0083a636 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -49,6 +49,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 					   ixgbe_link_speed speed,
 					   bool autoneg_wait_to_complete);
+static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
 				      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -58,6 +59,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 					  ixgbe_link_speed speed,
 					  bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				     u8 dev_addr, u8 *data);
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				      u8 dev_addr, u8 data);
 
 static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
 {
@@ -137,11 +142,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 		goto setup_sfp_out;
 	}
 
-	hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+	if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+		goto setup_sfp_err;
 	while (data_value != 0xffff) {
 		IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
 		IXGBE_WRITE_FLUSH(hw);
-		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+			goto setup_sfp_err;
 	}
 
 	/* Release the semaphore */
@@ -187,6 +194,17 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 
 setup_sfp_out:
 	return ret_val;
+
+setup_sfp_err:
+	/* Release the semaphore */
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+	/* Delay obtaining semaphore again to allow FW access,
+	 * semaphore_delay is in ms usleep_range needs us.
+	 */
+	usleep_range(hw->eeprom.semaphore_delay * 1000,
+		     hw->eeprom.semaphore_delay * 2000);
+	hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+	return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
 }
 
 static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
@@ -219,6 +237,25 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
 	struct ixgbe_mac_info *mac = &hw->mac;
 	struct ixgbe_phy_info *phy = &hw->phy;
 	s32 ret_val = 0;
+	u32 esdp;
+
+	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+		/* Store flag indicating I2C bus access control unit. */
+		hw->phy.qsfp_shared_i2c_bus = true;
+
+		/* Initialize access to QSFP+ I2C bus */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0_DIR;
+		esdp &= ~IXGBE_ESDP_SDP1_DIR;
+		esdp &= ~IXGBE_ESDP_SDP0;
+		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
+		phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
+	}
 
 	/* Identify the PHY or SFP module */
 	ret_val = phy->ops.identify(hw);
@@ -342,8 +379,13 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
 
 	if (hw->phy.multispeed_fiber) {
 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
 			  IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = true;
+
+		/* QSFP must not enable auto-negotiation */
+		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
+			*autoneg = false;
+		else
+			*autoneg = true;
 	}
 
 out:
@@ -397,6 +439,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 	case IXGBE_DEV_ID_82599_LS:
 		media_type = ixgbe_media_type_fiber_lco;
 		break;
+	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+		media_type = ixgbe_media_type_fiber_qsfp;
+		break;
 	default:
 		media_type = ixgbe_media_type_unknown;
 		break;
@@ -406,6 +451,24 @@ out:
 }
 
 /**
+ * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ * @hw: pointer to hardware structure
+ *
+ * Disables link, should be called during D3 power down sequence.
+ *
+ */
+static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+	u32 autoc2_reg;
+
+	if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+	}
+}
+
+/**
  * ixgbe_start_mac_link_82599 - Setup MAC link settings
  * @hw: pointer to hardware structure
  * @autoneg_wait_to_complete: true when waiting for completion is needed
@@ -527,6 +590,75 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 }
 
 /**
+ * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * We set the module speed differently for fixed fiber. For other
+ * multi-speed devices we don't have an error value so here if we
+ * detect an error we just log it and exit.
+ */
+static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+					ixgbe_link_speed speed)
+{
+	s32 status;
+	u8 rs, eeprom_data;
+
+	switch (speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		/* one bit mask same as setting on */
+		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+		break;
+	default:
+		hw_dbg(hw, "Invalid fixed module speed\n");
+		return;
+	}
+
+	/* Set RS0 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
+		goto out;
+	}
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
+		goto out;
+	}
+
+	/* Set RS1 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
+		goto out;
+	}
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
+		goto out;
+	}
+out:
+	return;
+}
+
+/**
  * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  * @hw: pointer to hardware structure
  * @speed: new link speed
@@ -573,9 +705,19 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 		goto out;
 
 	/* Set the module link speed */
-	esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-	IXGBE_WRITE_FLUSH(hw);
+	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber:
+		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+		IXGBE_WRITE_FLUSH(hw);
+		break;
+	case ixgbe_media_type_fiber_qsfp:
+		/* QSFP module automatically detects MAC link speed */
+		break;
+	default:
+		hw_dbg(hw, "Unexpected media type.\n");
+		break;
+	}
 
 	/* Allow module to change analog characteristics (1G->10G) */
 	msleep(40);
@@ -625,10 +767,24 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 		goto out;
 
 	/* Set the module link speed */
-	esdp_reg &= ~IXGBE_ESDP_SDP5;
-	esdp_reg |= IXGBE_ESDP_SDP5_DIR;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-	IXGBE_WRITE_FLUSH(hw);
+	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber_fixed:
+		ixgbe_set_fiber_fixed_speed(hw,
+					    IXGBE_LINK_SPEED_1GB_FULL);
+		break;
+	case ixgbe_media_type_fiber:
+		esdp_reg &= ~IXGBE_ESDP_SDP5;
+		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+		IXGBE_WRITE_FLUSH(hw);
+		break;
+	case ixgbe_media_type_fiber_qsfp:
+		/* QSFP module automatically detects MAC link speed */
+		break;
+	default:
+		hw_dbg(hw, "Unexpected media type.\n");
+		break;
+	}
 
 	/* Allow module to change analog characteristics (10G->1G) */
 	msleep(40);
@@ -1872,7 +2028,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
 		goto out;
 	else
-		status = ixgbe_identify_sfp_module_generic(hw);
+		status = ixgbe_identify_module_generic(hw);
 	}
 
 	/* Set PHY type none if no PHY detected */
@@ -1978,10 +2134,12 @@ sfp_check:
 	switch (hw->phy.type) {
 	case ixgbe_phy_sfp_passive_tyco:
 	case ixgbe_phy_sfp_passive_unknown:
+	case ixgbe_phy_qsfp_passive_unknown:
 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
 		break;
 	case ixgbe_phy_sfp_ftl_active:
 	case ixgbe_phy_sfp_active_unknown:
+	case ixgbe_phy_qsfp_active_unknown:
 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
 		break;
 	case ixgbe_phy_sfp_avago:
@@ -1999,6 +2157,15 @@ sfp_check:
 	else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
 		physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
 		break;
+	case ixgbe_phy_qsfp_intel:
+	case ixgbe_phy_qsfp_unknown:
+		hw->phy.ops.read_i2c_eeprom(hw,
+			IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		break;
 	default:
 		break;
 	}
@@ -2045,6 +2212,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_ERR_EEPROM_VERSION;
 	u16 fw_offset, fw_ptp_cfg_offset;
+	u16 offset;
 	u16 fw_version = 0;
 
 	/* firmware check is only necessary for SFI devices */
@@ -2054,29 +2222,35 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
 	}
 
 	/* get the offset to the Firmware Module block */
-	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+	offset = IXGBE_FW_PTR;
+	if (hw->eeprom.ops.read(hw, offset, &fw_offset))
+		goto fw_version_err;
 
 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
 		goto fw_version_out;
 
 	/* get the offset to the Pass Through Patch Configuration block */
-	hw->eeprom.ops.read(hw, (fw_offset +
-			    IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
-			    &fw_ptp_cfg_offset);
+	offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
+	if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
+		goto fw_version_err;
 
 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
 		goto fw_version_out;
 
 	/* get the firmware version */
-	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
-			    IXGBE_FW_PATCH_VERSION_4),
-			    &fw_version);
+	offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
+	if (hw->eeprom.ops.read(hw, offset, &fw_version))
+		goto fw_version_err;
 
 	if (fw_version > 0x5)
 		status = 0;
 
 fw_version_out:
 	return status;
+
+fw_version_err:
+	hw_err(hw, "eeprom read at offset %d failed\n", offset);
+	return IXGBE_ERR_EEPROM_VERSION;
 }
 
 /**
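The pattern repeated through these ixgbe hunks is worth naming: every eeprom.ops.read() now has its return value checked, and the offset of the most recent read is kept in a local so a single error label can report exactly which word failed. A generic standalone sketch of that shape; eeprom_read() and the offsets are illustrative stand-ins, not the driver's API:

#include <stdint.h>
#include <stdio.h>

/* Checked-read pattern: remember the last attempted offset so one
 * error path can report it.
 */
typedef int (*eeprom_read_fn)(uint16_t offset, uint16_t *data);

static int read_fw_version(eeprom_read_fn eeprom_read, uint16_t *fw_version)
{
	uint16_t offset, fw_offset;

	offset = 0x0F;			/* stand-in for IXGBE_FW_PTR */
	if (eeprom_read(offset, &fw_offset))
		goto err;

	if (fw_offset == 0 || fw_offset == 0xFFFF)
		return -1;		/* no firmware block present */

	offset = fw_offset + 1;		/* stand-in for the config pointer */
	if (eeprom_read(offset, fw_version))
		goto err;

	return 0;
err:
	fprintf(stderr, "eeprom read at offset %d failed\n", offset);
	return -1;
}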
@@ -2236,6 +2410,112 @@ reset_pipeline_out:
 	return ret_val;
 }
 
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				     u8 dev_addr, u8 *data)
+{
+	u32 esdp;
+	s32 status;
+	s32 timeout = 200;
+
+	if (hw->phy.qsfp_shared_i2c_bus == true) {
+		/* Acquire I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		while (timeout) {
+			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+			if (esdp & IXGBE_ESDP_SDP1)
+				break;
+
+			usleep_range(5000, 10000);
+			timeout--;
+		}
+
+		if (!timeout) {
+			hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+			status = IXGBE_ERR_I2C;
+			goto release_i2c_access;
+		}
+	}
+
+	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+	if (hw->phy.qsfp_shared_i2c_bus == true) {
+		/* Release I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp &= ~IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				      u8 dev_addr, u8 data)
+{
+	u32 esdp;
+	s32 status;
+	s32 timeout = 200;
+
+	if (hw->phy.qsfp_shared_i2c_bus == true) {
+		/* Acquire I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		while (timeout) {
+			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+			if (esdp & IXGBE_ESDP_SDP1)
+				break;
+
+			usleep_range(5000, 10000);
+			timeout--;
+		}
+
+		if (!timeout) {
+			hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+			status = IXGBE_ERR_I2C;
+			goto release_i2c_access;
+		}
+	}
+
+	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+	if (hw->phy.qsfp_shared_i2c_bus == true) {
+		/* Release I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp &= ~IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return status;
+}
+
 static struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw		= &ixgbe_init_hw_generic,
 	.reset_hw		= &ixgbe_reset_hw_82599,
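On the 82599 QSFP SKU the module's I2C bus is shared with firmware, so the new byte accessors above arbitrate it through two software-defined pins: SDP0 is raised to request the bus, SDP1 is polled for the firmware's grant (up to 200 tries of 5-10 ms), and SDP0 is dropped again afterwards, including on timeout. A simplified standalone sketch of that handshake; read_esdp(), write_esdp() and sleep_ms() are stand-ins for the MMIO and delay helpers:

#include <stdbool.h>
#include <stdint.h>

#define SDP0	(1u << 0)	/* request line, driven by software */
#define SDP1	(1u << 1)	/* grant line, driven by firmware */

extern uint32_t read_esdp(void);
extern void write_esdp(uint32_t val);
extern void sleep_ms(unsigned int ms);

static bool acquire_shared_i2c(void)
{
	write_esdp(read_esdp() | SDP0);		/* assert request */

	for (int timeout = 200; timeout; timeout--) {
		if (read_esdp() & SDP1)		/* firmware granted bus */
			return true;
		sleep_ms(5);
	}
	write_esdp(read_esdp() & ~SDP0);	/* timed out: drop request */
	return false;
}

static void release_shared_i2c(void)
{
	write_esdp(read_esdp() & ~SDP0);	/* hand the bus back */
}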
@@ -2255,6 +2535,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.set_lan_id		= &ixgbe_set_lan_id_multi_port_pcie,
 	.read_analog_reg8	= &ixgbe_read_analog_reg8_82599,
 	.write_analog_reg8	= &ixgbe_write_analog_reg8_82599,
+	.stop_link_on_d3	= &ixgbe_stop_mac_link_on_d3_82599,
 	.setup_link		= &ixgbe_setup_mac_link_82599,
 	.set_rxpba		= &ixgbe_set_rxpba_generic,
 	.check_link		= &ixgbe_check_mac_link_generic,
@@ -2300,7 +2581,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
 
 static struct ixgbe_phy_operations phy_ops_82599 = {
 	.identify		= &ixgbe_identify_phy_82599,
-	.identify_sfp		= &ixgbe_identify_sfp_module_generic,
+	.identify_sfp		= &ixgbe_identify_module_generic,
 	.init			= &ixgbe_init_phy_ops_82599,
 	.reset			= &ixgbe_reset_phy_generic,
 	.read_reg		= &ixgbe_read_phy_reg_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bcdeb89af5a..b5c434b617b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,17 +65,42 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
  * function check the device id to see if the associated phy supports
  * autoneg flow control.
  **/
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
 {
+	bool supported = false;
+	ixgbe_link_speed speed;
+	bool link_up;
 
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_X540T:
-	case IXGBE_DEV_ID_X540T1:
-	case IXGBE_DEV_ID_82599_T3_LOM:
-		return 0;
+	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber_fixed:
+	case ixgbe_media_type_fiber:
+		hw->mac.ops.check_link(hw, &speed, &link_up, false);
+		/* if link is down, assume supported */
+		if (link_up)
+			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+				true : false;
+		else
+			supported = true;
+		break;
+	case ixgbe_media_type_backplane:
+		supported = true;
+		break;
+	case ixgbe_media_type_copper:
+		/* only some copper devices support flow control autoneg */
+		switch (hw->device_id) {
+		case IXGBE_DEV_ID_82599_T3_LOM:
+		case IXGBE_DEV_ID_X540T:
+		case IXGBE_DEV_ID_X540T1:
+			supported = true;
+			break;
+		default:
+			break;
+		}
 	default:
-		return IXGBE_ERR_FC_NOT_SUPPORTED;
+		break;
 	}
+
+	return supported;
 }
 
 /**
@@ -114,6 +139,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
 	 */
 	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber_fixed:
 	case ixgbe_media_type_fiber:
 	case ixgbe_media_type_backplane:
 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -234,7 +260,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
 					IXGBE_GSSR_MAC_CSR_SM);
 
 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-		   (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+		   ixgbe_device_supports_autoneg_fc(hw)) {
 		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
 				      MDIO_MMD_AN, reg_cu);
 	}
@@ -2380,6 +2406,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 
 	switch (hw->phy.media_type) {
 	/* Autoneg flow control on fiber adapters */
+	case ixgbe_media_type_fiber_fixed:
 	case ixgbe_media_type_fiber:
 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
 			ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2392,7 +2419,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 
 	/* Autoneg flow control on copper adapters */
 	case ixgbe_media_type_copper:
-		if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+		if (ixgbe_device_supports_autoneg_fc(hw))
 			ret_val = ixgbe_fc_autoneg_copper(hw);
 		break;
 
@@ -2479,42 +2506,39 @@ out:
  **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 {
-	u32 gssr;
+	u32 gssr = 0;
 	u32 swmask = mask;
 	u32 fwmask = mask << 5;
-	s32 timeout = 200;
+	u32 timeout = 200;
+	u32 i;
 
-	while (timeout) {
+	for (i = 0; i < timeout; i++) {
 		/*
-		 * SW EEPROM semaphore bit is used for access to all
-		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
+		 * SW NVM semaphore bit is used for access to all
+		 * SW_FW_SYNC bits (not just NVM)
 		 */
 		if (ixgbe_get_eeprom_semaphore(hw))
 			return IXGBE_ERR_SWFW_SYNC;
 
 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
-		if (!(gssr & (fwmask | swmask)))
-			break;
-
-		/*
-		 * Firmware currently using resource (fwmask) or other software
-		 * thread currently using resource (swmask)
-		 */
-		ixgbe_release_eeprom_semaphore(hw);
-		usleep_range(5000, 10000);
-		timeout--;
-	}
-
-	if (!timeout) {
-		hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
-		return IXGBE_ERR_SWFW_SYNC;
+		if (!(gssr & (fwmask | swmask))) {
+			gssr |= swmask;
+			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+			ixgbe_release_eeprom_semaphore(hw);
+			return 0;
+		} else {
+			/* Resource is currently in use by FW or SW */
+			ixgbe_release_eeprom_semaphore(hw);
+			usleep_range(5000, 10000);
+		}
 	}
 
-	gssr |= swmask;
-	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+	/* If time expired clear the bits holding the lock and retry */
+	if (gssr & (fwmask | swmask))
+		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
 
-	ixgbe_release_eeprom_semaphore(hw);
-	return 0;
+	usleep_range(5000, 10000);
+	return IXGBE_ERR_SWFW_SYNC;
 }
 
 /**
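The rewritten acquire path above bounds the wait with a counted loop, claims the GSSR bit while still holding the EEPROM semaphore, and, if every attempt saw the resource busy, force-clears the stuck SW/FW bits so a crashed peer cannot wedge the lock permanently. A simplified standalone sketch of that structure; take_eeprom_semaphore(), read_gssr() and friends are stand-ins for the driver's primitives:

#include <stdbool.h>
#include <stdint.h>

extern bool take_eeprom_semaphore(void);
extern void drop_eeprom_semaphore(void);
extern uint32_t read_gssr(void);
extern void write_gssr(uint32_t val);
extern void sleep_ms(unsigned int ms);

static int acquire_swfw(uint32_t swmask, uint32_t fwmask)
{
	uint32_t gssr = 0;

	for (int i = 0; i < 200; i++) {
		if (!take_eeprom_semaphore())
			return -1;

		gssr = read_gssr();
		if (!(gssr & (fwmask | swmask))) {
			write_gssr(gssr | swmask);	/* claim the bit */
			drop_eeprom_semaphore();
			return 0;
		}
		drop_eeprom_semaphore();		/* busy: retry */
		sleep_ms(5);
	}

	/* Timed out: clear bits a dead owner may have left behind. */
	if (gssr & (fwmask | swmask))
		write_gssr(read_gssr() & ~(gssr & (fwmask | swmask)));
	return -1;
}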
@@ -2716,13 +2740,19 @@ out:
 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
 					 u16 *san_mac_offset)
 {
+	s32 ret_val;
+
 	/*
 	 * First read the EEPROM pointer to see if the MAC addresses are
 	 * available.
 	 */
-	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+				      san_mac_offset);
+	if (ret_val)
+		hw_err(hw, "eeprom read at offset %d failed\n",
+		       IXGBE_SAN_MAC_ADDR_PTR);
 
-	return 0;
+	return ret_val;
 }
 
 /**
@@ -2739,23 +2769,16 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
 {
 	u16 san_mac_data, san_mac_offset;
 	u8 i;
+	s32 ret_val;
 
 	/*
 	 * First read the EEPROM pointer to see if the MAC addresses are
 	 * available. If they're not, no point in calling set_lan_id() here.
 	 */
-	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
-
-	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
-		/*
-		 * No addresses available in this EEPROM. It's not an
-		 * error though, so just wipe the local address and return.
-		 */
-		for (i = 0; i < 6; i++)
-			san_mac_addr[i] = 0xFF;
-
-		goto san_mac_addr_out;
-	}
+	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+		goto san_mac_addr_clr;
 
 	/* make sure we know which port we need to program */
 	hw->mac.ops.set_lan_id(hw);
@@ -2763,14 +2786,26 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2763 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2786 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2764 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2787 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2765 for (i = 0; i < 3; i++) { 2788 for (i = 0; i < 3; i++) {
2766 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); 2789 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
2790 &san_mac_data);
2791 if (ret_val) {
2792 hw_err(hw, "eeprom read at offset %d failed\n",
2793 san_mac_offset);
2794 goto san_mac_addr_clr;
2795 }
2767 san_mac_addr[i * 2] = (u8)(san_mac_data); 2796 san_mac_addr[i * 2] = (u8)(san_mac_data);
2768 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2797 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2769 san_mac_offset++; 2798 san_mac_offset++;
2770 } 2799 }
2771
2772san_mac_addr_out:
2773 return 0; 2800 return 0;
2801
2802san_mac_addr_clr:
2803 /* No addresses available in this EEPROM. It's not necessarily an
2804 * error though, so just wipe the local address and return.
2805 */
2806 for (i = 0; i < 6; i++)
2807 san_mac_addr[i] = 0xFF;
2808 return ret_val;
2774} 2809}
2775 2810
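The reworked error path above funnels every failure into san_mac_addr_clr, which wipes the address to all-0xFF (the "no address" convention) before returning. A tiny hedged illustration of that convention; mac_is_wiped is a made-up helper, not kernel API:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Mark an address slot as empty the way the driver does on any
 * EEPROM read failure: all bytes forced to 0xFF. */
static void wipe_mac(unsigned char *mac)
{
        memset(mac, 0xFF, ETH_ALEN);
}

/* Hypothetical check a consumer could apply before using the address. */
static int mac_is_wiped(const unsigned char *mac)
{
        for (int i = 0; i < ETH_ALEN; i++)
                if (mac[i] != 0xFF)
                        return 0;
        return 1;
}

int main(void)
{
        unsigned char san_mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0x01, 0x02, 0x03 };

        wipe_mac(san_mac);                      /* simulate a failed read */
        printf("wiped: %s\n", mac_is_wiped(san_mac) ? "yes" : "no");
        return 0;
}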
2776/** 2811/**
@@ -3219,8 +3254,9 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3219 *wwpn_prefix = 0xFFFF; 3254 *wwpn_prefix = 0xFFFF;
3220 3255
3221 /* check if alternative SAN MAC is supported */ 3256 /* check if alternative SAN MAC is supported */
3222 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, 3257 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
3223 &alt_san_mac_blk_offset); 3258 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3259 goto wwn_prefix_err;
3224 3260
3225 if ((alt_san_mac_blk_offset == 0) || 3261 if ((alt_san_mac_blk_offset == 0) ||
3226 (alt_san_mac_blk_offset == 0xFFFF)) 3262 (alt_san_mac_blk_offset == 0xFFFF))
@@ -3228,19 +3264,26 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3228 3264
3229 /* check capability in alternative san mac address block */ 3265 /* check capability in alternative san mac address block */
3230 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3266 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3231 hw->eeprom.ops.read(hw, offset, &caps); 3267 if (hw->eeprom.ops.read(hw, offset, &caps))
3268 goto wwn_prefix_err;
3232 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3269 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3233 goto wwn_prefix_out; 3270 goto wwn_prefix_out;
3234 3271
3235 /* get the corresponding prefix for WWNN/WWPN */ 3272 /* get the corresponding prefix for WWNN/WWPN */
3236 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 3273 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3237 hw->eeprom.ops.read(hw, offset, wwnn_prefix); 3274 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
3275 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3238 3276
3239 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 3277 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3240 hw->eeprom.ops.read(hw, offset, wwpn_prefix); 3278 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3279 goto wwn_prefix_err;
3241 3280
3242wwn_prefix_out: 3281wwn_prefix_out:
3243 return 0; 3282 return 0;
3283
3284wwn_prefix_err:
3285 hw_err(hw, "eeprom read at offset %d failed\n", offset);
3286 return 0;
3244} 3287}
3245 3288
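Several of these hunks share one pattern: read a 16-bit pointer word from the EEPROM, and treat 0x0000 or 0xFFFF (blank or erased flash) as "block not present" before following it. A minimal sketch of that sentinel check against a fake EEPROM image; read_word and the offsets are stand-ins, not the ixgbe eeprom.ops API:

#include <stdint.h>
#include <stdio.h>

static const uint16_t fake_eeprom[] = { 0x0004, 0xFFFF, 0x0000, 0x1234, 0xBEEF };

static int read_word(unsigned off, uint16_t *val)
{
        if (off >= sizeof(fake_eeprom) / sizeof(fake_eeprom[0]))
                return -1;                      /* simulated read failure */
        *val = fake_eeprom[off];
        return 0;
}

/* Follow a pointer word only if it is a plausible offset. */
static int read_block_word(unsigned ptr_off, uint16_t *out)
{
        uint16_t blk;

        if (read_word(ptr_off, &blk))
                return -1;                      /* read failed: report it */
        if (blk == 0x0000 || blk == 0xFFFF)
                return 1;                       /* no block: not an error */
        return read_word(blk, out);
}

int main(void)
{
        uint16_t v;
        int rc = read_block_word(0, &v);        /* pointer at 0 -> offset 4 */

        if (rc == 0)
                printf("block word = 0x%04X\n", v);   /* prints 0xBEEF */
        else
                printf("no block (rc=%d)\n", rc);
        return 0;
}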
3246/** 3289/**
@@ -3754,7 +3797,11 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3754 u8 sensor_index; 3797 u8 sensor_index;
3755 u8 sensor_location; 3798 u8 sensor_location;
3756 3799
3757 hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor); 3800 if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
3801 hw_err(hw, "eeprom read at offset %d failed\n",
3802 ets_offset + 1 + i);
3803 continue;
3804 }
3758 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> 3805 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3759 IXGBE_ETS_DATA_INDEX_SHIFT); 3806 IXGBE_ETS_DATA_INDEX_SHIFT);
3760 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> 3807 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
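The sensor parse just above unpacks two fields from one EEPROM word with the usual (word & MASK) >> SHIFT idiom. A standalone sketch with made-up mask/shift values (the real IXGBE_ETS_DATA_* constants live in the hardware headers):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: bits 11:8 = sensor index, bits 7:5 = location. */
#define DATA_INDEX_MASK  0x0F00
#define DATA_INDEX_SHIFT 8
#define DATA_LOC_MASK    0x00E0
#define DATA_LOC_SHIFT   5

int main(void)
{
        uint16_t ets_sensor = 0x0360;   /* encodes index 3, location 3 */
        uint8_t index = (ets_sensor & DATA_INDEX_MASK) >> DATA_INDEX_SHIFT;
        uint8_t loc   = (ets_sensor & DATA_LOC_MASK) >> DATA_LOC_SHIFT;

        printf("sensor index=%u location=%u\n", index, loc);
        return 0;
}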
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 22eee38868f1..d259dc76604e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -80,7 +80,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
80s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); 80s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
81s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 81s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
82s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); 82s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
83s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); 83bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
84void ixgbe_fc_autoneg(struct ixgbe_hw *hw); 84void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
85 85
86s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 86s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -143,8 +143,12 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
143 143
144#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) 144#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
145 145
146#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
147
146#define hw_dbg(hw, format, arg...) \ 148#define hw_dbg(hw, format, arg...) \
147 netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg) 149 netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg)
150#define hw_err(hw, format, arg...) \
151 netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg)
148#define e_dev_info(format, arg...) \ 152#define e_dev_info(format, arg...) \
149 dev_info(&adapter->pdev->dev, format, ## arg) 153 dev_info(&adapter->pdev->dev, format, ## arg)
150#define e_dev_warn(format, arg...) \ 154#define e_dev_warn(format, arg...) \
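The header hunk above introduces ixgbe_hw_to_netdev() so that hw_dbg and the new hw_err share one back-pointer walk from struct ixgbe_hw to the net_device. A userspace approximation of that macro layering, with fprintf standing in for netdev_dbg/netdev_err and all struct names invented:

#include <stdio.h>

struct fake_netdev { const char *name; };
struct fake_adapter { struct fake_netdev *netdev; };
struct fake_hw { void *back; };     /* points at the owning adapter */

#define hw_to_netdev(hw) (((struct fake_adapter *)(hw)->back)->netdev)
#define hw_err(hw, fmt, ...) \
        fprintf(stderr, "%s: " fmt, hw_to_netdev(hw)->name, ##__VA_ARGS__)

int main(void)
{
        struct fake_netdev nd = { .name = "eth0" };
        struct fake_adapter ad = { .netdev = &nd };
        struct fake_hw hw = { .back = &ad };

        hw_err(&hw, "eeprom read at offset %d failed\n", 0x24);
        return 0;
}

Centralizing the cast in one helper macro keeps the two logging macros identical except for severity, which is why the hunk adds hw_err as a one-liner.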
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 24e2e7aafda2..0e1b973659b0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -311,9 +311,6 @@ static int ixgbe_set_settings(struct net_device *netdev,
311 * this function does not support duplex forcing, but can 311 * this function does not support duplex forcing, but can
312 * limit the advertising of the adapter to the specified speed 312 * limit the advertising of the adapter to the specified speed
313 */ 313 */
314 if (ecmd->autoneg == AUTONEG_DISABLE)
315 return -EINVAL;
316
317 if (ecmd->advertising & ~ecmd->supported) 314 if (ecmd->advertising & ~ecmd->supported)
318 return -EINVAL; 315 return -EINVAL;
319 316
@@ -355,10 +352,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
355 struct ixgbe_adapter *adapter = netdev_priv(netdev); 352 struct ixgbe_adapter *adapter = netdev_priv(netdev);
356 struct ixgbe_hw *hw = &adapter->hw; 353 struct ixgbe_hw *hw = &adapter->hw;
357 354
358 if (hw->fc.disable_fc_autoneg) 355 if (ixgbe_device_supports_autoneg_fc(hw) &&
359 pause->autoneg = 0; 356 !hw->fc.disable_fc_autoneg)
360 else
361 pause->autoneg = 1; 357 pause->autoneg = 1;
358 else
359 pause->autoneg = 0;
362 360
363 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 361 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
364 pause->rx_pause = 1; 362 pause->rx_pause = 1;
@@ -384,7 +382,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
384 382
385 /* some devices do not support autoneg of link flow control */ 383 /* some devices do not support autoneg of link flow control */
386 if ((pause->autoneg == AUTONEG_ENABLE) && 384 if ((pause->autoneg == AUTONEG_ENABLE) &&
387 (ixgbe_device_supports_autoneg_fc(hw) != 0)) 385 !ixgbe_device_supports_autoneg_fc(hw))
388 return -EINVAL; 386 return -EINVAL;
389 387
390 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); 388 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
@@ -1048,7 +1046,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1048 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1046 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1049 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1047 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1050 } 1048 }
1051 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { 1049 for (j = 0; j < netdev->num_tx_queues; j++) {
1052 ring = adapter->tx_ring[j]; 1050 ring = adapter->tx_ring[j];
1053 if (!ring) { 1051 if (!ring) {
1054 data[i] = 0; 1052 data[i] = 0;
@@ -1140,11 +1138,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1140 sprintf(p, "tx_queue_%u_bytes", i); 1138 sprintf(p, "tx_queue_%u_bytes", i);
1141 p += ETH_GSTRING_LEN; 1139 p += ETH_GSTRING_LEN;
1142#ifdef LL_EXTENDED_STATS 1140#ifdef LL_EXTENDED_STATS
1143 sprintf(p, "tx_q_%u_napi_yield", i); 1141 sprintf(p, "tx_queue_%u_ll_napi_yield", i);
1144 p += ETH_GSTRING_LEN; 1142 p += ETH_GSTRING_LEN;
1145 sprintf(p, "tx_q_%u_misses", i); 1143 sprintf(p, "tx_queue_%u_ll_misses", i);
1146 p += ETH_GSTRING_LEN; 1144 p += ETH_GSTRING_LEN;
1147 sprintf(p, "tx_q_%u_cleaned", i); 1145 sprintf(p, "tx_queue_%u_ll_cleaned", i);
1148 p += ETH_GSTRING_LEN; 1146 p += ETH_GSTRING_LEN;
1149#endif /* LL_EXTENDED_STATS */ 1147#endif /* LL_EXTENDED_STATS */
1150 } 1148 }
@@ -1154,11 +1152,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1154 sprintf(p, "rx_queue_%u_bytes", i); 1152 sprintf(p, "rx_queue_%u_bytes", i);
1155 p += ETH_GSTRING_LEN; 1153 p += ETH_GSTRING_LEN;
1156#ifdef LL_EXTENDED_STATS 1154#ifdef LL_EXTENDED_STATS
1157 sprintf(p, "rx_q_%u_ll_poll_yield", i); 1155 sprintf(p, "rx_queue_%u_ll_poll_yield", i);
1158 p += ETH_GSTRING_LEN; 1156 p += ETH_GSTRING_LEN;
1159 sprintf(p, "rx_q_%u_misses", i); 1157 sprintf(p, "rx_queue_%u_ll_misses", i);
1160 p += ETH_GSTRING_LEN; 1158 p += ETH_GSTRING_LEN;
1161 sprintf(p, "rx_q_%u_cleaned", i); 1159 sprintf(p, "rx_queue_%u_ll_cleaned", i);
1162 p += ETH_GSTRING_LEN; 1160 p += ETH_GSTRING_LEN;
1163#endif /* LL_EXTENDED_STATS */ 1161#endif /* LL_EXTENDED_STATS */
1164 } 1162 }
@@ -1884,11 +1882,12 @@ static void ixgbe_diag_test(struct net_device *netdev,
1884 struct ethtool_test *eth_test, u64 *data) 1882 struct ethtool_test *eth_test, u64 *data)
1885{ 1883{
1886 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1884 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1887 struct ixgbe_hw *hw = &adapter->hw;
1888 bool if_running = netif_running(netdev); 1885 bool if_running = netif_running(netdev);
1889 1886
1890 set_bit(__IXGBE_TESTING, &adapter->state); 1887 set_bit(__IXGBE_TESTING, &adapter->state);
1891 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1888 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1889 struct ixgbe_hw *hw = &adapter->hw;
1890
1892 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 1891 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1893 int i; 1892 int i;
1894 for (i = 0; i < adapter->num_vfs; i++) { 1893 for (i = 0; i < adapter->num_vfs; i++) {
@@ -1912,21 +1911,18 @@ static void ixgbe_diag_test(struct net_device *netdev,
1912 /* Offline tests */ 1911 /* Offline tests */
1913 e_info(hw, "offline testing starting\n"); 1912 e_info(hw, "offline testing starting\n");
1914 1913
1915 if (if_running)
1916 /* indicate we're in test mode */
1917 dev_close(netdev);
1918
1919 /* bringing adapter down disables SFP+ optics */
1920 if (hw->mac.ops.enable_tx_laser)
1921 hw->mac.ops.enable_tx_laser(hw);
1922
1923 /* Link test performed before hardware reset so autoneg doesn't 1914 /* Link test performed before hardware reset so autoneg doesn't
1924 * interfere with test result 1915 * interfere with test result
1925 */ 1916 */
1926 if (ixgbe_link_test(adapter, &data[4])) 1917 if (ixgbe_link_test(adapter, &data[4]))
1927 eth_test->flags |= ETH_TEST_FL_FAILED; 1918 eth_test->flags |= ETH_TEST_FL_FAILED;
1928 1919
1929 ixgbe_reset(adapter); 1920 if (if_running)
1921 /* indicate we're in test mode */
1922 dev_close(netdev);
1923 else
1924 ixgbe_reset(adapter);
1925
1930 e_info(hw, "register testing starting\n"); 1926 e_info(hw, "register testing starting\n");
1931 if (ixgbe_reg_test(adapter, &data[0])) 1927 if (ixgbe_reg_test(adapter, &data[0]))
1932 eth_test->flags |= ETH_TEST_FL_FAILED; 1928 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1963,13 +1959,11 @@ skip_loopback:
1963 clear_bit(__IXGBE_TESTING, &adapter->state); 1959 clear_bit(__IXGBE_TESTING, &adapter->state);
1964 if (if_running) 1960 if (if_running)
1965 dev_open(netdev); 1961 dev_open(netdev);
1962 else if (hw->mac.ops.disable_tx_laser)
1963 hw->mac.ops.disable_tx_laser(hw);
1966 } else { 1964 } else {
1967 e_info(hw, "online testing starting\n"); 1965 e_info(hw, "online testing starting\n");
1968 1966
1969 /* if adapter is down, SFP+ optics will be disabled */
1970 if (!if_running && hw->mac.ops.enable_tx_laser)
1971 hw->mac.ops.enable_tx_laser(hw);
1972
1973 /* Online tests */ 1967 /* Online tests */
1974 if (ixgbe_link_test(adapter, &data[4])) 1968 if (ixgbe_link_test(adapter, &data[4]))
1975 eth_test->flags |= ETH_TEST_FL_FAILED; 1969 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1983,9 +1977,6 @@ skip_loopback:
1983 clear_bit(__IXGBE_TESTING, &adapter->state); 1977 clear_bit(__IXGBE_TESTING, &adapter->state);
1984 } 1978 }
1985 1979
1986 /* if adapter was down, ensure SFP+ optics are disabled again */
1987 if (!if_running && hw->mac.ops.disable_tx_laser)
1988 hw->mac.ops.disable_tx_laser(hw);
1989skip_ol_tests: 1980skip_ol_tests:
1990 msleep_interruptible(4 * 1000); 1981 msleep_interruptible(4 * 1000);
1991} 1982}
@@ -2909,33 +2900,21 @@ static int ixgbe_get_module_info(struct net_device *dev,
2909 struct ixgbe_hw *hw = &adapter->hw; 2900 struct ixgbe_hw *hw = &adapter->hw;
2910 u32 status; 2901 u32 status;
2911 u8 sff8472_rev, addr_mode; 2902 u8 sff8472_rev, addr_mode;
2912 int ret_val = 0;
2913 bool page_swap = false; 2903 bool page_swap = false;
2914 2904
2915 /* avoid concurent i2c reads */
2916 while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
2917 msleep(100);
2918
2919 /* used by the service task */
2920 set_bit(__IXGBE_READ_I2C, &adapter->state);
2921
2922 /* Check whether we support SFF-8472 or not */ 2905 /* Check whether we support SFF-8472 or not */
2923 status = hw->phy.ops.read_i2c_eeprom(hw, 2906 status = hw->phy.ops.read_i2c_eeprom(hw,
2924 IXGBE_SFF_SFF_8472_COMP, 2907 IXGBE_SFF_SFF_8472_COMP,
2925 &sff8472_rev); 2908 &sff8472_rev);
2926 if (status != 0) { 2909 if (status != 0)
2927 ret_val = -EIO; 2910 return -EIO;
2928 goto err_out;
2929 }
2930 2911
2931 /* addressing mode is not supported */ 2912 /* addressing mode is not supported */
2932 status = hw->phy.ops.read_i2c_eeprom(hw, 2913 status = hw->phy.ops.read_i2c_eeprom(hw,
2933 IXGBE_SFF_SFF_8472_SWAP, 2914 IXGBE_SFF_SFF_8472_SWAP,
2934 &addr_mode); 2915 &addr_mode);
2935 if (status != 0) { 2916 if (status != 0)
2936 ret_val = -EIO; 2917 return -EIO;
2937 goto err_out;
2938 }
2939 2918
2940 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 2919 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
2941 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); 2920 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
@@ -2952,9 +2931,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
2952 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2931 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2953 } 2932 }
2954 2933
2955err_out: 2934 return 0;
2956 clear_bit(__IXGBE_READ_I2C, &adapter->state);
2957 return ret_val;
2958} 2935}
2959 2936
2960static int ixgbe_get_module_eeprom(struct net_device *dev, 2937static int ixgbe_get_module_eeprom(struct net_device *dev,
@@ -2966,51 +2943,27 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
2966 u32 status = IXGBE_ERR_PHY_ADDR_INVALID; 2943 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2967 u8 databyte = 0xFF; 2944 u8 databyte = 0xFF;
2968 int i = 0; 2945 int i = 0;
2969 int ret_val = 0;
2970 2946
2971 /* ixgbe_get_module_info is called before this function in all 2947 if (ee->len == 0)
2972 * cases, so we do not need any checks we already do above, 2948 return -EINVAL;
2973 * and can trust ee->len to be a known value.
2974 */
2975 2949
2976 while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 2950 for (i = ee->offset; i < ee->offset + ee->len; i++) {
2977	msleep(100);                               2951		/* I2C reads can take a long time */
2978 set_bit(__IXGBE_READ_I2C, &adapter->state); 2952 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
2979 2953 return -EBUSY;
2980 /* Read the first block, SFF-8079 */
2981 for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
2982 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
2983 if (status != 0) {
2984 /* Error occured while reading module */
2985 ret_val = -EIO;
2986 goto err_out;
2987 }
2988 data[i] = databyte;
2989 }
2990 2954
2991 /* If the second block is requested, check if SFF-8472 is supported. */ 2955 if (i < ETH_MODULE_SFF_8079_LEN)
2992 if (ee->len == ETH_MODULE_SFF_8472_LEN) { 2956 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
2993 if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP) 2957 else
2994 return -EOPNOTSUPP; 2958 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
2995
2996 /* Read the second block, SFF-8472 */
2997 for (i = ETH_MODULE_SFF_8079_LEN;
2998 i < ETH_MODULE_SFF_8472_LEN; i++) {
2999 status = hw->phy.ops.read_i2c_sff8472(hw,
3000 i - ETH_MODULE_SFF_8079_LEN, &databyte);
3001 if (status != 0) {
3002 /* Error occured while reading module */
3003 ret_val = -EIO;
3004 goto err_out;
3005 }
3006 data[i] = databyte;
3007 }
3008 }
3009 2959
3010err_out: 2960 if (status != 0)
3011 clear_bit(__IXGBE_READ_I2C, &adapter->state); 2961 return -EIO;
3012 2962
3013 return ret_val; 2963 data[i - ee->offset] = databyte;
2964 }
2965
2966 return 0;
3014} 2967}
3015 2968
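The rewritten ixgbe_get_module_eeprom above replaces two fixed block loops with a single pass over [offset, offset+len), choosing the lower (SFF-8079) or upper (SFF-8472) page per byte and bailing out early if SFP init grabs the bus. A hedged userspace model of that loop; the two read functions, their fill values, and the page length are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define PAGE_LEN 256                    /* assumed SFF-8079 lower page length */

static int read_lower(unsigned off, uint8_t *b) { *b = (uint8_t)off; return 0; }
static int read_upper(unsigned off, uint8_t *b) { *b = 0xA0 | (off & 0x0F); return 0; }

/* Copy [offset, offset+len) into data, one byte per i2c transaction,
 * switching read routine at the page boundary like the driver does. */
static int get_module_eeprom(unsigned offset, unsigned len, uint8_t *data)
{
        if (len == 0)
                return -1;              /* -EINVAL in the driver */

        for (unsigned i = offset; i < offset + len; i++) {
                int rc = (i < PAGE_LEN) ? read_lower(i, &data[i - offset])
                                        : read_upper(i, &data[i - offset]);
                if (rc)
                        return -1;      /* -EIO in the driver */
        }
        return 0;
}

int main(void)
{
        uint8_t buf[4];

        if (!get_module_eeprom(254, 4, buf))    /* straddles the page split */
                printf("%02X %02X %02X %02X\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}

Reading byte-by-byte also lets the driver honor arbitrary ee->offset requests, which the old fixed-block version could not.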
3016static const struct ethtool_ops ixgbe_ethtool_ops = { 2969static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index be4b1fb3d0d2..7aba452833e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
63static char ixgbe_default_device_descr[] = 63static char ixgbe_default_device_descr[] =
64 "Intel(R) 10 Gigabit Network Connection"; 64 "Intel(R) 10 Gigabit Network Connection";
65#endif 65#endif
66#define DRV_VERSION "3.13.10-k" 66#define DRV_VERSION "3.15.1-k"
67const char ixgbe_driver_version[] = DRV_VERSION; 67const char ixgbe_driver_version[] = DRV_VERSION;
68static const char ixgbe_copyright[] = 68static const char ixgbe_copyright[] =
69 "Copyright (c) 1999-2013 Intel Corporation."; 69 "Copyright (c) 1999-2013 Intel Corporation.";
@@ -109,6 +109,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, 109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, 110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, 111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, 113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, 114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, 115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
@@ -195,6 +196,86 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
195 return 0; 196 return 0;
196} 197}
197 198
199/**
200 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
201 * @hw: hw specific details
202 *
203 * This function is used by probe to determine whether a device's PCI-Express
204 * bandwidth details should be gathered from the parent bus instead of from the
205 * device itself. It centralizes the device ID checks so that all call sites
206 * behave consistently.
207 */
208static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
209{
210 switch (hw->device_id) {
211 case IXGBE_DEV_ID_82599_SFP_SF_QP:
212 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
213 return true;
214 default:
215 return false;
216 }
217}
218
219static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
220 int expected_gts)
221{
222 int max_gts = 0;
223 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
224 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
225 struct pci_dev *pdev;
226
227	/* determine whether to use the parent device
228 */
229 if (ixgbe_pcie_from_parent(&adapter->hw))
230 pdev = adapter->pdev->bus->parent->self;
231 else
232 pdev = adapter->pdev;
233
234 if (pcie_get_minimum_link(pdev, &speed, &width) ||
235 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
236 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
237 return;
238 }
239
240 switch (speed) {
241 case PCIE_SPEED_2_5GT:
242 /* 8b/10b encoding reduces max throughput by 20% */
243 max_gts = 2 * width;
244 break;
245 case PCIE_SPEED_5_0GT:
246 /* 8b/10b encoding reduces max throughput by 20% */
247 max_gts = 4 * width;
248 break;
249 case PCIE_SPEED_8_0GT:
250 /* 128b/130b encoding only reduces throughput by 1% */
251 max_gts = 8 * width;
252 break;
253 default:
254 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
255 return;
256 }
257
258 e_dev_info("PCI Express bandwidth of %dGT/s available\n",
259 max_gts);
260 e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
261 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
262 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
263 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
264 "Unknown"),
265 width,
266 (speed == PCIE_SPEED_2_5GT ? "20%" :
267 speed == PCIE_SPEED_5_0GT ? "20%" :
268 speed == PCIE_SPEED_8_0GT ? "N/a" :
269 "Unknown"));
270
271 if (max_gts < expected_gts) {
272 e_dev_warn("This is not sufficient for optimal performance of this card.\n");
273 e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
274 expected_gts);
275 e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
276 }
277}
278
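For reference, the max_gts arithmetic above folds encoding overhead into a per-lane multiplier: 2.5 GT/s and 5.0 GT/s links use 8b/10b encoding (20% overhead, leaving 2 and 4 usable GT/s per lane), while 8.0 GT/s uses 128b/130b (about 1%, rounded up to 8 here). A standalone sketch of the same computation:

#include <stdio.h>

/* Usable GT/s per lane after encoding overhead, mirroring the driver's
 * switch: 8b/10b costs 20%, 128b/130b is treated as lossless here. */
static int usable_gts_per_lane(double raw_gts)
{
        if (raw_gts == 2.5)
                return 2;
        if (raw_gts == 5.0)
                return 4;
        if (raw_gts == 8.0)
                return 8;
        return 0;               /* unknown link speed */
}

int main(void)
{
        double speeds[] = { 2.5, 5.0, 8.0 };
        int widths[] = { 1, 4, 8 };

        for (int s = 0; s < 3; s++)
                for (int w = 0; w < 3; w++)
                        printf("%.1fGT/s x%d -> %d GT/s usable\n",
                               speeds[s], widths[w],
                               usable_gts_per_lane(speeds[s]) * widths[w]);
        return 0;
}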
198static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) 279static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
199{ 280{
200 if (!test_bit(__IXGBE_DOWN, &adapter->state) && 281 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -3724,8 +3805,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3724 hw->addr_ctrl.user_set_promisc = true; 3805 hw->addr_ctrl.user_set_promisc = true;
3725 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3806 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3726 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); 3807 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3727 /* don't hardware filter vlans in promisc mode */ 3808 /* Only disable hardware filter vlans in promiscuous mode
3728 ixgbe_vlan_filter_disable(adapter); 3809 * if SR-IOV and VMDQ are disabled - otherwise ensure
3810 * that hardware VLAN filters remain enabled.
3811 */
3812 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3813 IXGBE_FLAG_SRIOV_ENABLED)))
3814 ixgbe_vlan_filter_disable(adapter);
3815 else
3816 ixgbe_vlan_filter_enable(adapter);
3729 } else { 3817 } else {
3730 if (netdev->flags & IFF_ALLMULTI) { 3818 if (netdev->flags & IFF_ALLMULTI) {
3731 fctrl |= IXGBE_FCTRL_MPE; 3819 fctrl |= IXGBE_FCTRL_MPE;
@@ -4087,6 +4175,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
4087 case ixgbe_phy_sfp_passive_unknown: 4175 case ixgbe_phy_sfp_passive_unknown:
4088 case ixgbe_phy_sfp_active_unknown: 4176 case ixgbe_phy_sfp_active_unknown:
4089 case ixgbe_phy_sfp_ftl_active: 4177 case ixgbe_phy_sfp_ftl_active:
4178 case ixgbe_phy_qsfp_passive_unknown:
4179 case ixgbe_phy_qsfp_active_unknown:
4180 case ixgbe_phy_qsfp_intel:
4181 case ixgbe_phy_qsfp_unknown:
4090 return true; 4182 return true;
4091 case ixgbe_phy_nl: 4183 case ixgbe_phy_nl:
4092 if (hw->mac.type == ixgbe_mac_82598EB) 4184 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4352,7 +4444,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4352 if (hw->mac.san_mac_rar_index) 4444 if (hw->mac.san_mac_rar_index)
4353 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); 4445 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
4354 4446
4355 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) 4447 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
4356 ixgbe_ptp_reset(adapter); 4448 ixgbe_ptp_reset(adapter);
4357} 4449}
4358 4450
@@ -4714,8 +4806,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4714 ixgbe_pbthresh_setup(adapter); 4806 ixgbe_pbthresh_setup(adapter);
4715 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 4807 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4716 hw->fc.send_xon = true; 4808 hw->fc.send_xon = true;
4717 hw->fc.disable_fc_autoneg = 4809 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
4718 (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
4719 4810
4720#ifdef CONFIG_PCI_IOV 4811#ifdef CONFIG_PCI_IOV
4721 /* assign number of SR-IOV VFs */ 4812 /* assign number of SR-IOV VFs */
@@ -5205,6 +5296,9 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5205 return retval; 5296 return retval;
5206 5297
5207#endif 5298#endif
5299 if (hw->mac.ops.stop_link_on_d3)
5300 hw->mac.ops.stop_link_on_d3(hw);
5301
5208 if (wufc) { 5302 if (wufc) {
5209 ixgbe_set_rx_mode(netdev); 5303 ixgbe_set_rx_mode(netdev);
5210 5304
@@ -5681,7 +5775,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5681 5775
5682 adapter->last_rx_ptp_check = jiffies; 5776 adapter->last_rx_ptp_check = jiffies;
5683 5777
5684 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) 5778 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5685 ixgbe_ptp_start_cyclecounter(adapter); 5779 ixgbe_ptp_start_cyclecounter(adapter);
5686 5780
5687 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 5781 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@ -5727,7 +5821,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5727 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) 5821 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
5728 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; 5822 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5729 5823
5730 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) 5824 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5731 ixgbe_ptp_start_cyclecounter(adapter); 5825 ixgbe_ptp_start_cyclecounter(adapter);
5732 5826
5733 e_info(drv, "NIC Link is Down\n"); 5827 e_info(drv, "NIC Link is Down\n");
@@ -5826,10 +5920,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5826 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) 5920 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5827 return; 5921 return;
5828 5922
5829 /* concurent i2c reads are not supported */
5830 if (test_bit(__IXGBE_READ_I2C, &adapter->state))
5831 return;
5832
5833 /* someone else is in init, wait until next service event */ 5923 /* someone else is in init, wait until next service event */
5834 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 5924 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5835 return; 5925 return;
@@ -6038,7 +6128,7 @@ static void ixgbe_service_task(struct work_struct *work)
6038 ixgbe_fdir_reinit_subtask(adapter); 6128 ixgbe_fdir_reinit_subtask(adapter);
6039 ixgbe_check_hang_subtask(adapter); 6129 ixgbe_check_hang_subtask(adapter);
6040 6130
6041 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) { 6131 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
6042 ixgbe_ptp_overflow_check(adapter); 6132 ixgbe_ptp_overflow_check(adapter);
6043 ixgbe_ptp_rx_hang(adapter); 6133 ixgbe_ptp_rx_hang(adapter);
6044 } 6134 }
@@ -7247,6 +7337,42 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7247}; 7337};
7248 7338
7249/** 7339/**
7340 * ixgbe_enumerate_functions - Get the number of ports this device has
7341 * @adapter: adapter structure
7342 *
7343 * This function enumerates the physical functions co-located on a single slot,
7344 * in order to determine how many ports a device has. This is most useful in
7345 * determining the PCIe bandwidth in GT/s required for optimal
7346 * performance.
7347 **/
7348static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
7349{
7350 struct ixgbe_hw *hw = &adapter->hw;
7351 struct list_head *entry;
7352 int physfns = 0;
7353
7354	/* Some cards cannot use the generic method of counting PCIe functions, and
7355 * so must be hardcoded to the correct value.
7356 */
7357 switch (hw->device_id) {
7358 case IXGBE_DEV_ID_82599_SFP_SF_QP:
7359 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
7360 physfns = 4;
7361 break;
7362 default:
7363 list_for_each(entry, &adapter->pdev->bus_list) {
7364 struct pci_dev *pdev =
7365 list_entry(entry, struct pci_dev, bus_list);
7366 /* don't count virtual functions */
7367 if (!pdev->is_virtfn)
7368 physfns++;
7369 }
7370 }
7371
7372 return physfns;
7373}
7374
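ixgbe_enumerate_functions above counts the non-virtual PCI functions sharing the adapter's bus, with a hardcoded 4 for the SF_QP/QSFP parts whose extra functions are not visible that way. A minimal model of the generic counting branch; the device list here is a plain array, not the kernel's bus_list:

#include <stdbool.h>
#include <stdio.h>

struct fake_pci_dev {
        bool is_virtfn;         /* SR-IOV virtual functions are skipped */
};

static int count_physfns(const struct fake_pci_dev *devs, int n)
{
        int physfns = 0;

        for (int i = 0; i < n; i++)
                if (!devs[i].is_virtfn)
                        physfns++;
        return physfns;
}

int main(void)
{
        /* two ports plus four VFs hanging off the same bus */
        struct fake_pci_dev bus[] = {
                { false }, { false },
                { true }, { true }, { true }, { true },
        };

        printf("physical functions: %d\n",
               count_physfns(bus, sizeof(bus) / sizeof(bus[0])));
        return 0;
}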
7375/**
7250 * ixgbe_wol_supported - Check whether device supports WoL 7376 * ixgbe_wol_supported - Check whether device supports WoL
7251 * @hw: hw specific details 7377 * @hw: hw specific details
7252 * @device_id: the device ID 7378 * @device_id: the device ID
@@ -7328,7 +7454,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7328 struct ixgbe_hw *hw; 7454 struct ixgbe_hw *hw;
7329 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 7455 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
7330 static int cards_found; 7456 static int cards_found;
7331 int i, err, pci_using_dac; 7457 int i, err, pci_using_dac, expected_gts;
7332 unsigned int indices = MAX_TX_QUEUES; 7458 unsigned int indices = MAX_TX_QUEUES;
7333 u8 part_str[IXGBE_PBANUM_LENGTH]; 7459 u8 part_str[IXGBE_PBANUM_LENGTH];
7334#ifdef IXGBE_FCOE 7460#ifdef IXGBE_FCOE
@@ -7483,10 +7609,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7483 hw->mac.type == ixgbe_mac_82598EB) { 7609 hw->mac.type == ixgbe_mac_82598EB) {
7484 err = 0; 7610 err = 0;
7485 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 7611 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
7486 e_dev_err("failed to load because an unsupported SFP+ " 7612 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
7487 "module type was detected.\n"); 7613 e_dev_err("Reload the driver after installing a supported module.\n");
7488 e_dev_err("Reload the driver after installing a supported "
7489 "module.\n");
7490 goto err_sw_init; 7614 goto err_sw_init;
7491 } else if (err) { 7615 } else if (err) {
7492 e_dev_err("HW Init failed: %d\n", err); 7616 e_dev_err("HW Init failed: %d\n", err);
@@ -7617,7 +7741,7 @@ skip_sriov:
7617 7741
7618 /* pick up the PCI bus settings for reporting later */ 7742 /* pick up the PCI bus settings for reporting later */
7619 hw->mac.ops.get_bus_info(hw); 7743 hw->mac.ops.get_bus_info(hw);
7620 if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) 7744 if (ixgbe_pcie_from_parent(hw))
7621 ixgbe_get_parent_bus_info(adapter); 7745 ixgbe_get_parent_bus_info(adapter);
7622 7746
7623 /* print bus type/speed/width info */ 7747 /* print bus type/speed/width info */
@@ -7643,12 +7767,20 @@ skip_sriov:
7643 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", 7767 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
7644 hw->mac.type, hw->phy.type, part_str); 7768 hw->mac.type, hw->phy.type, part_str);
7645 7769
7646 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 7770 /* calculate the expected PCIe bandwidth required for optimal
7647 e_dev_warn("PCI-Express bandwidth available for this card is " 7771 * performance. Note that some older parts will never have enough
7648 "not sufficient for optimal performance.\n"); 7772 * bandwidth due to being older generation PCIe parts. We clamp these
7649 e_dev_warn("For optimal performance a x8 PCI-Express slot " 7773 * parts to ensure no warning is displayed if it can't be fixed.
7650 "is required.\n"); 7774 */
7775 switch (hw->mac.type) {
7776 case ixgbe_mac_82598EB:
7777 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
7778 break;
7779 default:
7780 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
7781 break;
7651 } 7782 }
7783 ixgbe_check_minimum_link(adapter, expected_gts);
7652 7784
7653 /* reset the hardware with the new settings */ 7785 /* reset the hardware with the new settings */
7654 err = hw->mac.ops.start_hw(hw); 7786 err = hw->mac.ops.start_hw(hw);
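Putting the probe-time pieces together: each port is assumed to need 10 GT/s, so expected_gts is ports times 10 (clamped to 16 on 82598, which can never do better), and ixgbe_check_minimum_link compares that with what the slot delivers. A worked check under those assumptions:

#include <stdio.h>

/* Usable GT/s per lane: 8b/10b below Gen3, treated as lossless at Gen3. */
static int per_lane(double raw) { return raw == 2.5 ? 2 : raw == 5.0 ? 4 : 8; }

int main(void)
{
        int ports = 2;                          /* dual-port 82599 example */
        int expected_gts = ports * 10;          /* 20 GT/s wanted */

        int slot_gts = per_lane(5.0) * 4;       /* Gen2 x4 slot: 16 GT/s */
        printf("Gen2 x4: %d GT/s -> %s\n", slot_gts,
               slot_gts < expected_gts ? "warn" : "ok");

        slot_gts = per_lane(5.0) * 8;           /* Gen2 x8 slot: 32 GT/s */
        printf("Gen2 x8: %d GT/s -> %s\n", slot_gts,
               slot_gts < expected_gts ? "warn" : "ok");
        return 0;
}

So a dual-port board in a Gen2 x4 slot triggers the new warning, while a Gen2 x8 slot has headroom, which matches the messages ixgbe_check_minimum_link prints.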
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e5691ccbce9d..e4c676006be9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -204,7 +204,83 @@ out:
204} 204}
205 205
206/** 206/**
207 * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without
208 * the SWFW lock
209 * @hw: pointer to hardware structure
210 * @reg_addr: 32 bit address of PHY register to read
211 * @phy_data: Pointer to read data from PHY register
212 **/
213s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
214 u16 *phy_data)
215{
216 u32 i, data, command;
217
218 /* Setup and write the address cycle command */
219 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
220 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
221 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
222 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
223
224 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
225
226 /* Check every 10 usec to see if the address cycle completed.
227 * The MDI Command bit will clear when the operation is
228 * complete
229 */
230 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
231 udelay(10);
232
233 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
234 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
235 break;
236 }
237
238
239 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
240 hw_dbg(hw, "PHY address command did not complete.\n");
241 return IXGBE_ERR_PHY;
242 }
243
244 /* Address cycle complete, setup and write the read
245 * command
246 */
247 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
248 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
249 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
250 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
251
252 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
253
254 /* Check every 10 usec to see if the address cycle
255 * completed. The MDI Command bit will clear when the
256 * operation is complete
257 */
258 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
259 udelay(10);
260
261 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
262 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
263 break;
264 }
265
266 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
267 hw_dbg(hw, "PHY read command didn't complete\n");
268 return IXGBE_ERR_PHY;
269 }
270
271 /* Read operation is complete. Get the data
272 * from MSRWD
273 */
274 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
275 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
276 *phy_data = (u16)(data);
277
278 return 0;
279}
280
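The new lock-free ixgbe_read_phy_reg_mdi follows a strict two-phase MDIO protocol: write an address-cycle command, poll the MDI command bit every 10 usec until it clears (or times out), then issue the read command and poll again before pulling the result out of the data register. A hedged sketch of the polling helper that both phases share; reg_read/reg_write and the bit values are stand-ins for the MMIO accessors:

#include <stdint.h>
#include <stdio.h>

#define MDI_COMMAND   0x40000000u     /* busy bit: set to start, clears when done */
#define POLL_TRIES    100             /* driver polls every 10 usec up to a timeout */

static uint32_t msca;                 /* stands in for the IXGBE_MSCA register */

static void reg_write(uint32_t v) { msca = v; }
static uint32_t reg_read(void)
{
        msca &= ~MDI_COMMAND;         /* pretend hardware finished instantly */
        return msca;
}

/* Kick off one MDIO phase and wait for the busy bit to clear. */
static int mdio_phase(uint32_t command)
{
        reg_write(command | MDI_COMMAND);

        for (int i = 0; i < POLL_TRIES; i++) {
                /* in the driver: udelay(10) between polls */
                if (!(reg_read() & MDI_COMMAND))
                        return 0;
        }
        return -1;                    /* IXGBE_ERR_PHY on timeout */
}

int main(void)
{
        if (mdio_phase(0x1) == 0 &&   /* address cycle */
            mdio_phase(0x2) == 0)     /* read cycle */
                printf("both MDIO phases completed\n");
        return 0;
}

Factoring the raw protocol out of the SWFW-locked wrappers is what lets the generic read/write routines shrink to acquire-call-release, as the later hunks in this file show.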
281/**
207 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register 282 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
283 * using the SWFW lock - this function is needed in most cases
208 * @hw: pointer to hardware structure 284 * @hw: pointer to hardware structure
209 * @reg_addr: 32 bit address of PHY register to read 285 * @reg_addr: 32 bit address of PHY register to read
210 * @phy_data: Pointer to read data from PHY register 286 * @phy_data: Pointer to read data from PHY register
@@ -212,10 +288,7 @@ out:
212s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 288s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
213 u32 device_type, u16 *phy_data) 289 u32 device_type, u16 *phy_data)
214{ 290{
215 u32 command; 291 s32 status;
216 u32 i;
217 u32 data;
218 s32 status = 0;
219 u16 gssr; 292 u16 gssr;
220 293
221 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) 294 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -223,86 +296,93 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
223 else 296 else
224 gssr = IXGBE_GSSR_PHY0_SM; 297 gssr = IXGBE_GSSR_PHY0_SM;
225 298
226 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) 299 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
300 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
301 phy_data);
302 hw->mac.ops.release_swfw_sync(hw, gssr);
303 } else {
227 status = IXGBE_ERR_SWFW_SYNC; 304 status = IXGBE_ERR_SWFW_SYNC;
305 }
228 306
229 if (status == 0) { 307 return status;
230 /* Setup and write the address cycle command */ 308}
231 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
232 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
233 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
234 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
235 309
236 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 310/**
311 * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
312 * without SWFW lock
313 * @hw: pointer to hardware structure
314 * @reg_addr: 32 bit PHY register to write
315 * @device_type: 5 bit device type
316 * @phy_data: Data to write to the PHY register
317 **/
318s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
319 u32 device_type, u16 phy_data)
320{
321 u32 i, command;
237 322
238 /* 323 /* Put the data in the MDI single read and write data register*/
239 * Check every 10 usec to see if the address cycle completed. 324 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
240 * The MDI Command bit will clear when the operation is
241 * complete
242 */
243 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
244 udelay(10);
245 325
246 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 326 /* Setup and write the address cycle command */
327 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
328 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
329 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
330 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
247 331
248 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) 332 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
249 break;
250 }
251 333
252 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 334 /*
253 hw_dbg(hw, "PHY address command did not complete.\n"); 335 * Check every 10 usec to see if the address cycle completed.
254 status = IXGBE_ERR_PHY; 336 * The MDI Command bit will clear when the operation is
255 } 337 * complete
338 */
339 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
340 udelay(10);
256 341
257 if (status == 0) { 342 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
258 /* 343 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
259 * Address cycle complete, setup and write the read 344 break;
260 * command 345 }
261 */
262 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
263 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
264 (hw->phy.mdio.prtad <<
265 IXGBE_MSCA_PHY_ADDR_SHIFT) |
266 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
267
268 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
269
270 /*
271 * Check every 10 usec to see if the address cycle
272 * completed. The MDI Command bit will clear when the
273 * operation is complete
274 */
275 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
276 udelay(10);
277 346
278 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 347 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
348 hw_dbg(hw, "PHY address cmd didn't complete\n");
349 return IXGBE_ERR_PHY;
350 }
279 351
280 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) 352 /*
281 break; 353 * Address cycle complete, setup and write the write
282 } 354 * command
355 */
356 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
357 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
358 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
359 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
283 360
284 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 361 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
285 hw_dbg(hw, "PHY read command didn't complete\n");
286 status = IXGBE_ERR_PHY;
287 } else {
288 /*
289 * Read operation is complete. Get the data
290 * from MSRWD
291 */
292 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
293 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
294 *phy_data = (u16)(data);
295 }
296 }
297 362
298 hw->mac.ops.release_swfw_sync(hw, gssr); 363 /* Check every 10 usec to see if the address cycle
364 * completed. The MDI Command bit will clear when the
365 * operation is complete
366 */
367 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
368 udelay(10);
369
370 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
371 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
372 break;
299 } 373 }
300 374
301 return status; 375 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
376 hw_dbg(hw, "PHY write cmd didn't complete\n");
377 return IXGBE_ERR_PHY;
378 }
379
380 return 0;
302} 381}
303 382
304/** 383/**
305 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register 384 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
385 * using SWFW lock- this function is needed in most cases
306 * @hw: pointer to hardware structure 386 * @hw: pointer to hardware structure
307 * @reg_addr: 32 bit PHY register to write 387 * @reg_addr: 32 bit PHY register to write
308 * @device_type: 5 bit device type 388 * @device_type: 5 bit device type
@@ -311,9 +391,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
311s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 391s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
312 u32 device_type, u16 phy_data) 392 u32 device_type, u16 phy_data)
313{ 393{
314 u32 command; 394 s32 status;
315 u32 i;
316 s32 status = 0;
317 u16 gssr; 395 u16 gssr;
318 396
319 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) 397 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -321,74 +399,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
321 else 399 else
322 gssr = IXGBE_GSSR_PHY0_SM; 400 gssr = IXGBE_GSSR_PHY0_SM;
323 401
324 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) 402 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
325 status = IXGBE_ERR_SWFW_SYNC; 403 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
326 404 phy_data);
327 if (status == 0) {
328 /* Put the data in the MDI single read and write data register*/
329 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
330
331 /* Setup and write the address cycle command */
332 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
333 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
334 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
335 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
336
337 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
338
339 /*
340 * Check every 10 usec to see if the address cycle completed.
341 * The MDI Command bit will clear when the operation is
342 * complete
343 */
344 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
345 udelay(10);
346
347 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
348
349 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
350 break;
351 }
352
353 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
354 hw_dbg(hw, "PHY address cmd didn't complete\n");
355 status = IXGBE_ERR_PHY;
356 }
357
358 if (status == 0) {
359 /*
360 * Address cycle complete, setup and write the write
361 * command
362 */
363 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
364 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
365 (hw->phy.mdio.prtad <<
366 IXGBE_MSCA_PHY_ADDR_SHIFT) |
367 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
368
369 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
370
371 /*
372 * Check every 10 usec to see if the address cycle
373 * completed. The MDI Command bit will clear when the
374 * operation is complete
375 */
376 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
377 udelay(10);
378
379 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
380
381 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
382 break;
383 }
384
385 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
386 hw_dbg(hw, "PHY address cmd didn't complete\n");
387 status = IXGBE_ERR_PHY;
388 }
389 }
390
391 hw->mac.ops.release_swfw_sync(hw, gssr); 405 hw->mac.ops.release_swfw_sync(hw, gssr);
406 } else {
407 status = IXGBE_ERR_SWFW_SYNC;
392 } 408 }
393 409
394 return status; 410 return status;
@@ -775,6 +791,8 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
775 * Read control word from PHY init contents offset 791 * Read control word from PHY init contents offset
776 */ 792 */
777 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); 793 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
794 if (ret_val)
795 goto err_eeprom;
778 control = (eword & IXGBE_CONTROL_MASK_NL) >> 796 control = (eword & IXGBE_CONTROL_MASK_NL) >>
779 IXGBE_CONTROL_SHIFT_NL; 797 IXGBE_CONTROL_SHIFT_NL;
780 edata = eword & IXGBE_DATA_MASK_NL; 798 edata = eword & IXGBE_DATA_MASK_NL;
@@ -787,10 +805,15 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
787 case IXGBE_DATA_NL: 805 case IXGBE_DATA_NL:
788 hw_dbg(hw, "DATA:\n"); 806 hw_dbg(hw, "DATA:\n");
789 data_offset++; 807 data_offset++;
790 hw->eeprom.ops.read(hw, data_offset++, 808 ret_val = hw->eeprom.ops.read(hw, data_offset++,
791 &phy_offset); 809 &phy_offset);
810 if (ret_val)
811 goto err_eeprom;
792 for (i = 0; i < edata; i++) { 812 for (i = 0; i < edata; i++) {
793 hw->eeprom.ops.read(hw, data_offset, &eword); 813 ret_val = hw->eeprom.ops.read(hw, data_offset,
814 &eword);
815 if (ret_val)
816 goto err_eeprom;
794 hw->phy.ops.write_reg(hw, phy_offset, 817 hw->phy.ops.write_reg(hw, phy_offset,
795 MDIO_MMD_PMAPMD, eword); 818 MDIO_MMD_PMAPMD, eword);
796 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, 819 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
@@ -822,12 +845,42 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
822 845
823out: 846out:
824 return ret_val; 847 return ret_val;
848
849err_eeprom:
850 hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
851 return IXGBE_ERR_PHY;
825} 852}
826 853
827/** 854/**
828 * ixgbe_identify_sfp_module_generic - Identifies SFP modules 855 * ixgbe_identify_module_generic - Identifies module type
829 * @hw: pointer to hardware structure 856 * @hw: pointer to hardware structure
830 * 857 *
858 * Determines the media type and calls the appropriate identify function.
859 **/
860s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
861{
862 s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
863
864 switch (hw->mac.ops.get_media_type(hw)) {
865 case ixgbe_media_type_fiber:
866 status = ixgbe_identify_sfp_module_generic(hw);
867 break;
868 case ixgbe_media_type_fiber_qsfp:
869 status = ixgbe_identify_qsfp_module_generic(hw);
870 break;
871 default:
872 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
873 status = IXGBE_ERR_SFP_NOT_PRESENT;
874 break;
875 }
876
877 return status;
878}
879
880/**
881 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
882 * @hw: pointer to hardware structure
883 *
831 * Searches for and identifies the SFP module and assigns appropriate PHY type. 884 * Searches for and identifies the SFP module and assigns appropriate PHY type.
832 **/ 885 **/
833s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 886s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
@@ -1106,6 +1159,197 @@ err_read_i2c_eeprom:
1106} 1159}
1107 1160
1108/** 1161/**
1162 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
1163 * @hw: pointer to hardware structure
1164 *
1165 * Searches for and identifies the QSFP module and assigns appropriate PHY type
1166 **/
1167s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1168{
1169 struct ixgbe_adapter *adapter = hw->back;
1170 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1171 u32 vendor_oui = 0;
1172 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1173 u8 identifier = 0;
1174 u8 comp_codes_1g = 0;
1175 u8 comp_codes_10g = 0;
1176 u8 oui_bytes[3] = {0, 0, 0};
1177 u16 enforce_sfp = 0;
1178 u8 connector = 0;
1179 u8 cable_length = 0;
1180 u8 device_tech = 0;
1181 bool active_cable = false;
1182
1183 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
1184 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1185 status = IXGBE_ERR_SFP_NOT_PRESENT;
1186 goto out;
1187 }
1188
1189 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
1190 &identifier);
1191
1192 if (status != 0)
1193 goto err_read_i2c_eeprom;
1194
1195 if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
1196 hw->phy.type = ixgbe_phy_sfp_unsupported;
1197 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1198 goto out;
1199 }
1200
1201 hw->phy.id = identifier;
1202
1203 /* LAN ID is needed for sfp_type determination */
1204 hw->mac.ops.set_lan_id(hw);
1205
1206 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
1207 &comp_codes_10g);
1208
1209 if (status != 0)
1210 goto err_read_i2c_eeprom;
1211
1212 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
1213 &comp_codes_1g);
1214
1215 if (status != 0)
1216 goto err_read_i2c_eeprom;
1217
1218 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
1219 hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
1220 if (hw->bus.lan_id == 0)
1221 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
1222 else
1223 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
1224 } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1225 IXGBE_SFF_10GBASELR_CAPABLE)) {
1226 if (hw->bus.lan_id == 0)
1227 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
1228 else
1229 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
1230 } else {
1231 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
1232 active_cable = true;
1233
1234 if (!active_cable) {
1235 /* check for active DA cables that pre-date
1236 * SFF-8436 v3.6
1237 */
1238 hw->phy.ops.read_i2c_eeprom(hw,
1239 IXGBE_SFF_QSFP_CONNECTOR,
1240 &connector);
1241
1242 hw->phy.ops.read_i2c_eeprom(hw,
1243 IXGBE_SFF_QSFP_CABLE_LENGTH,
1244 &cable_length);
1245
1246 hw->phy.ops.read_i2c_eeprom(hw,
1247 IXGBE_SFF_QSFP_DEVICE_TECH,
1248 &device_tech);
1249
1250 if ((connector ==
1251 IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
1252 (cable_length > 0) &&
1253 ((device_tech >> 4) ==
1254 IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
1255 active_cable = true;
1256 }
1257
1258 if (active_cable) {
1259 hw->phy.type = ixgbe_phy_qsfp_active_unknown;
1260 if (hw->bus.lan_id == 0)
1261 hw->phy.sfp_type =
1262 ixgbe_sfp_type_da_act_lmt_core0;
1263 else
1264 hw->phy.sfp_type =
1265 ixgbe_sfp_type_da_act_lmt_core1;
1266 } else {
1267 /* unsupported module type */
1268 hw->phy.type = ixgbe_phy_sfp_unsupported;
1269 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1270 goto out;
1271 }
1272 }
1273
1274 if (hw->phy.sfp_type != stored_sfp_type)
1275 hw->phy.sfp_setup_needed = true;
1276
1277 /* Determine if the QSFP+ PHY is dual speed or not. */
1278 hw->phy.multispeed_fiber = false;
1279 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1280 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1281 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1282 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1283 hw->phy.multispeed_fiber = true;
1284
1285 /* Determine PHY vendor for optical modules */
1286 if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1287 IXGBE_SFF_10GBASELR_CAPABLE)) {
1288 status = hw->phy.ops.read_i2c_eeprom(hw,
1289 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
1290 &oui_bytes[0]);
1291
1292 if (status != 0)
1293 goto err_read_i2c_eeprom;
1294
1295 status = hw->phy.ops.read_i2c_eeprom(hw,
1296 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
1297 &oui_bytes[1]);
1298
1299 if (status != 0)
1300 goto err_read_i2c_eeprom;
1301
1302 status = hw->phy.ops.read_i2c_eeprom(hw,
1303 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
1304 &oui_bytes[2]);
1305
1306 if (status != 0)
1307 goto err_read_i2c_eeprom;
1308
1309 vendor_oui =
1310 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1311 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1312 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1313
1314 if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
1315 hw->phy.type = ixgbe_phy_qsfp_intel;
1316 else
1317 hw->phy.type = ixgbe_phy_qsfp_unknown;
1318
1319 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1320 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
1321 /* Make sure we're a supported PHY type */
1322 if (hw->phy.type == ixgbe_phy_qsfp_intel) {
1323 status = 0;
1324 } else {
1325 if (hw->allow_unsupported_sfp == true) {
1326 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1327 status = 0;
1328 } else {
1329 hw_dbg(hw,
1330 "QSFP module not supported\n");
1331 hw->phy.type =
1332 ixgbe_phy_sfp_unsupported;
1333 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1334 }
1335 }
1336 } else {
1337 status = 0;
1338 }
1339 }
1340
1341out:
1342 return status;
1343
1344err_read_i2c_eeprom:
1345 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1346 hw->phy.id = 0;
1347 hw->phy.type = ixgbe_phy_unknown;
1348
1349 return IXGBE_ERR_SFP_NOT_PRESENT;
1350}
1351
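The vendor check near the end of the QSFP identify path rebuilds a 24-bit OUI from three EEPROM bytes and compares it with Intel's. A sketch with assumed shift values; the real IXGBE_SFF_VENDOR_OUI_* constants may pack the OUI differently:

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: byte0 is the most significant OUI byte. */
#define OUI_BYTE0_SHIFT 16
#define OUI_BYTE1_SHIFT 8
#define OUI_BYTE2_SHIFT 0
#define OUI_INTEL       0x001B21u    /* Intel IEEE OUI, 00:1B:21 */

int main(void)
{
        uint8_t oui_bytes[3] = { 0x00, 0x1B, 0x21 };  /* as read over i2c */

        uint32_t vendor_oui = ((uint32_t)oui_bytes[0] << OUI_BYTE0_SHIFT) |
                              ((uint32_t)oui_bytes[1] << OUI_BYTE1_SHIFT) |
                              ((uint32_t)oui_bytes[2] << OUI_BYTE2_SHIFT);

        printf("module vendor is %s\n",
               vendor_oui == OUI_INTEL ? "Intel" : "unknown");
        return 0;
}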
1352/**
1109 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence 1353 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
1110 * @hw: pointer to hardware structure 1354 * @hw: pointer to hardware structure
1111 * @list_offset: offset to the SFP ID list 1355 * @list_offset: offset to the SFP ID list
@@ -1147,7 +1391,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1147 sfp_type = ixgbe_sfp_type_srlr_core1; 1391 sfp_type = ixgbe_sfp_type_srlr_core1;
1148 1392
1149 /* Read offset to PHY init contents */ 1393 /* Read offset to PHY init contents */
1150 hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); 1394 if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
1395 hw_err(hw, "eeprom read at %d failed\n",
1396 IXGBE_PHY_INIT_OFFSET_NL);
1397 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1398 }
1151 1399
1152 if ((!*list_offset) || (*list_offset == 0xFFFF)) 1400 if ((!*list_offset) || (*list_offset == 0xFFFF))
1153 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; 1401 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
@@ -1159,12 +1407,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1159 * Find the matching SFP ID in the EEPROM 1407 * Find the matching SFP ID in the EEPROM
1160 * and program the init sequence 1408 * and program the init sequence
1161 */ 1409 */
1162 hw->eeprom.ops.read(hw, *list_offset, &sfp_id); 1410 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1411 goto err_phy;
1163 1412
1164 while (sfp_id != IXGBE_PHY_INIT_END_NL) { 1413 while (sfp_id != IXGBE_PHY_INIT_END_NL) {
1165 if (sfp_id == sfp_type) { 1414 if (sfp_id == sfp_type) {
1166 (*list_offset)++; 1415 (*list_offset)++;
1167 hw->eeprom.ops.read(hw, *list_offset, data_offset); 1416 if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
1417 goto err_phy;
1168 if ((!*data_offset) || (*data_offset == 0xFFFF)) { 1418 if ((!*data_offset) || (*data_offset == 0xFFFF)) {
1169 hw_dbg(hw, "SFP+ module not supported\n"); 1419 hw_dbg(hw, "SFP+ module not supported\n");
1170 return IXGBE_ERR_SFP_NOT_SUPPORTED; 1420 return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1174,7 +1424,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1174 } else { 1424 } else {
1175 (*list_offset) += 2; 1425 (*list_offset) += 2;
1176 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) 1426 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1177 return IXGBE_ERR_PHY; 1427 goto err_phy;
1178 } 1428 }
1179 } 1429 }
1180 1430
@@ -1184,6 +1434,10 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1184 } 1434 }
1185 1435
1186 return 0; 1436 return 0;
1437
1438err_phy:
1439 hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
1440 return IXGBE_ERR_PHY;
1187} 1441}
1188 1442
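ixgbe_get_sfp_init_sequence_offsets walks an EEPROM-resident list of (sfp_id, data_offset) records until it finds the module's ID or hits the end marker; the hunk above makes every read in that walk fail loudly through err_phy. A compact model of the walk over a fake list; the record layout and END marker value are assumptions:

#include <stdint.h>
#include <stdio.h>

#define LIST_END 0xFFFF              /* stands in for IXGBE_PHY_INIT_END_NL */

/* alternating sfp_id, data_offset records, END-terminated */
static const uint16_t fake_list[] = { 3, 0x20, 7, 0x30, LIST_END };

static int find_init_offset(uint16_t sfp_type, uint16_t *data_offset)
{
        unsigned off = 0;

        while (fake_list[off] != LIST_END) {
                if (fake_list[off] == sfp_type) {
                        *data_offset = fake_list[off + 1];
                        return 0;
                }
                off += 2;            /* skip this record's id + offset */
        }
        return -1;                   /* module type not in the list */
}

int main(void)
{
        uint16_t data_offset;

        if (!find_init_offset(7, &data_offset))
                printf("init sequence at 0x%04X\n", data_offset);
        return 0;
}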
1189/** 1443/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 886a3431cf5b..24af12e3719e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -33,17 +33,28 @@
33#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 33#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
34 34
35/* EEPROM byte offsets */ 35/* EEPROM byte offsets */
36#define IXGBE_SFF_IDENTIFIER 0x0 36#define IXGBE_SFF_IDENTIFIER 0x0
37#define IXGBE_SFF_IDENTIFIER_SFP 0x3 37#define IXGBE_SFF_IDENTIFIER_SFP 0x3
38#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 38#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
39#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 39#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
40#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 40#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
41#define IXGBE_SFF_1GBE_COMP_CODES 0x6 41#define IXGBE_SFF_1GBE_COMP_CODES 0x6
42#define IXGBE_SFF_10GBE_COMP_CODES 0x3 42#define IXGBE_SFF_10GBE_COMP_CODES 0x3
43#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 43#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
44#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C 44#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
45#define IXGBE_SFF_SFF_8472_SWAP 0x5C 45#define IXGBE_SFF_SFF_8472_SWAP 0x5C
46#define IXGBE_SFF_SFF_8472_COMP 0x5E 46#define IXGBE_SFF_SFF_8472_COMP 0x5E
47#define IXGBE_SFF_SFF_8472_OSCB 0x6E
48#define IXGBE_SFF_SFF_8472_ESCB 0x76
49#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
50#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
51#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
52#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
53#define IXGBE_SFF_QSFP_CONNECTOR 0x82
54#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
55#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
56#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
57#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
47 58
48/* Bitmasks */ 59/* Bitmasks */
49#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 60#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -54,7 +65,14 @@
54#define IXGBE_SFF_1GBASET_CAPABLE 0x8 65#define IXGBE_SFF_1GBASET_CAPABLE 0x8
55#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 66#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
56#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 67#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
68#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
69#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
70#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
57#define IXGBE_SFF_ADDRESSING_MODE 0x4 71#define IXGBE_SFF_ADDRESSING_MODE 0x4
72#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
73#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
74#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
75#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
58#define IXGBE_I2C_EEPROM_READ_MASK 0x100 76#define IXGBE_I2C_EEPROM_READ_MASK 0x100
59#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 77#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
60#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 78#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -102,6 +120,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
102 u32 device_type, u16 *phy_data); 120 u32 device_type, u16 *phy_data);
103s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 121s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
104 u32 device_type, u16 phy_data); 122 u32 device_type, u16 phy_data);
123s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
124 u32 device_type, u16 *phy_data);
125s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
126 u32 device_type, u16 phy_data);
105s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); 127s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
106s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 128s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
107 ixgbe_link_speed speed, 129 ixgbe_link_speed speed,
@@ -121,7 +143,9 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
121 u16 *firmware_version); 143 u16 *firmware_version);
122 144
123s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 145s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
146s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
124s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 147s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
148s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
125s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 149s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
126 u16 *list_offset, 150 u16 *list_offset,
127 u16 *data_offset); 151 u16 *data_offset);
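Note: the new defines add the QSFP+ module memory map (SFF-8436-style byte offsets) alongside the existing SFP/SFF-8472 ones. A hypothetical identification check using those offsets — i2c_read_byte() is a stand-in, and the canned values are for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define SFF_IDENTIFIER           0x00
    #define SFF_IDENTIFIER_QSFP_PLUS 0x0D
    #define SFF_QSFP_10GBE_COMP      0x83
    #define SFF_QSFP_DA_PASSIVE      0x08

    /* hypothetical module-EEPROM reader returning canned QSFP+ bytes */
    static int i2c_read_byte(uint8_t offset, uint8_t *val)
    {
        switch (offset) {
        case SFF_IDENTIFIER:      *val = SFF_IDENTIFIER_QSFP_PLUS; break;
        case SFF_QSFP_10GBE_COMP: *val = SFF_QSFP_DA_PASSIVE; break;
        default:                  *val = 0; break;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t id, comp;

        /* byte 0x0D distinguishes QSFP+ from SFP (0x03) */
        if (i2c_read_byte(SFF_IDENTIFIER, &id) || id != SFF_IDENTIFIER_QSFP_PLUS)
            return 1;
        if (i2c_read_byte(SFF_QSFP_10GBE_COMP, &comp))
            return 1;
        puts(comp & SFF_QSFP_DA_PASSIVE ? "QSFP+ passive DA" : "QSFP+ optical");
        return 0;
    }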
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 331987d6815c..5184e2a1a7d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -885,8 +885,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
885 885
886 ixgbe_ptp_reset(adapter); 886 ixgbe_ptp_reset(adapter);
887 887
888 /* set the flag that PTP has been enabled */ 888 /* enter the IXGBE_PTP_RUNNING state */
889 adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED; 889 set_bit(__IXGBE_PTP_RUNNING, &adapter->state);
890 890
891 return; 891 return;
892} 892}
@@ -899,10 +899,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
899 */ 899 */
900void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) 900void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
901{ 901{
902 /* stop the overflow check task */ 902 /* Leave the IXGBE_PTP_RUNNING state. */
903 adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED | 903 if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
904 IXGBE_FLAG2_PTP_PPS_ENABLED); 904 return;
905 905
906 /* stop the PPS signal */
907 adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
906 ixgbe_ptp_setup_sdp(adapter); 908 ixgbe_ptp_setup_sdp(adapter);
907 909
908 cancel_work_sync(&adapter->ptp_tx_work); 910 cancel_work_sync(&adapter->ptp_tx_work);
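Note: moving from a flags2 bit to an adapter->state bit lets the stop path use test_and_clear_bit(), which checks and clears atomically, so a second ixgbe_ptp_stop() becomes a harmless no-op. A userspace sketch of that semantic using C11 atomics (the kernel primitives differ; this only mirrors the behavior):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PTP_RUNNING (1u << 0)

    static atomic_uint state;

    static void ptp_init(void)
    {
        atomic_fetch_or(&state, PTP_RUNNING);
    }

    static void ptp_stop(void)
    {
        /* atomically clear the bit and observe its previous value */
        unsigned int prev = atomic_fetch_and(&state, ~PTP_RUNNING);

        if (!(prev & PTP_RUNNING))
            return;              /* was not running: nothing to tear down */
        puts("tearing down PTP exactly once");
    }

    int main(void)
    {
        ptp_init();
        ptp_stop();  /* tears down */
        ptp_stop();  /* no-op */
        return 0;
    }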
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1e7d587c4e57..276d7b135332 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -173,39 +173,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
173 ixgbe_disable_sriov(adapter); 173 ixgbe_disable_sriov(adapter);
174} 174}
175 175
176static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
177{
178 struct pci_dev *pdev = adapter->pdev;
179 struct pci_dev *vfdev;
180 int dev_id;
181
182 switch (adapter->hw.mac.type) {
183 case ixgbe_mac_82599EB:
184 dev_id = IXGBE_DEV_ID_82599_VF;
185 break;
186 case ixgbe_mac_X540:
187 dev_id = IXGBE_DEV_ID_X540_VF;
188 break;
189 default:
190 return false;
191 }
192
193 /* loop through all the VFs to see if we own any that are assigned */
194 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
195 while (vfdev) {
196 /* if we don't own it we don't care */
197 if (vfdev->is_virtfn && vfdev->physfn == pdev) {
198 /* if it is assigned we cannot release it */
199 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
200 return true;
201 }
202
203 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
204 }
205
206 return false;
207}
208
209#endif /* #ifdef CONFIG_PCI_IOV */ 176#endif /* #ifdef CONFIG_PCI_IOV */
210int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) 177int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
211{ 178{
@@ -235,7 +202,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
235 * without causing issues, so just leave the hardware 202 * without causing issues, so just leave the hardware
236 * available but disabled 203 * available but disabled
237 */ 204 */
238 if (ixgbe_vfs_are_assigned(adapter)) { 205 if (pci_vfs_assigned(adapter->pdev)) {
239 e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); 206 e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
240 return -EPERM; 207 return -EPERM;
241 } 208 }
@@ -672,8 +639,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
672{ 639{
673 struct ixgbe_hw *hw = &adapter->hw; 640 struct ixgbe_hw *hw = &adapter->hw;
674 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; 641 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
675 u32 reg, msgbuf[4]; 642 u32 reg, reg_offset, vf_shift;
676 u32 reg_offset, vf_shift; 643 u32 msgbuf[4] = {0, 0, 0, 0};
677 u8 *addr = (u8 *)(&msgbuf[1]); 644 u8 *addr = (u8 *)(&msgbuf[1]);
678 645
679 e_info(probe, "VF Reset msg received from vf %d\n", vf); 646 e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -768,6 +735,29 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
768 return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; 735 return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
769} 736}
770 737
738static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
739{
740 u32 vlvf;
741 s32 regindex;
742
743 /* short cut the special case */
744 if (vlan == 0)
745 return 0;
746
747 /* Search for the vlan id in the VLVF entries */
748 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
749 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
750 if ((vlvf & VLAN_VID_MASK) == vlan)
751 break;
752 }
753
754 /* Return a negative value if not found */
755 if (regindex >= IXGBE_VLVF_ENTRIES)
756 regindex = -1;
757
758 return regindex;
759}
760
771static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, 761static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
772 u32 *msgbuf, u32 vf) 762 u32 *msgbuf, u32 vf)
773{ 763{
@@ -775,6 +765,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
775 int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; 765 int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
776 int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); 766 int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
777 int err; 767 int err;
768 s32 reg_ndx;
769 u32 vlvf;
770 u32 bits;
778 u8 tcs = netdev_get_num_tc(adapter->netdev); 771 u8 tcs = netdev_get_num_tc(adapter->netdev);
779 772
780 if (adapter->vfinfo[vf].pf_vlan || tcs) { 773 if (adapter->vfinfo[vf].pf_vlan || tcs) {
@@ -790,10 +783,50 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
790 else if (adapter->vfinfo[vf].vlan_count) 783 else if (adapter->vfinfo[vf].vlan_count)
791 adapter->vfinfo[vf].vlan_count--; 784 adapter->vfinfo[vf].vlan_count--;
792 785
786 /* In promiscuous mode, any VLAN filter set for a VF must
787 * also have the PF pool added to it.
788 */
789 if (add && adapter->netdev->flags & IFF_PROMISC)
790 err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
791
793 err = ixgbe_set_vf_vlan(adapter, add, vid, vf); 792 err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
794 if (!err && adapter->vfinfo[vf].spoofchk_enabled) 793 if (!err && adapter->vfinfo[vf].spoofchk_enabled)
795 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); 794 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
796 795
796 /* Go through all the checks to see if the VLAN filter should
797 * be wiped completely.
798 */
799 if (!add && adapter->netdev->flags & IFF_PROMISC) {
800 reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
801 if (reg_ndx < 0)
802 goto out;
803 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
804 /* See if any pools other than the PF are set for
805 * this VLAN filter entry.
806 */
807 if (VMDQ_P(0) < 32) {
808 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
809 bits &= ~(1 << VMDQ_P(0));
810 bits |= IXGBE_READ_REG(hw,
811 IXGBE_VLVFB(reg_ndx * 2) + 1);
812 } else {
813 bits = IXGBE_READ_REG(hw,
814 IXGBE_VLVFB(reg_ndx * 2) + 1);
815 bits &= ~(1 << (VMDQ_P(0) - 32));
816 bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
817 }
818
819 /* If the filter was removed, also clear the PF pool bit
820 * when the PF only added itself to the pool because it
821 * is in promiscuous mode.
822 */
823 if ((vlvf & VLAN_VID_MASK) == vid &&
824 !test_bit(vid, adapter->active_vlans) && !bits)
825 ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
826 }
827
828out:
829
797 return err; 830 return err;
798} 831}
799 832
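Note: the removal path reads the two 32-bit VLVFB pool-enable words behind a VLAN filter and asks whether any pool other than the PF still references it before wiping the entry. The index math in miniature, assuming pools 0-31 live in the low word and pools 32-63 in the high word:

    #include <stdint.h>
    #include <stdio.h>

    /* does any pool other than 'pf_pool' still use this filter? */
    static int other_pools_set(uint32_t lo, uint32_t hi, unsigned pf_pool)
    {
        if (pf_pool < 32)
            lo &= ~(1u << pf_pool);        /* mask out the PF's own bit */
        else
            hi &= ~(1u << (pf_pool - 32));
        return (lo | hi) != 0;
    }

    int main(void)
    {
        /* only pool 0 (the PF) set: the filter can be wiped */
        printf("%d\n", other_pools_set(0x01, 0x0, 0));  /* -> 0 */
        /* pool 5 also set: keep the filter */
        printf("%d\n", other_pools_set(0x21, 0x0, 0));  /* -> 1 */
        return 0;
    }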
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 70c6aa3d3f95..6442cf8f9dce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -69,6 +69,7 @@
69#define IXGBE_DEV_ID_82599_LS 0x154F 69#define IXGBE_DEV_ID_82599_LS 0x154F
70#define IXGBE_DEV_ID_X540T 0x1528 70#define IXGBE_DEV_ID_X540T 0x1528
71#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A 71#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
72#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
72#define IXGBE_DEV_ID_X540T1 0x1560 73#define IXGBE_DEV_ID_X540T1 0x1560
73 74
74/* VF Device IDs */ 75/* VF Device IDs */
@@ -1520,9 +1521,11 @@ enum {
1520#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ 1521#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
1521#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ 1522#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
1522#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ 1523#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
1524#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
1523#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ 1525#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
1524#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ 1526#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
1525#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ 1527#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */
1528#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
1526 1529
1527/* LEDCTL Bit Masks */ 1530/* LEDCTL Bit Masks */
1528#define IXGBE_LED_IVRT_BASE 0x00000040 1531#define IXGBE_LED_IVRT_BASE 0x00000040
@@ -1593,6 +1596,7 @@ enum {
1593#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1596#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1594#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1597#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1595#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1598#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1599#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
1596#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 1600#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
1597 1601
1598#define IXGBE_MACC_FLU 0x00000001 1602#define IXGBE_MACC_FLU 0x00000001
@@ -2582,6 +2586,10 @@ enum ixgbe_phy_type {
2582 ixgbe_phy_sfp_ftl_active, 2586 ixgbe_phy_sfp_ftl_active,
2583 ixgbe_phy_sfp_unknown, 2587 ixgbe_phy_sfp_unknown,
2584 ixgbe_phy_sfp_intel, 2588 ixgbe_phy_sfp_intel,
2589 ixgbe_phy_qsfp_passive_unknown,
2590 ixgbe_phy_qsfp_active_unknown,
2591 ixgbe_phy_qsfp_intel,
2592 ixgbe_phy_qsfp_unknown,
2585 ixgbe_phy_sfp_unsupported, 2593 ixgbe_phy_sfp_unsupported,
2586 ixgbe_phy_generic 2594 ixgbe_phy_generic
2587}; 2595};
@@ -2622,6 +2630,8 @@ enum ixgbe_sfp_type {
2622enum ixgbe_media_type { 2630enum ixgbe_media_type {
2623 ixgbe_media_type_unknown = 0, 2631 ixgbe_media_type_unknown = 0,
2624 ixgbe_media_type_fiber, 2632 ixgbe_media_type_fiber,
2633 ixgbe_media_type_fiber_fixed,
2634 ixgbe_media_type_fiber_qsfp,
2625 ixgbe_media_type_fiber_lco, 2635 ixgbe_media_type_fiber_lco,
2626 ixgbe_media_type_copper, 2636 ixgbe_media_type_copper,
2627 ixgbe_media_type_backplane, 2637 ixgbe_media_type_backplane,
@@ -2838,6 +2848,7 @@ struct ixgbe_mac_operations {
2838 void (*disable_tx_laser)(struct ixgbe_hw *); 2848 void (*disable_tx_laser)(struct ixgbe_hw *);
2839 void (*enable_tx_laser)(struct ixgbe_hw *); 2849 void (*enable_tx_laser)(struct ixgbe_hw *);
2840 void (*flap_tx_laser)(struct ixgbe_hw *); 2850 void (*flap_tx_laser)(struct ixgbe_hw *);
2851 void (*stop_link_on_d3)(struct ixgbe_hw *);
2841 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); 2852 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
2842 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2853 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
2843 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 2854 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2885,6 +2896,8 @@ struct ixgbe_phy_operations {
2885 s32 (*reset)(struct ixgbe_hw *); 2896 s32 (*reset)(struct ixgbe_hw *);
2886 s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); 2897 s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
2887 s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); 2898 s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
2899 s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
2900 s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
2888 s32 (*setup_link)(struct ixgbe_hw *); 2901 s32 (*setup_link)(struct ixgbe_hw *);
2889 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); 2902 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
2890 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); 2903 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
@@ -2953,6 +2966,7 @@ struct ixgbe_phy_info {
2953 bool smart_speed_active; 2966 bool smart_speed_active;
2954 bool multispeed_fiber; 2967 bool multispeed_fiber;
2955 bool reset_if_overtemp; 2968 bool reset_if_overtemp;
2969 bool qsfp_shared_i2c_bus;
2956}; 2970};
2957 2971
2958#include "ixgbe_mbx.h" 2972#include "ixgbe_mbx.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f5166ad6bb5..59a62bbfb371 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -488,8 +488,8 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
488 * source pruning. 488 * source pruning.
489 */ 489 */
490 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 490 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
491 !(compare_ether_addr(adapter->netdev->dev_addr, 491 ether_addr_equal(adapter->netdev->dev_addr,
492 eth_hdr(skb)->h_source))) { 492 eth_hdr(skb)->h_source)) {
493 dev_kfree_skb_irq(skb); 493 dev_kfree_skb_irq(skb);
494 goto next_desc; 494 goto next_desc;
495 } 495 }
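Note: compare_ether_addr() followed memcmp()-style semantics (zero meant "equal"), so call sites had to negate it; ether_addr_equal() is a boolean that reads the way this source-pruning test is meant. Modeling both with memcmp() for illustration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* old helper: memcmp-style, 0 when the 6-byte addresses match */
    static int compare_ether_addr(const unsigned char *a, const unsigned char *b)
    {
        return memcmp(a, b, 6) != 0;
    }

    /* new helper: boolean, true when they match */
    static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
    {
        return memcmp(a, b, 6) == 0;
    }

    int main(void)
    {
        unsigned char x[6] = {0, 1, 2, 3, 4, 5}, y[6] = {0, 1, 2, 3, 4, 5};

        /* !compare_ether_addr(x, y) and ether_addr_equal(x, y) agree */
        printf("%d %d\n", !compare_ether_addr(x, y), ether_addr_equal(x, y));
        return 0;
    }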
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c35db735958f..7fb5677451f9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2641,7 +2641,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2641 ret = mv643xx_eth_shared_of_probe(pdev); 2641 ret = mv643xx_eth_shared_of_probe(pdev);
2642 if (ret) 2642 if (ret)
2643 return ret; 2643 return ret;
2644 pd = pdev->dev.platform_data; 2644 pd = dev_get_platdata(&pdev->dev);
2645 2645
2646 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2646 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2647 pd->tx_csum_limit : 9 * 1024; 2647 pd->tx_csum_limit : 9 * 1024;
@@ -2833,7 +2833,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2833 struct resource *res; 2833 struct resource *res;
2834 int err; 2834 int err;
2835 2835
2836 pd = pdev->dev.platform_data; 2836 pd = dev_get_platdata(&pdev->dev);
2837 if (pd == NULL) { 2837 if (pd == NULL) {
2838 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); 2838 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
2839 return -ENODEV; 2839 return -ENODEV;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b017818bccae..e35bac7cfdf1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -79,10 +79,10 @@
79#define MVNETA_MAC_ADDR_HIGH 0x2418 79#define MVNETA_MAC_ADDR_HIGH 0x2418
80#define MVNETA_SDMA_CONFIG 0x241c 80#define MVNETA_SDMA_CONFIG 0x241c
81#define MVNETA_SDMA_BRST_SIZE_16 4 81#define MVNETA_SDMA_BRST_SIZE_16 4
82#define MVNETA_NO_DESC_SWAP 0x0
83#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) 82#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
84#define MVNETA_RX_NO_DATA_SWAP BIT(4) 83#define MVNETA_RX_NO_DATA_SWAP BIT(4)
85#define MVNETA_TX_NO_DATA_SWAP BIT(5) 84#define MVNETA_TX_NO_DATA_SWAP BIT(5)
85#define MVNETA_DESC_SWAP BIT(6)
86#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) 86#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
87#define MVNETA_PORT_STATUS 0x2444 87#define MVNETA_PORT_STATUS 0x2444
88#define MVNETA_TX_IN_PRGRS BIT(1) 88#define MVNETA_TX_IN_PRGRS BIT(1)
@@ -138,7 +138,9 @@
138#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) 138#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
139#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) 139#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
140#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) 140#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
141#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
141#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) 142#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
143#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
142#define MVNETA_MIB_COUNTERS_BASE 0x3080 144#define MVNETA_MIB_COUNTERS_BASE 0x3080
143#define MVNETA_MIB_LATE_COLLISION 0x7c 145#define MVNETA_MIB_LATE_COLLISION 0x7c
144#define MVNETA_DA_FILT_SPEC_MCAST 0x3400 146#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
@@ -264,8 +266,7 @@ struct mvneta_port {
264 * layout of the transmit and reception DMA descriptors, and their 266 * layout of the transmit and reception DMA descriptors, and their
265 * layout is therefore defined by the hardware design 267 * layout is therefore defined by the hardware design
266 */ 268 */
267struct mvneta_tx_desc { 269
268 u32 command; /* Options used by HW for packet transmitting.*/
269#define MVNETA_TX_L3_OFF_SHIFT 0 270#define MVNETA_TX_L3_OFF_SHIFT 0
270#define MVNETA_TX_IP_HLEN_SHIFT 8 271#define MVNETA_TX_IP_HLEN_SHIFT 8
271#define MVNETA_TX_L4_UDP BIT(16) 272#define MVNETA_TX_L4_UDP BIT(16)
@@ -280,15 +281,6 @@ struct mvneta_tx_desc {
280#define MVNETA_TX_L4_CSUM_FULL BIT(30) 281#define MVNETA_TX_L4_CSUM_FULL BIT(30)
281#define MVNETA_TX_L4_CSUM_NOT BIT(31) 282#define MVNETA_TX_L4_CSUM_NOT BIT(31)
282 283
283 u16 reserverd1; /* csum_l4 (for future use) */
284 u16 data_size; /* Data size of transmitted packet in bytes */
285 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
286 u32 reserved2; /* hw_cmd - (for future use, PMT) */
287 u32 reserved3[4]; /* Reserved - (for future use) */
288};
289
290struct mvneta_rx_desc {
291 u32 status; /* Info about received packet */
292#define MVNETA_RXD_ERR_CRC 0x0 284#define MVNETA_RXD_ERR_CRC 0x0
293#define MVNETA_RXD_ERR_SUMMARY BIT(16) 285#define MVNETA_RXD_ERR_SUMMARY BIT(16)
294#define MVNETA_RXD_ERR_OVERRUN BIT(17) 286#define MVNETA_RXD_ERR_OVERRUN BIT(17)
@@ -299,16 +291,57 @@ struct mvneta_rx_desc {
299#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) 291#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
300#define MVNETA_RXD_L4_CSUM_OK BIT(30) 292#define MVNETA_RXD_L4_CSUM_OK BIT(30)
301 293
294#if defined(__LITTLE_ENDIAN)
295struct mvneta_tx_desc {
296 u32 command; /* Options used by HW for packet transmitting.*/
297 u16 reserverd1; /* csum_l4 (for future use) */
298 u16 data_size; /* Data size of transmitted packet in bytes */
299 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
300 u32 reserved2; /* hw_cmd - (for future use, PMT) */
301 u32 reserved3[4]; /* Reserved - (for future use) */
302};
303
304struct mvneta_rx_desc {
305 u32 status; /* Info about received packet */
302 u16 reserved1; /* pnc_info - (for future use, PnC) */ 306 u16 reserved1; /* pnc_info - (for future use, PnC) */
303 u16 data_size; /* Size of received packet in bytes */ 307 u16 data_size; /* Size of received packet in bytes */
308
304 u32 buf_phys_addr; /* Physical address of the buffer */ 309 u32 buf_phys_addr; /* Physical address of the buffer */
305 u32 reserved2; /* pnc_flow_id (for future use, PnC) */ 310 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
311
306 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 312 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
307 u16 reserved3; /* prefetch_cmd, for future use */ 313 u16 reserved3; /* prefetch_cmd, for future use */
308 u16 reserved4; /* csum_l4 - (for future use, PnC) */ 314 u16 reserved4; /* csum_l4 - (for future use, PnC) */
315
316 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
317 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
318};
319#else
320struct mvneta_tx_desc {
321 u16 data_size; /* Data size of transmitted packet in bytes */
322 u16 reserverd1; /* csum_l4 (for future use) */
323 u32 command; /* Options used by HW for packet transmitting.*/
324 u32 reserved2; /* hw_cmd - (for future use, PMT) */
325 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
326 u32 reserved3[4]; /* Reserved - (for future use) */
327};
328
329struct mvneta_rx_desc {
330 u16 data_size; /* Size of received packet in bytes */
331 u16 reserved1; /* pnc_info - (for future use, PnC) */
332 u32 status; /* Info about received packet */
333
334 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
335 u32 buf_phys_addr; /* Physical address of the buffer */
336
337 u16 reserved4; /* csum_l4 - (for future use, PnC) */
338 u16 reserved3; /* prefetch_cmd, for future use */
339 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
340
309 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ 341 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
310 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ 342 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
311}; 343};
344#endif
312 345
313struct mvneta_tx_queue { 346struct mvneta_tx_queue {
314 /* Number of this TX queue, in the range 0-7 */ 347 /* Number of this TX queue, in the range 0-7 */
@@ -908,13 +941,22 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
908 /* Default burst size */ 941 /* Default burst size */
909 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); 942 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
910 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); 943 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
944 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
911 945
912 val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP | 946#if defined(__BIG_ENDIAN)
913 MVNETA_NO_DESC_SWAP); 947 val |= MVNETA_DESC_SWAP;
948#endif
914 949
915 /* Assign port SDMA configuration */ 950 /* Assign port SDMA configuration */
916 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); 951 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
917 952
953 /* Disable PHY polling in hardware, since we're using the
954 * kernel phylib to do this.
955 */
956 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
957 val &= ~MVNETA_PHY_POLLING_ENABLE;
958 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
959
918 mvneta_set_ucast_table(pp, -1); 960 mvneta_set_ucast_table(pp, -1);
919 mvneta_set_special_mcast_table(pp, -1); 961 mvneta_set_special_mcast_table(pp, -1);
920 mvneta_set_other_mcast_table(pp, -1); 962 mvneta_set_other_mcast_table(pp, -1);
@@ -2307,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
2307 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2349 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2308 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 2350 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2309 MVNETA_GMAC_CONFIG_GMII_SPEED | 2351 MVNETA_GMAC_CONFIG_GMII_SPEED |
2310 MVNETA_GMAC_CONFIG_FULL_DUPLEX); 2352 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2353 MVNETA_GMAC_AN_SPEED_EN |
2354 MVNETA_GMAC_AN_DUPLEX_EN);
2311 2355
2312 if (phydev->duplex) 2356 if (phydev->duplex)
2313 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 2357 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2440,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev)
2440 return 0; 2484 return 0;
2441} 2485}
2442 2486
2487static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2488{
2489 struct mvneta_port *pp = netdev_priv(dev);
2490 int ret;
2491
2492 if (!pp->phy_dev)
2493 return -ENOTSUPP;
2494
2495 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2496 if (!ret)
2497 mvneta_adjust_link(dev);
2498
2499 return ret;
2500}
2501
2443/* Ethtool methods */ 2502/* Ethtool methods */
2444 2503
2445/* Get settings (phy address, speed) for ethtools */ 2504/* Get settings (phy address, speed) for ethtools */
@@ -2558,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
2558 .ndo_change_mtu = mvneta_change_mtu, 2617 .ndo_change_mtu = mvneta_change_mtu,
2559 .ndo_tx_timeout = mvneta_tx_timeout, 2618 .ndo_tx_timeout = mvneta_tx_timeout,
2560 .ndo_get_stats64 = mvneta_get_stats64, 2619 .ndo_get_stats64 = mvneta_get_stats64,
2620 .ndo_do_ioctl = mvneta_ioctl,
2561}; 2621};
2562 2622
2563const struct ethtool_ops mvneta_eth_tool_ops = { 2623const struct ethtool_ops mvneta_eth_tool_ops = {
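Note: on big-endian CPUs the port is now told to byte-swap DMA descriptors (MVNETA_DESC_SWAP), and the descriptor structs are re-declared with the 16-bit halves of each 32-bit word exchanged so fields still line up after the swap. A host-side illustration of why the u16 pair flips, assuming a full 32-bit byte swap per word:

    #include <stdint.h>
    #include <stdio.h>

    /* swap the bytes of one 32-bit descriptor word, as the hardware
     * does when MVNETA_DESC_SWAP is set */
    static uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00) |
               ((v << 8) & 0x00ff0000) | (v << 24);
    }

    int main(void)
    {
        /* little-endian layout: reserved in the low half, data_size high */
        uint16_t reserved1 = 0x1111, data_size = 0x2222;
        uint32_t word_le = ((uint32_t)data_size << 16) | reserved1;
        uint32_t word_be = bswap32(word_le);

        /* the u16 halves trade places, which is why the big-endian
         * struct declares data_size before the reserved field */
        printf("%08x -> %08x\n", word_le, word_be);
        return 0;
    }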
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index db481477bcc5..4ae0c7426010 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -583,10 +583,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
583 * table is full. 583 * table is full.
584 */ 584 */
585 if (pep->htpr == NULL) { 585 if (pep->htpr == NULL) {
586 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, 586 pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
587 HASH_ADDR_TABLE_SIZE, 587 HASH_ADDR_TABLE_SIZE,
588 &pep->htpr_dma, 588 &pep->htpr_dma, GFP_KERNEL);
589 GFP_KERNEL | __GFP_ZERO);
590 if (pep->htpr == NULL) 589 if (pep->htpr == NULL)
591 return -ENOMEM; 590 return -ENOMEM;
592 } else { 591 } else {
@@ -1024,9 +1023,9 @@ static int rxq_init(struct net_device *dev)
1024 pep->rx_desc_count = 0; 1023 pep->rx_desc_count = 0;
1025 size = pep->rx_ring_size * sizeof(struct rx_desc); 1024 size = pep->rx_ring_size * sizeof(struct rx_desc);
1026 pep->rx_desc_area_size = size; 1025 pep->rx_desc_area_size = size;
1027 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1026 pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
1028 &pep->rx_desc_dma, 1027 &pep->rx_desc_dma,
1029 GFP_KERNEL | __GFP_ZERO); 1028 GFP_KERNEL);
1030 if (!pep->p_rx_desc_area) 1029 if (!pep->p_rx_desc_area)
1031 goto out; 1030 goto out;
1032 1031
@@ -1085,9 +1084,9 @@ static int txq_init(struct net_device *dev)
1085 pep->tx_desc_count = 0; 1084 pep->tx_desc_count = 0;
1086 size = pep->tx_ring_size * sizeof(struct tx_desc); 1085 size = pep->tx_ring_size * sizeof(struct tx_desc);
1087 pep->tx_desc_area_size = size; 1086 pep->tx_desc_area_size = size;
1088 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1087 pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
1089 &pep->tx_desc_dma, 1088 &pep->tx_desc_dma,
1090 GFP_KERNEL | __GFP_ZERO); 1089 GFP_KERNEL);
1091 if (!pep->p_tx_desc_area) 1090 if (!pep->p_tx_desc_area)
1092 goto out; 1091 goto out;
1093 /* Initialize the next_desc_ptr links in the Tx descriptors ring */ 1092 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
@@ -1517,7 +1516,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1517 printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME); 1516 printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
1518 eth_hw_addr_random(dev); 1517 eth_hw_addr_random(dev);
1519 1518
1520 pep->pd = pdev->dev.platform_data; 1519 pep->pd = dev_get_platdata(&pdev->dev);
1521 pep->rx_ring_size = NUM_RX_DESCS; 1520 pep->rx_ring_size = NUM_RX_DESCS;
1522 if (pep->pd->rx_queue_size) 1521 if (pep->pd->rx_queue_size)
1523 pep->rx_ring_size = pep->pd->rx_queue_size; 1522 pep->rx_ring_size = pep->pd->rx_queue_size;
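Note: dma_zalloc_coherent() folds the zeroing into the allocation call, replacing the dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) idiom used before. A userspace analogue of the wrapper relationship (flag value and names are stand-ins):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define __GFP_ZERO 0x1  /* stand-in flag value */

    /* stand-in for dma_alloc_coherent(): honors a zeroing flag */
    static void *dma_alloc(size_t size, unsigned int flags)
    {
        void *p = malloc(size);

        if (p && (flags & __GFP_ZERO))
            memset(p, 0, size);
        return p;
    }

    /* dma_zalloc_coherent() is (modulo types) this thin wrapper */
    static void *dma_zalloc(size_t size, unsigned int flags)
    {
        return dma_alloc(size, flags | __GFP_ZERO);
    }

    int main(void)
    {
        unsigned char *ring = dma_zalloc(64, 0);

        printf("%d\n", ring ? ring[0] : -1);  /* 0: zeroed */
        free(ring);
        return 0;
    }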
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 299d0184f983..ea20182c6969 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -800,7 +800,16 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
800 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 800 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
801} 801}
802 802
803int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, 803static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
804 struct mlx4_vhcr *vhcr,
805 struct mlx4_cmd_mailbox *inbox,
806 struct mlx4_cmd_mailbox *outbox,
807 struct mlx4_cmd_info *cmd)
808{
809 return -EPERM;
810}
811
812static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
804 struct mlx4_vhcr *vhcr, 813 struct mlx4_vhcr *vhcr,
805 struct mlx4_cmd_mailbox *inbox, 814 struct mlx4_cmd_mailbox *inbox,
806 struct mlx4_cmd_mailbox *outbox, 815 struct mlx4_cmd_mailbox *outbox,
@@ -1252,6 +1261,15 @@ static struct mlx4_cmd_info cmd_info[] = {
1252 .wrapper = MLX4_CMD_UPDATE_QP_wrapper 1261 .wrapper = MLX4_CMD_UPDATE_QP_wrapper
1253 }, 1262 },
1254 { 1263 {
1264 .opcode = MLX4_CMD_GET_OP_REQ,
1265 .has_inbox = false,
1266 .has_outbox = false,
1267 .out_is_imm = false,
1268 .encode_slave_id = false,
1269 .verify = NULL,
1270 .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
1271 },
1272 {
1255 .opcode = MLX4_CMD_CONF_SPECIAL_QP, 1273 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1256 .has_inbox = false, 1274 .has_inbox = false,
1257 .has_outbox = false, 1275 .has_outbox = false,
@@ -1526,7 +1544,7 @@ static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
1526 return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); 1544 return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
1527} 1545}
1528 1546
1529int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, 1547static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1530 int slave, int port) 1548 int slave, int port)
1531{ 1549{
1532 struct mlx4_vport_oper_state *vp_oper; 1550 struct mlx4_vport_oper_state *vp_oper;
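Note: mlx4 slave commands are table-driven; each opcode carries a cmd_info entry whose .wrapper either services or rejects the request, so making these wrappers static simply means slaves get -EPERM through the table rather than via an exported symbol. The dispatch shape in miniature (opcodes here are illustrative, not the real command values):

    #include <stdio.h>

    #define EPERM 1

    struct cmd_info {
        int opcode;
        int (*wrapper)(void);
    };

    static int update_qp_wrapper(void)  { return -EPERM; } /* slaves may not */
    static int get_op_req_wrapper(void) { return -EPERM; }

    static const struct cmd_info cmd_info[] = {
        { .opcode = 0x61, .wrapper = update_qp_wrapper },
        { .opcode = 0x59, .wrapper = get_op_req_wrapper },
    };

    static int dispatch(int opcode)
    {
        for (unsigned int i = 0; i < sizeof(cmd_info) / sizeof(cmd_info[0]); i++)
            if (cmd_info[i].opcode == opcode)
                return cmd_info[i].wrapper();
        return -1;  /* unknown command */
    }

    int main(void)
    {
        printf("%d\n", dispatch(0x61));  /* -EPERM */
        return 0;
    }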
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 9d4a1ea030d8..b4881b686159 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -160,6 +160,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
160 struct ieee_pfc *pfc) 160 struct ieee_pfc *pfc)
161{ 161{
162 struct mlx4_en_priv *priv = netdev_priv(dev); 162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct mlx4_en_port_profile *prof = priv->prof;
163 struct mlx4_en_dev *mdev = priv->mdev; 164 struct mlx4_en_dev *mdev = priv->mdev;
164 int err; 165 int err;
165 166
@@ -169,15 +170,17 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
169 pfc->mbc, 170 pfc->mbc,
170 pfc->delay); 171 pfc->delay);
171 172
172 priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en; 173 prof->rx_pause = !pfc->pfc_en;
173 priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en; 174 prof->tx_pause = !pfc->pfc_en;
175 prof->rx_ppp = pfc->pfc_en;
176 prof->tx_ppp = pfc->pfc_en;
174 177
175 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 178 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
176 priv->rx_skb_size + ETH_FCS_LEN, 179 priv->rx_skb_size + ETH_FCS_LEN,
177 priv->prof->tx_pause, 180 prof->tx_pause,
178 priv->prof->tx_ppp, 181 prof->tx_ppp,
179 priv->prof->rx_pause, 182 prof->rx_pause,
180 priv->prof->rx_ppp); 183 prof->rx_ppp);
181 if (err) 184 if (err)
182 en_err(priv, "Failed setting pause params\n"); 185 en_err(priv, "Failed setting pause params\n");
183 186
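Note: with IEEE PFC enabled the port must not also run global link pause, so the profile now derives all four knobs from pfc_en: the per-priority bitmap on, the pause pair off. The invariant in isolation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t pfc_en = 0x05;  /* PFC on priorities 0 and 2 */

        bool rx_pause = !pfc_en, tx_pause = !pfc_en;
        uint8_t rx_ppp = pfc_en, tx_ppp = pfc_en;

        /* either global pause or per-priority pause, never both */
        printf("pause=%d ppp=0x%02x\n", rx_pause && tx_pause,
               (unsigned int)(rx_ppp & tx_ppp));
        return 0;
    }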
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 7c492382da09..0698c82d6ff1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
191 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); 191 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
192} 192}
193 193
194static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
195 struct mlx4_en_tx_ring *ring, int index,
196 u8 owner)
197{
198 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
199 struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
200 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
201 void *end = ring->buf + ring->buf_size;
202 __be32 *ptr = (__be32 *)tx_desc;
203 int i;
204
205 /* Optimize the common case when there are no wraparounds */
206 if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
207 /* Stamp the freed descriptor */
208 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
209 i += STAMP_STRIDE) {
210 *ptr = stamp;
211 ptr += STAMP_DWORDS;
212 }
213 } else {
214 /* Stamp the freed descriptor */
215 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
216 i += STAMP_STRIDE) {
217 *ptr = stamp;
218 ptr += STAMP_DWORDS;
219 if ((void *)ptr >= end) {
220 ptr = ring->buf;
221 stamp ^= cpu_to_be32(0x80000000);
222 }
223 }
224 }
225}
226
194 227
195static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, 228static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
196 struct mlx4_en_tx_ring *ring, 229 struct mlx4_en_tx_ring *ring,
@@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
205 void *end = ring->buf + ring->buf_size; 238 void *end = ring->buf + ring->buf_size;
206 int frags = skb_shinfo(skb)->nr_frags; 239 int frags = skb_shinfo(skb)->nr_frags;
207 int i; 240 int i;
208 __be32 *ptr = (__be32 *)tx_desc;
209 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
210 struct skb_shared_hwtstamps hwts; 241 struct skb_shared_hwtstamps hwts;
211 242
212 if (timestamp) { 243 if (timestamp) {
@@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
232 skb_frag_size(frag), PCI_DMA_TODEVICE); 263 skb_frag_size(frag), PCI_DMA_TODEVICE);
233 } 264 }
234 } 265 }
235 /* Stamp the freed descriptor */
236 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
237 *ptr = stamp;
238 ptr += STAMP_DWORDS;
239 }
240
241 } else { 266 } else {
242 if (!tx_info->inl) { 267 if (!tx_info->inl) {
243 if ((void *) data >= end) { 268 if ((void *) data >= end) {
@@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
263 ++data; 288 ++data;
264 } 289 }
265 } 290 }
266 /* Stamp the freed descriptor */
267 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
268 *ptr = stamp;
269 ptr += STAMP_DWORDS;
270 if ((void *) ptr >= end) {
271 ptr = ring->buf;
272 stamp ^= cpu_to_be32(0x80000000);
273 }
274 }
275
276 } 291 }
277 dev_kfree_skb_any(skb); 292 dev_kfree_skb_any(skb);
278 return tx_info->nr_txbb; 293 return tx_info->nr_txbb;
@@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
318 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; 333 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
319 struct mlx4_cqe *cqe; 334 struct mlx4_cqe *cqe;
320 u16 index; 335 u16 index;
321 u16 new_index, ring_index; 336 u16 new_index, ring_index, stamp_index;
322 u32 txbbs_skipped = 0; 337 u32 txbbs_skipped = 0;
338 u32 txbbs_stamp = 0;
323 u32 cons_index = mcq->cons_index; 339 u32 cons_index = mcq->cons_index;
324 int size = cq->size; 340 int size = cq->size;
325 u32 size_mask = ring->size_mask; 341 u32 size_mask = ring->size_mask;
@@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
335 index = cons_index & size_mask; 351 index = cons_index & size_mask;
336 cqe = &buf[(index << factor) + factor]; 352 cqe = &buf[(index << factor) + factor];
337 ring_index = ring->cons & size_mask; 353 ring_index = ring->cons & size_mask;
354 stamp_index = ring_index;
338 355
339 /* Process all completed CQEs */ 356 /* Process all completed CQEs */
340 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, 357 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -345,6 +362,15 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
345 */ 362 */
346 rmb(); 363 rmb();
347 364
365 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
366 MLX4_CQE_OPCODE_ERROR)) {
367 struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
368
369 en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
370 cqe_err->vendor_err_syndrome,
371 cqe_err->syndrome);
372 }
373
348 /* Skip over last polled CQE */ 374 /* Skip over last polled CQE */
349 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; 375 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
350 376
@@ -359,6 +385,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
359 priv, ring, ring_index, 385 priv, ring, ring_index,
360 !!((ring->cons + txbbs_skipped) & 386 !!((ring->cons + txbbs_skipped) &
361 ring->size), timestamp); 387 ring->size), timestamp);
388
389 mlx4_en_stamp_wqe(priv, ring, stamp_index,
390 !!((ring->cons + txbbs_stamp) &
391 ring->size));
392 stamp_index = ring_index;
393 txbbs_stamp = txbbs_skipped;
362 packets++; 394 packets++;
363 bytes += ring->tx_info[ring_index].nr_bytes; 395 bytes += ring->tx_info[ring_index].nr_bytes;
364 } while (ring_index != new_index); 396 } while (ring_index != new_index);
@@ -556,17 +588,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
556{ 588{
557 struct mlx4_en_priv *priv = netdev_priv(dev); 589 struct mlx4_en_priv *priv = netdev_priv(dev);
558 struct mlx4_en_dev *mdev = priv->mdev; 590 struct mlx4_en_dev *mdev = priv->mdev;
591 struct device *ddev = priv->ddev;
559 struct mlx4_en_tx_ring *ring; 592 struct mlx4_en_tx_ring *ring;
560 struct mlx4_en_tx_desc *tx_desc; 593 struct mlx4_en_tx_desc *tx_desc;
561 struct mlx4_wqe_data_seg *data; 594 struct mlx4_wqe_data_seg *data;
562 struct skb_frag_struct *frag;
563 struct mlx4_en_tx_info *tx_info; 595 struct mlx4_en_tx_info *tx_info;
564 struct ethhdr *ethh;
565 int tx_ind = 0; 596 int tx_ind = 0;
566 int nr_txbb; 597 int nr_txbb;
567 int desc_size; 598 int desc_size;
568 int real_size; 599 int real_size;
569 dma_addr_t dma;
570 u32 index, bf_index; 600 u32 index, bf_index;
571 __be32 op_own; 601 __be32 op_own;
572 u16 vlan_tag = 0; 602 u16 vlan_tag = 0;
@@ -642,6 +672,61 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
642 tx_info->skb = skb; 672 tx_info->skb = skb;
643 tx_info->nr_txbb = nr_txbb; 673 tx_info->nr_txbb = nr_txbb;
644 674
675 if (lso_header_size)
676 data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
677 DS_SIZE));
678 else
679 data = &tx_desc->data;
680
681 /* valid only for non-inline segments */
682 tx_info->data_offset = (void *)data - (void *)tx_desc;
683
684 tx_info->linear = (lso_header_size < skb_headlen(skb) &&
685 !is_inline(skb, NULL)) ? 1 : 0;
686
687 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
688
689 if (is_inline(skb, &fragptr)) {
690 tx_info->inl = 1;
691 } else {
692 /* Map fragments */
693 for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
694 struct skb_frag_struct *frag;
695 dma_addr_t dma;
696
697 frag = &skb_shinfo(skb)->frags[i];
698 dma = skb_frag_dma_map(ddev, frag,
699 0, skb_frag_size(frag),
700 DMA_TO_DEVICE);
701 if (dma_mapping_error(ddev, dma))
702 goto tx_drop_unmap;
703
704 data->addr = cpu_to_be64(dma);
705 data->lkey = cpu_to_be32(mdev->mr.key);
706 wmb();
707 data->byte_count = cpu_to_be32(skb_frag_size(frag));
708 --data;
709 }
710
711 /* Map linear part */
712 if (tx_info->linear) {
713 u32 byte_count = skb_headlen(skb) - lso_header_size;
714 dma_addr_t dma;
715
716 dma = dma_map_single(ddev, skb->data +
717 lso_header_size, byte_count,
718 PCI_DMA_TODEVICE);
719 if (dma_mapping_error(ddev, dma))
720 goto tx_drop_unmap;
721
722 data->addr = cpu_to_be64(dma);
723 data->lkey = cpu_to_be32(mdev->mr.key);
724 wmb();
725 data->byte_count = cpu_to_be32(byte_count);
726 }
727 tx_info->inl = 0;
728 }
729
645 /* 730 /*
646 * For timestamping add flag to skb_shinfo and 731 * For timestamping add flag to skb_shinfo and
647 * set flag for further reference 732 * set flag for further reference
@@ -666,6 +751,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
666 } 751 }
667 752
668 if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) { 753 if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
754 struct ethhdr *ethh;
755
669 /* Copy dst mac address to wqe. This allows loopback in eSwitch, 756 /* Copy dst mac address to wqe. This allows loopback in eSwitch,
670 * so that VFs and PF can communicate with each other 757 * so that VFs and PF can communicate with each other
671 */ 758 */
@@ -688,8 +775,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
688 /* Copy headers; 775 /* Copy headers;
689 * note that we already verified that it is linear */ 776 * note that we already verified that it is linear */
690 memcpy(tx_desc->lso.header, skb->data, lso_header_size); 777 memcpy(tx_desc->lso.header, skb->data, lso_header_size);
691 data = ((void *) &tx_desc->lso +
692 ALIGN(lso_header_size + 4, DS_SIZE));
693 778
694 priv->port_stats.tso_packets++; 779 priv->port_stats.tso_packets++;
695 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + 780 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
@@ -701,7 +786,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
701 op_own = cpu_to_be32(MLX4_OPCODE_SEND) | 786 op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
702 ((ring->prod & ring->size) ? 787 ((ring->prod & ring->size) ?
703 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); 788 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
704 data = &tx_desc->data;
705 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); 789 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
706 ring->packets++; 790 ring->packets++;
707 791
@@ -710,38 +794,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
710 netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); 794 netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
711 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); 795 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
712 796
713 797 if (tx_info->inl) {
714 /* valid only for none inline segments */
715 tx_info->data_offset = (void *) data - (void *) tx_desc;
716
717 tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
718 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
719
720 if (!is_inline(skb, &fragptr)) {
721 /* Map fragments */
722 for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
723 frag = &skb_shinfo(skb)->frags[i];
724 dma = skb_frag_dma_map(priv->ddev, frag,
725 0, skb_frag_size(frag),
726 DMA_TO_DEVICE);
727 data->addr = cpu_to_be64(dma);
728 data->lkey = cpu_to_be32(mdev->mr.key);
729 wmb();
730 data->byte_count = cpu_to_be32(skb_frag_size(frag));
731 --data;
732 }
733
734 /* Map linear part */
735 if (tx_info->linear) {
736 dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
737 skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
738 data->addr = cpu_to_be64(dma);
739 data->lkey = cpu_to_be32(mdev->mr.key);
740 wmb();
741 data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
742 }
743 tx_info->inl = 0;
744 } else {
745 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); 798 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
746 tx_info->inl = 1; 799 tx_info->inl = 1;
747 } 800 }
@@ -781,6 +834,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
781 834
782 return NETDEV_TX_OK; 835 return NETDEV_TX_OK;
783 836
837tx_drop_unmap:
838 en_err(priv, "DMA mapping error\n");
839
840 for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
841 data++;
842 dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
843 be32_to_cpu(data->byte_count),
844 PCI_DMA_TODEVICE);
845 }
846
784tx_drop: 847tx_drop:
785 dev_kfree_skb_any(skb); 848 dev_kfree_skb_any(skb);
786 priv->stats.tx_dropped++; 849 priv->stats.tx_dropped++;
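Note: mlx4_en_stamp_wqe() splits the stamping loop so the common non-wrapping case stays branch-free, and only the wrap variant re-anchors the pointer at the ring base and flips the ownership bit in the stamp value. The wrap handling in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_BYTES 64
    #define STRIDE      8

    int main(void)
    {
        uint8_t ring[RING_BYTES] = {0};
        uint8_t stamp = 0xAA;
        /* descriptor starts near the end and wraps to the front */
        unsigned int start = 48, len = 32, i;
        uint8_t *ptr = ring + start;

        for (i = 0; i < len; i += STRIDE) {
            *ptr = stamp;
            ptr += STRIDE;
            if (ptr >= ring + RING_BYTES) {
                ptr = ring;     /* wrap to the ring base */
                stamp ^= 0x80;  /* flip ownership, as the driver flips bit 31 */
            }
        }
        printf("%02x %02x\n", ring[48], ring[0]);  /* aa 2a */
        return 0;
    }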
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 7e042869ef0c..0416c5b3b35c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -79,6 +79,7 @@ enum {
79 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ 79 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
80 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ 80 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
81 (1ull << MLX4_EVENT_TYPE_CMD) | \ 81 (1ull << MLX4_EVENT_TYPE_CMD) | \
82 (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \
82 (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ 83 (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
83 (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ 84 (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
84 (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) 85 (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
@@ -629,6 +630,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
629 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); 630 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
630 break; 631 break;
631 632
633 case MLX4_EVENT_TYPE_OP_REQUIRED:
634 atomic_inc(&priv->opreq_count);
635 /* FW commands can't be executed from interrupt context,
636 * so defer the work to a task.
637 */
638 queue_work(mlx4_wq, &priv->opreq_task);
639 break;
640
632 case MLX4_EVENT_TYPE_COMM_CHANNEL: 641 case MLX4_EVENT_TYPE_COMM_CHANNEL:
633 if (!mlx4_is_master(dev)) { 642 if (!mlx4_is_master(dev)) {
634 mlx4_warn(dev, "Received comm channel event " 643 mlx4_warn(dev, "Received comm channel event "
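Note: the EQ interrupt handler only increments a counter and queues work; the sleeping GET_OP_REQ firmware commands run later in process context, draining until the counter reaches zero. The shape of that handoff, with printf() standing in for mlx4_cmd_box():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int opreq_count;

    /* interrupt-context half: cheap, no sleeping commands */
    static void eq_event(void)
    {
        atomic_fetch_add(&opreq_count, 1);
        /* queue_work(mlx4_wq, &opreq_task) would go here */
    }

    /* deferred half: issue one FW command per pending event */
    static void opreq_action(void)
    {
        int num_tasks = atomic_load(&opreq_count);

        while (num_tasks) {
            printf("GET_OP_REQ\n");  /* stand-in for the FW command */
            num_tasks = atomic_fetch_sub(&opreq_count, 1) - 1;
        }
    }

    int main(void)
    {
        eq_event();
        eq_event();
        opreq_action();  /* prints GET_OP_REQ twice */
        return 0;
    }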
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 6fc6dabc78d5..0d63daa2f422 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1696,3 +1696,107 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
1696 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1696 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1697} 1697}
1698EXPORT_SYMBOL_GPL(mlx4_wol_write); 1698EXPORT_SYMBOL_GPL(mlx4_wol_write);
1699
1700enum {
1701 ADD_TO_MCG = 0x26,
1702};
1703
1704
1705void mlx4_opreq_action(struct work_struct *work)
1706{
1707 struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
1708 opreq_task);
1709 struct mlx4_dev *dev = &priv->dev;
1710 int num_tasks = atomic_read(&priv->opreq_count);
1711 struct mlx4_cmd_mailbox *mailbox;
1712 struct mlx4_mgm *mgm;
1713 u32 *outbox;
1714 u32 modifier;
1715 u16 token;
1716 u16 type_m;
1717 u16 type;
1718 int err;
1719 u32 num_qps;
1720 struct mlx4_qp qp;
1721 int i;
1722 u8 rem_mcg;
1723 u8 prot;
1724
1725#define GET_OP_REQ_MODIFIER_OFFSET 0x08
1726#define GET_OP_REQ_TOKEN_OFFSET 0x14
1727#define GET_OP_REQ_TYPE_OFFSET 0x1a
1728#define GET_OP_REQ_DATA_OFFSET 0x20
1729
1730 mailbox = mlx4_alloc_cmd_mailbox(dev);
1731 if (IS_ERR(mailbox)) {
1732 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
1733 return;
1734 }
1735 outbox = mailbox->buf;
1736
1737 while (num_tasks) {
1738 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1739 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1740 MLX4_CMD_NATIVE);
1741 if (err) {
1742 mlx4_err(dev, "Failed to retrieve required operation: %d\n",
1743 err);
1744 goto out; /* don't leak the command mailbox */
1745 }
1746 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
1747 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
1748 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
1749 type_m = type >> 12;
1750 type &= 0xfff;
1751
1752 switch (type) {
1753 case ADD_TO_MCG:
1754 if (dev->caps.steering_mode ==
1755 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1756 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
1757 err = EPERM;
1758 break;
1759 }
1760 mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
1761 GET_OP_REQ_DATA_OFFSET);
1762 num_qps = be32_to_cpu(mgm->members_count) &
1763 MGM_QPN_MASK;
1764 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
1765 prot = ((u8 *)(&mgm->members_count))[0] >> 6;
1766
1767 for (i = 0; i < num_qps; i++) {
1768 qp.qpn = be32_to_cpu(mgm->qp[i]);
1769 if (rem_mcg)
1770 err = mlx4_multicast_detach(dev, &qp,
1771 mgm->gid,
1772 prot, 0);
1773 else
1774 err = mlx4_multicast_attach(dev, &qp,
1775 mgm->gid,
1776 mgm->gid[5],
1777 0, prot,
1778 NULL);
1779 if (err)
1780 break;
1781 }
1782 break;
1783 default:
1784 mlx4_warn(dev, "Bad type for required operation\n");
1785 err = EINVAL;
1786 break;
1787 }
1788 err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
1789 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1790 MLX4_CMD_NATIVE);
1791 if (err) {
1792 mlx4_err(dev, "Failed to acknowledge required request: %d\n",
1793 err);
1794 goto out;
1795 }
1796 memset(outbox, 0, 0xffc);
1797 num_tasks = atomic_dec_return(&priv->opreq_count);
1798 }
1799
1800out:
1801 mlx4_free_cmd_mailbox(dev, mailbox);
1802}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index fdf41665a059..a0a368b7c939 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -220,5 +220,6 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
220int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); 220int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
221int mlx4_NOP(struct mlx4_dev *dev); 221int mlx4_NOP(struct mlx4_dev *dev);
222int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); 222int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
223void mlx4_opreq_action(struct work_struct *work);
223 224
224#endif /* MLX4_FW_H */ 225#endif /* MLX4_FW_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 36be3208786a..60c9f4f103fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1692,11 +1692,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1692 goto err_xrcd_table_free; 1692 goto err_xrcd_table_free;
1693 } 1693 }
1694 1694
1695 if (!mlx4_is_slave(dev)) {
1696 err = mlx4_init_mcg_table(dev);
1697 if (err) {
1698 mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
1699 goto err_mr_table_free;
1700 }
1701 }
1702
1695 err = mlx4_init_eq_table(dev); 1703 err = mlx4_init_eq_table(dev);
1696 if (err) { 1704 if (err) {
1697 mlx4_err(dev, "Failed to initialize " 1705 mlx4_err(dev, "Failed to initialize "
1698 "event queue table, aborting.\n"); 1706 "event queue table, aborting.\n");
1699 goto err_mr_table_free; 1707 goto err_mcg_table_free;
1700 } 1708 }
1701 1709
1702 err = mlx4_cmd_use_events(dev); 1710 err = mlx4_cmd_use_events(dev);
@@ -1746,19 +1754,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1746 goto err_srq_table_free; 1754 goto err_srq_table_free;
1747 } 1755 }
1748 1756
1749 if (!mlx4_is_slave(dev)) {
1750 err = mlx4_init_mcg_table(dev);
1751 if (err) {
1752 mlx4_err(dev, "Failed to initialize "
1753 "multicast group table, aborting.\n");
1754 goto err_qp_table_free;
1755 }
1756 }
1757
1758 err = mlx4_init_counters_table(dev); 1757 err = mlx4_init_counters_table(dev);
1759 if (err && err != -ENOENT) { 1758 if (err && err != -ENOENT) {
1760 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1759 mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
1761 goto err_mcg_table_free; 1760 goto err_qp_table_free;
1762 } 1761 }
1763 1762
1764 if (!mlx4_is_slave(dev)) { 1763 if (!mlx4_is_slave(dev)) {
@@ -1803,9 +1802,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1803err_counters_table_free: 1802err_counters_table_free:
1804 mlx4_cleanup_counters_table(dev); 1803 mlx4_cleanup_counters_table(dev);
1805 1804
1806err_mcg_table_free:
1807 mlx4_cleanup_mcg_table(dev);
1808
1809err_qp_table_free: 1805err_qp_table_free:
1810 mlx4_cleanup_qp_table(dev); 1806 mlx4_cleanup_qp_table(dev);
1811 1807
@@ -1821,6 +1817,10 @@ err_cmd_poll:
1821err_eq_table_free: 1817err_eq_table_free:
1822 mlx4_cleanup_eq_table(dev); 1818 mlx4_cleanup_eq_table(dev);
1823 1819
1820err_mcg_table_free:
1821 if (!mlx4_is_slave(dev))
1822 mlx4_cleanup_mcg_table(dev);
1823
1824err_mr_table_free: 1824err_mr_table_free:
1825 mlx4_cleanup_mr_table(dev); 1825 mlx4_cleanup_mr_table(dev);
1826 1826
@@ -2197,6 +2197,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2197 } 2197 }
2198 } 2198 }
2199 2199
2200 atomic_set(&priv->opreq_count, 0);
2201 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
2202
2200 /* 2203 /*
2201 * Now reset the HCA before we touch the PCI capabilities or 2204 * Now reset the HCA before we touch the PCI capabilities or
2202 * attempt a firmware command, since a boot ROM may have left 2205 * attempt a firmware command, since a boot ROM may have left
@@ -2315,12 +2318,12 @@ err_port:
2315 mlx4_cleanup_port_info(&priv->port[port]); 2318 mlx4_cleanup_port_info(&priv->port[port]);
2316 2319
2317 mlx4_cleanup_counters_table(dev); 2320 mlx4_cleanup_counters_table(dev);
2318 mlx4_cleanup_mcg_table(dev);
2319 mlx4_cleanup_qp_table(dev); 2321 mlx4_cleanup_qp_table(dev);
2320 mlx4_cleanup_srq_table(dev); 2322 mlx4_cleanup_srq_table(dev);
2321 mlx4_cleanup_cq_table(dev); 2323 mlx4_cleanup_cq_table(dev);
2322 mlx4_cmd_use_polling(dev); 2324 mlx4_cmd_use_polling(dev);
2323 mlx4_cleanup_eq_table(dev); 2325 mlx4_cleanup_eq_table(dev);
2326 mlx4_cleanup_mcg_table(dev);
2324 mlx4_cleanup_mr_table(dev); 2327 mlx4_cleanup_mr_table(dev);
2325 mlx4_cleanup_xrcd_table(dev); 2328 mlx4_cleanup_xrcd_table(dev);
2326 mlx4_cleanup_pd_table(dev); 2329 mlx4_cleanup_pd_table(dev);
@@ -2403,12 +2406,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2403 RES_TR_FREE_SLAVES_ONLY); 2406 RES_TR_FREE_SLAVES_ONLY);
2404 2407
2405 mlx4_cleanup_counters_table(dev); 2408 mlx4_cleanup_counters_table(dev);
2406 mlx4_cleanup_mcg_table(dev);
2407 mlx4_cleanup_qp_table(dev); 2409 mlx4_cleanup_qp_table(dev);
2408 mlx4_cleanup_srq_table(dev); 2410 mlx4_cleanup_srq_table(dev);
2409 mlx4_cleanup_cq_table(dev); 2411 mlx4_cleanup_cq_table(dev);
2410 mlx4_cmd_use_polling(dev); 2412 mlx4_cmd_use_polling(dev);
2411 mlx4_cleanup_eq_table(dev); 2413 mlx4_cleanup_eq_table(dev);
2414 mlx4_cleanup_mcg_table(dev);
2412 mlx4_cleanup_mr_table(dev); 2415 mlx4_cleanup_mr_table(dev);
2413 mlx4_cleanup_xrcd_table(dev); 2416 mlx4_cleanup_xrcd_table(dev);
2414 mlx4_cleanup_pd_table(dev); 2417 mlx4_cleanup_pd_table(dev);
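Aside: the mlx4 hunks above are one refactor seen from several angles — multicast-group (MCG) table setup moves earlier in mlx4_setup_hca(), so its cleanup label has to move correspondingly later, keeping teardown the exact mirror of setup. That is the standard goto-unwind idiom; a minimal stand-alone sketch with hypothetical init/cleanup names:

    #include <stdio.h>

    static int  init_a(void)    { return 0; }   /* pretend A succeeds */
    static int  init_b(void)    { return -1; }  /* pretend B fails */
    static void cleanup_a(void) { puts("cleanup A"); }

    static int setup(void)
    {
            int err;

            err = init_a();
            if (err)
                    goto err_out;

            err = init_b();
            if (err)
                    goto err_a;     /* unwind only what already succeeded */

            return 0;

    err_a:
            cleanup_a();            /* labels run in reverse order of init */
    err_out:
            return err;
    }

    int main(void)
    {
            return setup() ? 1 : 0;
    }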
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f3e804f2a35f..55f6245efb6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -39,19 +39,8 @@
39 39
40#include "mlx4.h" 40#include "mlx4.h"
41 41
42#define MGM_QPN_MASK 0x00FFFFFF
43#define MGM_BLCK_LB_BIT 30
44
45static const u8 zero_gid[16]; /* automatically initialized to 0 */ 42static const u8 zero_gid[16]; /* automatically initialized to 0 */
46 43
47struct mlx4_mgm {
48 __be32 next_gid_index;
49 __be32 members_count;
50 u32 reserved[2];
51 u8 gid[16];
52 __be32 qp[MLX4_MAX_QP_PER_MGM];
53};
54
55int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 44int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
56{ 45{
57 return 1 << dev->oper_log_mgm_entry_size; 46 return 1 << dev->oper_log_mgm_entry_size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 17d9277e33ef..348bb8c7d9a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -554,6 +554,17 @@ struct mlx4_mfunc {
554 struct mlx4_mfunc_master_ctx master; 554 struct mlx4_mfunc_master_ctx master;
555}; 555};
556 556
557#define MGM_QPN_MASK 0x00FFFFFF
558#define MGM_BLCK_LB_BIT 30
559
560struct mlx4_mgm {
561 __be32 next_gid_index;
562 __be32 members_count;
563 u32 reserved[2];
564 u8 gid[16];
565 __be32 qp[MLX4_MAX_QP_PER_MGM];
566};
567
557struct mlx4_cmd { 568struct mlx4_cmd {
558 struct pci_pool *pool; 569 struct pci_pool *pool;
559 void __iomem *hcr; 570 void __iomem *hcr;
@@ -802,6 +813,8 @@ struct mlx4_priv {
802 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 813 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
803 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 814 __be64 slave_node_guids[MLX4_MFUNC_MAX];
804 815
816 atomic_t opreq_count;
817 struct work_struct opreq_task;
805}; 818};
806 819
807static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 820static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
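Two things happen in the mlx4 header: struct mlx4_mgm and the MGM_* masks are hoisted out of mcg.c so other mlx4 files can see them, and the new opreq_count/opreq_task fields pair with the INIT_WORK(&priv->opreq_task, mlx4_opreq_action) call added in main.c above. The latter is the usual deferred-work shape: the hot path only bumps a counter and schedules the task, and the task drains whatever accumulated. A rough user-space model (hypothetical names; C11 atomics stand in for the kernel's workqueue machinery):

    #include <stdatomic.h>

    static atomic_int opreq_count;

    /* hot path: cheap, never blocks */
    static void opreq_event(void)
    {
            atomic_fetch_add(&opreq_count, 1);
            /* the real driver queues priv->opreq_task here */
    }

    /* work item: drain everything queued since the last run */
    static void opreq_action(void)
    {
            int pending = atomic_exchange(&opreq_count, 0);

            while (pending-- > 0) {
                    /* query firmware for one request and service it */
            }
    }

    int main(void)
    {
            opreq_event();
            opreq_action();
            return 0;
    }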
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index e393d998be89..0951f7aca1ef 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -705,7 +705,8 @@ static void ks8842_rx_frame(struct net_device *netdev,
705 ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); 705 ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
706} 706}
707 707
708void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) 708static void ks8842_handle_rx(struct net_device *netdev,
709 struct ks8842_adapter *adapter)
709{ 710{
710 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; 711 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
711 netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); 712 netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
@@ -715,7 +716,8 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
715 } 716 }
716} 717}
717 718
718void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) 719static void ks8842_handle_tx(struct net_device *netdev,
720 struct ks8842_adapter *adapter)
719{ 721{
720 u16 sr = ks8842_read16(adapter, 16, REG_TXSR); 722 u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
721 netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); 723 netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
@@ -724,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
724 netif_wake_queue(netdev); 726 netif_wake_queue(netdev);
725} 727}
726 728
727void ks8842_handle_rx_overrun(struct net_device *netdev, 729static void ks8842_handle_rx_overrun(struct net_device *netdev,
728 struct ks8842_adapter *adapter) 730 struct ks8842_adapter *adapter)
729{ 731{
730 netdev_dbg(netdev, "%s: entry\n", __func__); 732 netdev_dbg(netdev, "%s: entry\n", __func__);
@@ -732,7 +734,7 @@ void ks8842_handle_rx_overrun(struct net_device *netdev,
732 netdev->stats.rx_fifo_errors++; 734 netdev->stats.rx_fifo_errors++;
733} 735}
734 736
735void ks8842_tasklet(unsigned long arg) 737static void ks8842_tasklet(unsigned long arg)
736{ 738{
737 struct net_device *netdev = (struct net_device *)arg; 739 struct net_device *netdev = (struct net_device *)arg;
738 struct ks8842_adapter *adapter = netdev_priv(netdev); 740 struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1146,7 +1148,7 @@ static int ks8842_probe(struct platform_device *pdev)
1146 struct resource *iomem; 1148 struct resource *iomem;
1147 struct net_device *netdev; 1149 struct net_device *netdev;
1148 struct ks8842_adapter *adapter; 1150 struct ks8842_adapter *adapter;
1149 struct ks8842_platform_data *pdata = pdev->dev.platform_data; 1151 struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
1150 u16 id; 1152 u16 id;
1151 unsigned i; 1153 unsigned i;
1152 1154
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ac20098b542a..0fba1532d326 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -688,7 +688,7 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
688} 688}
689 689
690 690
691void ks_enable_qmu(struct ks_net *ks) 691static void ks_enable_qmu(struct ks_net *ks)
692{ 692{
693 u16 w; 693 u16 w;
694 694
@@ -1636,7 +1636,7 @@ static int ks8851_probe(struct platform_device *pdev)
1636 } else { 1636 } else {
1637 struct ks8851_mll_platform_data *pdata; 1637 struct ks8851_mll_platform_data *pdata;
1638 1638
1639 pdata = pdev->dev.platform_data; 1639 pdata = dev_get_platdata(&pdev->dev);
1640 if (!pdata) { 1640 if (!pdata) {
1641 netdev_err(netdev, "No platform data\n"); 1641 netdev_err(netdev, "No platform data\n");
1642 err = -ENODEV; 1642 err = -ENODEV;
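Both Micrel drivers here (and netx-eth further down) stop dereferencing pdev->dev.platform_data directly and use the dev_get_platdata() accessor instead. The helper is, as far as I know, a thin inline wrapper; a simplified sketch of the pattern with a toy struct (not the real struct device):

    struct device {
            void *platform_data;    /* toy stand-in */
    };

    static inline void *dev_get_platdata(const struct device *dev)
    {
            return dev->platform_data;
    }

    /* callers no longer depend on where the field lives, e.g.
     *      struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
     */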
diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig
new file mode 100644
index 000000000000..1731e050fa27
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Kconfig
@@ -0,0 +1,30 @@
1#
2# MOXART device configuration
3#
4
5config NET_VENDOR_MOXART
6 bool "MOXA ART devices"
7 default y
8 depends on (ARM && ARCH_MOXART)
9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13
14 Note that the answer to this question doesn't directly affect the
15 kernel: saying N will just cause the configurator to skip all
16 the questions about MOXA ART devices. If you say Y, you will be asked
17 for your specific card in the following questions.
18
19if NET_VENDOR_MOXART
20
21config ARM_MOXART_ETHER
22 tristate "MOXART Ethernet support"
23 depends on ARM && ARCH_MOXART
24 select NET_CORE
25 ---help---
26	  If you wish to compile a kernel for hardware with a MOXA ART SoC and
27	  want to use the internal Ethernet, then you should answer Y here.
28
29
30endif # NET_VENDOR_MOXART
diff --git a/drivers/net/ethernet/moxa/Makefile b/drivers/net/ethernet/moxa/Makefile
new file mode 100644
index 000000000000..aa3c73e9e952
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the MOXART network device drivers.
3#
4
5obj-$(CONFIG_ARM_MOXART_ETHER) += moxart_ether.o
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
new file mode 100644
index 000000000000..83c2091c9c23
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -0,0 +1,559 @@
1/* MOXA ART Ethernet (RTL8201CP) driver.
2 *
3 * Copyright (C) 2013 Jonas Jensen
4 *
5 * Jonas Jensen <jonas.jensen@gmail.com>
6 *
7 * Based on code from
8 * Moxa Technology Co., Ltd. <www.moxa.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/skbuff.h>
20#include <linux/dma-mapping.h>
21#include <linux/ethtool.h>
22#include <linux/platform_device.h>
23#include <linux/interrupt.h>
24#include <linux/irq.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/crc32.h>
28#include <linux/crc32c.h>
29
30
31#include "moxart_ether.h"
32
33static inline void moxart_emac_write(struct net_device *ndev,
34 unsigned int reg, unsigned long value)
35{
36 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
37
38 writel(value, priv->base + reg);
39}
40
41static void moxart_update_mac_address(struct net_device *ndev)
42{
43 moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
44 ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
45 moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
46 ((ndev->dev_addr[2] << 24) |
47 (ndev->dev_addr[3] << 16) |
48 (ndev->dev_addr[4] << 8) |
49 (ndev->dev_addr[5])));
50}
51
52static int moxart_set_mac_address(struct net_device *ndev, void *addr)
53{
54 struct sockaddr *address = addr;
55
56 if (!is_valid_ether_addr(address->sa_data))
57 return -EADDRNOTAVAIL;
58
59 memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
60 moxart_update_mac_address(ndev);
61
62 return 0;
63}
64
65static void moxart_mac_free_memory(struct net_device *ndev)
66{
67 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
68 int i;
69
70 for (i = 0; i < RX_DESC_NUM; i++)
71 dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
72 priv->rx_buf_size, DMA_FROM_DEVICE);
73
74 if (priv->tx_desc_base)
75 dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
76 priv->tx_desc_base, priv->tx_base);
77
78 if (priv->rx_desc_base)
79 dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
80 priv->rx_desc_base, priv->rx_base);
81
82 kfree(priv->tx_buf_base);
83 kfree(priv->rx_buf_base);
84}
85
86static void moxart_mac_reset(struct net_device *ndev)
87{
88 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
89
90 writel(SW_RST, priv->base + REG_MAC_CTRL);
91 while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
92 mdelay(10);
93
94 writel(0, priv->base + REG_INTERRUPT_MASK);
95
96 priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
97}
98
99static void moxart_mac_enable(struct net_device *ndev)
100{
101 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
102
103 writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
104 writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
105 writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);
106
107 priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
108 writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
109
110 priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
111 writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
112}
113
114static void moxart_mac_setup_desc_ring(struct net_device *ndev)
115{
116 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
117 void __iomem *desc;
118 int i;
119
120 for (i = 0; i < TX_DESC_NUM; i++) {
121 desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
122 memset(desc, 0, TX_REG_DESC_SIZE);
123
124 priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
125 }
126 writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
127
128 priv->tx_head = 0;
129 priv->tx_tail = 0;
130
131 for (i = 0; i < RX_DESC_NUM; i++) {
132 desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
133 memset(desc, 0, RX_REG_DESC_SIZE);
134 writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
135 writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
136 desc + RX_REG_OFFSET_DESC1);
137
138 priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
139 priv->rx_mapping[i] = dma_map_single(&ndev->dev,
140 priv->rx_buf[i],
141 priv->rx_buf_size,
142 DMA_FROM_DEVICE);
143 if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
144 netdev_err(ndev, "DMA mapping error\n");
145
146 writel(priv->rx_mapping[i],
147 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
148 writel(priv->rx_buf[i],
149 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
150 }
151 writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
152
153 priv->rx_head = 0;
154
155	/* reset the MAC controller TX/RX descriptor base address */
156 writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
157 writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
158}
159
160static int moxart_mac_open(struct net_device *ndev)
161{
162 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
163
164 if (!is_valid_ether_addr(ndev->dev_addr))
165 return -EADDRNOTAVAIL;
166
167 napi_enable(&priv->napi);
168
169 moxart_mac_reset(ndev);
170 moxart_update_mac_address(ndev);
171 moxart_mac_setup_desc_ring(ndev);
172 moxart_mac_enable(ndev);
173 netif_start_queue(ndev);
174
175 netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
176 __func__, readl(priv->base + REG_INTERRUPT_MASK),
177 readl(priv->base + REG_MAC_CTRL));
178
179 return 0;
180}
181
182static int moxart_mac_stop(struct net_device *ndev)
183{
184 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
185
186 napi_disable(&priv->napi);
187
188 netif_stop_queue(ndev);
189
190 /* disable all interrupts */
191 writel(0, priv->base + REG_INTERRUPT_MASK);
192
193 /* disable all functions */
194 writel(0, priv->base + REG_MAC_CTRL);
195
196 return 0;
197}
198
199static int moxart_rx_poll(struct napi_struct *napi, int budget)
200{
201 struct moxart_mac_priv_t *priv = container_of(napi,
202 struct moxart_mac_priv_t,
203 napi);
204 struct net_device *ndev = priv->ndev;
205 struct sk_buff *skb;
206 void __iomem *desc;
207 unsigned int desc0, len;
208 int rx_head = priv->rx_head;
209 int rx = 0;
210
211 while (1) {
212 desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
213 desc0 = readl(desc + RX_REG_OFFSET_DESC0);
214
215 if (desc0 & RX_DESC0_DMA_OWN)
216 break;
217
218 if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
219 RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
220 net_dbg_ratelimited("packet error\n");
221 priv->stats.rx_dropped++;
222 priv->stats.rx_errors++;
223			goto rx_next;
224 }
225
226 len = desc0 & RX_DESC0_FRAME_LEN_MASK;
227
228 if (len > RX_BUF_SIZE)
229 len = RX_BUF_SIZE;
230
231 skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
232 if (unlikely(!skb)) {
233 net_dbg_ratelimited("build_skb failed\n");
234 priv->stats.rx_dropped++;
235 priv->stats.rx_errors++;
236			goto rx_next;
237		}
238 skb_put(skb, len);
239 skb->protocol = eth_type_trans(skb, ndev);
240 napi_gro_receive(&priv->napi, skb);
241 rx++;
242
243 ndev->last_rx = jiffies;
244 priv->stats.rx_packets++;
245 priv->stats.rx_bytes += len;
246 if (desc0 & RX_DESC0_MULTICAST)
247 priv->stats.multicast++;
248rx_next:
249 writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
250
251 rx_head = RX_NEXT(rx_head);
252 priv->rx_head = rx_head;
253
254 if (rx >= budget)
255 break;
256 }
257
258 if (rx < budget) {
259 napi_gro_flush(napi, false);
260 __napi_complete(napi);
261 }
262
263 priv->reg_imr |= RPKT_FINISH_M;
264 writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
265
266 return rx;
267}
268
269static void moxart_tx_finished(struct net_device *ndev)
270{
271 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
272 unsigned tx_head = priv->tx_head;
273 unsigned tx_tail = priv->tx_tail;
274
275 while (tx_tail != tx_head) {
276 dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
277 priv->tx_len[tx_tail], DMA_TO_DEVICE);
278
279 priv->stats.tx_packets++;
280 priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
281
282 dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
283 priv->tx_skb[tx_tail] = NULL;
284
285 tx_tail = TX_NEXT(tx_tail);
286 }
287 priv->tx_tail = tx_tail;
288}
289
290static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
291{
292 struct net_device *ndev = (struct net_device *) dev_id;
293 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
294 unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);
295
296 if (ists & XPKT_OK_INT_STS)
297 moxart_tx_finished(ndev);
298
299 if (ists & RPKT_FINISH) {
300 if (napi_schedule_prep(&priv->napi)) {
301 priv->reg_imr &= ~RPKT_FINISH_M;
302 writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
303 __napi_schedule(&priv->napi);
304 }
305 }
306
307 return IRQ_HANDLED;
308}
309
310static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
311{
312 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
313 void __iomem *desc;
314 unsigned int len;
315 unsigned int tx_head = priv->tx_head;
316 u32 txdes1;
317 int ret = NETDEV_TX_BUSY;
318
319 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
320
321 spin_lock_irq(&priv->txlock);
322 if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
323 net_dbg_ratelimited("no TX space for packet\n");
324 priv->stats.tx_dropped++;
325 goto out_unlock;
326 }
327
328 len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
329
330 priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
331 len, DMA_TO_DEVICE);
332 if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
333 netdev_err(ndev, "DMA mapping error\n");
334 goto out_unlock;
335 }
336
337 priv->tx_len[tx_head] = len;
338 priv->tx_skb[tx_head] = skb;
339
340 writel(priv->tx_mapping[tx_head],
341 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
342 writel(skb->data,
343 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
344
345 if (skb->len < ETH_ZLEN) {
346 memset(&skb->data[skb->len],
347 0, ETH_ZLEN - skb->len);
348 len = ETH_ZLEN;
349 }
350
351 txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
352 txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
353 txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
354 txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
355 writel(txdes1, desc + TX_REG_OFFSET_DESC1);
356 writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
357
358 /* start to send packet */
359 writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
360
361 priv->tx_head = TX_NEXT(tx_head);
362
363 ndev->trans_start = jiffies;
364 ret = NETDEV_TX_OK;
365out_unlock:
366 spin_unlock_irq(&priv->txlock);
367
368 return ret;
369}
370
371static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
372{
373 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
374
375 return &priv->stats;
376}
377
378static void moxart_mac_setmulticast(struct net_device *ndev)
379{
380 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
381 struct netdev_hw_addr *ha;
382 int crc_val;
383
384 netdev_for_each_mc_addr(ha, ndev) {
385 crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
386 crc_val = (crc_val >> 26) & 0x3f;
387 if (crc_val >= 32) {
388 writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
389 (1UL << (crc_val - 32)),
390 priv->base + REG_MCAST_HASH_TABLE1);
391 } else {
392 writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
393 (1UL << crc_val),
394 priv->base + REG_MCAST_HASH_TABLE0);
395 }
396 }
397}
398
399static void moxart_mac_set_rx_mode(struct net_device *ndev)
400{
401 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
402
403 spin_lock_irq(&priv->txlock);
404
405 (ndev->flags & IFF_PROMISC) ? (priv->reg_maccr |= RCV_ALL) :
406 (priv->reg_maccr &= ~RCV_ALL);
407
408 (ndev->flags & IFF_ALLMULTI) ? (priv->reg_maccr |= RX_MULTIPKT) :
409 (priv->reg_maccr &= ~RX_MULTIPKT);
410
411 if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
412 priv->reg_maccr |= HT_MULTI_EN;
413 moxart_mac_setmulticast(ndev);
414 } else {
415 priv->reg_maccr &= ~HT_MULTI_EN;
416 }
417
418 writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
419
420 spin_unlock_irq(&priv->txlock);
421}
422
423static struct net_device_ops moxart_netdev_ops = {
424 .ndo_open = moxart_mac_open,
425 .ndo_stop = moxart_mac_stop,
426 .ndo_start_xmit = moxart_mac_start_xmit,
427 .ndo_get_stats = moxart_mac_get_stats,
428 .ndo_set_rx_mode = moxart_mac_set_rx_mode,
429 .ndo_set_mac_address = moxart_set_mac_address,
430 .ndo_validate_addr = eth_validate_addr,
431 .ndo_change_mtu = eth_change_mtu,
432};
433
434static int moxart_mac_probe(struct platform_device *pdev)
435{
436 struct device *p_dev = &pdev->dev;
437 struct device_node *node = p_dev->of_node;
438 struct net_device *ndev;
439 struct moxart_mac_priv_t *priv;
440 struct resource *res;
441 unsigned int irq;
442 int ret;
443
444 ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
445 if (!ndev)
446 return -ENOMEM;
447
448 irq = irq_of_parse_and_map(node, 0);
449 if (irq <= 0) {
450 netdev_err(ndev, "irq_of_parse_and_map failed\n");
451 return -EINVAL;
452 }
453
454 priv = netdev_priv(ndev);
455 priv->ndev = ndev;
456
457 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
458 ndev->base_addr = res->start;
459 priv->base = devm_ioremap_resource(p_dev, res);
460	if (IS_ERR(priv->base)) {
461		ret = PTR_ERR(priv->base);
462 dev_err(p_dev, "devm_ioremap_resource failed\n");
463 goto init_fail;
464 }
465
466 spin_lock_init(&priv->txlock);
467
468 priv->tx_buf_size = TX_BUF_SIZE;
469 priv->rx_buf_size = RX_BUF_SIZE +
470 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
471
472 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
473 TX_DESC_NUM, &priv->tx_base,
474 GFP_DMA | GFP_KERNEL);
475 if (priv->tx_desc_base == NULL)
476 goto init_fail;
477
478 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
479 RX_DESC_NUM, &priv->rx_base,
480 GFP_DMA | GFP_KERNEL);
481 if (priv->rx_desc_base == NULL)
482 goto init_fail;
483
484 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
485 GFP_ATOMIC);
486 if (!priv->tx_buf_base)
487 goto init_fail;
488
489 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
490 GFP_ATOMIC);
491 if (!priv->rx_buf_base)
492 goto init_fail;
493
494 platform_set_drvdata(pdev, ndev);
495
496 ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
497 pdev->name, ndev);
498 if (ret) {
499 netdev_err(ndev, "devm_request_irq failed\n");
500 goto init_fail;
501 }
502
503 ether_setup(ndev);
504 ndev->netdev_ops = &moxart_netdev_ops;
505 netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
506 ndev->priv_flags |= IFF_UNICAST_FLT;
507 ndev->irq = irq;
508
509 SET_NETDEV_DEV(ndev, &pdev->dev);
510
511 ret = register_netdev(ndev);
512 if (ret) {
513 free_netdev(ndev);
514 goto init_fail;
515 }
516
517 netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
518 __func__, ndev->irq, ndev->dev_addr);
519
520 return 0;
521
522init_fail:
523 netdev_err(ndev, "init failed\n");
524 moxart_mac_free_memory(ndev);
525
526 return ret;
527}
528
529static int moxart_remove(struct platform_device *pdev)
530{
531 struct net_device *ndev = platform_get_drvdata(pdev);
532
533 unregister_netdev(ndev);
534 free_irq(ndev->irq, ndev);
535 moxart_mac_free_memory(ndev);
536 free_netdev(ndev);
537
538 return 0;
539}
540
541static const struct of_device_id moxart_mac_match[] = {
542 { .compatible = "moxa,moxart-mac" },
543 { }
544};
545
546static struct platform_driver moxart_mac_driver = {
547 .probe = moxart_mac_probe,
548 .remove = moxart_remove,
549 .driver = {
550 .name = "moxart-ethernet",
551 .owner = THIS_MODULE,
552 .of_match_table = moxart_mac_match,
553 },
554};
555module_platform_driver(moxart_mac_driver);
556
557MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
558MODULE_LICENSE("GPL v2");
559MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
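Both rings in moxart_ether.c run on head/tail indices plus a per-descriptor DMA_OWN flag, and the TX_NEXT()/RX_NEXT() macros in the header below wrap the index with a mask, which only works because the ring sizes are powers of two. A minimal sketch of that ring arithmetic:

    #define DESC_NUM   64                   /* must be a power of two */
    #define DESC_MASK  (DESC_NUM - 1)
    #define NEXT(n)    (((n) + 1) & DESC_MASK)

    /* producer (start_xmit) advances head, consumer (tx_finished)
     * advances tail; equal indices mean the ring is empty, and keeping
     * one slot unused is what distinguishes "full" from "empty"
     */
    static unsigned int head, tail;

    static int ring_empty(void) { return head == tail; }
    static int ring_full(void)  { return NEXT(head) == tail; }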
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
new file mode 100644
index 000000000000..2be9280d608c
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -0,0 +1,330 @@
1/* MOXA ART Ethernet (RTL8201CP) driver.
2 *
3 * Copyright (C) 2013 Jonas Jensen
4 *
5 * Jonas Jensen <jonas.jensen@gmail.com>
6 *
7 * Based on code from
8 * Moxa Technology Co., Ltd. <www.moxa.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#ifndef _MOXART_ETHERNET_H
16#define _MOXART_ETHERNET_H
17
18#define TX_REG_OFFSET_DESC0 0
19#define TX_REG_OFFSET_DESC1 4
20#define TX_REG_OFFSET_DESC2 8
21#define TX_REG_DESC_SIZE 16
22
23#define RX_REG_OFFSET_DESC0 0
24#define RX_REG_OFFSET_DESC1 4
25#define RX_REG_OFFSET_DESC2 8
26#define RX_REG_DESC_SIZE 16
27
28#define TX_DESC0_PKT_LATE_COL 0x1 /* abort, late collision */
29#define TX_DESC0_RX_PKT_EXS_COL 0x2 /* abort, >16 collisions */
30#define TX_DESC0_DMA_OWN 0x80000000 /* owned by controller */
31#define TX_DESC1_BUF_SIZE_MASK 0x7ff
32#define TX_DESC1_LTS 0x8000000 /* last TX packet */
33#define TX_DESC1_FTS 0x10000000 /* first TX packet */
34#define TX_DESC1_FIFO_COMPLETE 0x20000000
35#define TX_DESC1_INTR_COMPLETE 0x40000000
36#define TX_DESC1_END 0x80000000
37#define TX_DESC2_ADDRESS_PHYS 0
38#define TX_DESC2_ADDRESS_VIRT 4
39
40#define RX_DESC0_FRAME_LEN 0
41#define RX_DESC0_FRAME_LEN_MASK 0x7FF
42#define RX_DESC0_MULTICAST 0x10000
43#define RX_DESC0_BROADCAST 0x20000
44#define RX_DESC0_ERR 0x40000
45#define RX_DESC0_CRC_ERR 0x80000
46#define RX_DESC0_FTL 0x100000
47#define RX_DESC0_RUNT 0x200000 /* packet less than 64 bytes */
48#define RX_DESC0_ODD_NB 0x400000 /* receive odd nibbles */
49#define RX_DESC0_LRS 0x10000000 /* last receive segment */
50#define RX_DESC0_FRS 0x20000000 /* first receive segment */
51#define RX_DESC0_DMA_OWN 0x80000000
52#define RX_DESC1_BUF_SIZE_MASK 0x7FF
53#define RX_DESC1_END 0x80000000
54#define RX_DESC2_ADDRESS_PHYS 0
55#define RX_DESC2_ADDRESS_VIRT 4
56
57#define TX_DESC_NUM 64
58#define TX_DESC_NUM_MASK (TX_DESC_NUM-1)
59#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
60#define TX_BUF_SIZE 1600
61#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
62
63#define RX_DESC_NUM 64
64#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
65#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK))
66#define RX_BUF_SIZE 1600
67#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1)
68
69#define REG_INTERRUPT_STATUS 0
70#define REG_INTERRUPT_MASK 4
71#define REG_MAC_MS_ADDRESS 8
72#define REG_MAC_LS_ADDRESS 12
73#define REG_MCAST_HASH_TABLE0 16
74#define REG_MCAST_HASH_TABLE1 20
75#define REG_TX_POLL_DEMAND 24
76#define REG_RX_POLL_DEMAND 28
77#define REG_TXR_BASE_ADDRESS 32
78#define REG_RXR_BASE_ADDRESS 36
79#define REG_INT_TIMER_CTRL 40
80#define REG_APOLL_TIMER_CTRL 44
81#define REG_DMA_BLEN_CTRL 48
82#define REG_RESERVED1 52
83#define REG_MAC_CTRL 136
84#define REG_MAC_STATUS 140
85#define REG_PHY_CTRL 144
86#define REG_PHY_WRITE_DATA 148
87#define REG_FLOW_CTRL 152
88#define REG_BACK_PRESSURE 156
89#define REG_RESERVED2 160
90#define REG_TEST_SEED 196
91#define REG_DMA_FIFO_STATE 200
92#define REG_TEST_MODE 204
93#define REG_RESERVED3 208
94#define REG_TX_COL_COUNTER 212
95#define REG_RPF_AEP_COUNTER 216
96#define REG_XM_PG_COUNTER 220
97#define REG_RUNT_TLC_COUNTER 224
98#define REG_CRC_FTL_COUNTER 228
99#define REG_RLC_RCC_COUNTER 232
100#define REG_BROC_COUNTER 236
101#define REG_MULCA_COUNTER 240
102#define REG_RP_COUNTER 244
103#define REG_XP_COUNTER 248
104
105#define REG_PHY_CTRL_OFFSET 0x0
106#define REG_PHY_STATUS 0x1
107#define REG_PHY_ID1 0x2
108#define REG_PHY_ID2 0x3
109#define REG_PHY_ANA 0x4
110#define REG_PHY_ANLPAR 0x5
111#define REG_PHY_ANE 0x6
112#define REG_PHY_ECTRL1 0x10
113#define REG_PHY_QPDS 0x11
114#define REG_PHY_10BOP 0x12
115#define REG_PHY_ECTRL2 0x13
116#define REG_PHY_FTMAC100_WRITE 0x8000000
117#define REG_PHY_FTMAC100_READ 0x4000000
118
119/* REG_INTERRUPT_STATUS */
120#define RPKT_FINISH BIT(0) /* DMA data received */
121#define NORXBUF BIT(1) /* receive buffer unavailable */
122#define XPKT_FINISH BIT(2) /* DMA moved data to TX FIFO */
123#define NOTXBUF BIT(3) /* transmit buffer unavailable */
124#define XPKT_OK_INT_STS BIT(4) /* transmit to ethernet success */
125#define XPKT_LOST_INT_STS BIT(5) /* transmit ethernet lost (collision) */
126#define RPKT_SAV BIT(6) /* FIFO receive success */
127#define RPKT_LOST_INT_STS BIT(7) /* FIFO full, receive failed */
128#define AHB_ERR BIT(8) /* AHB error */
129#define PHYSTS_CHG BIT(9) /* PHY link status change */
130
131/* REG_INTERRUPT_MASK */
132#define RPKT_FINISH_M BIT(0)
133#define NORXBUF_M BIT(1)
134#define XPKT_FINISH_M BIT(2)
135#define NOTXBUF_M BIT(3)
136#define XPKT_OK_M BIT(4)
137#define XPKT_LOST_M BIT(5)
138#define RPKT_SAV_M BIT(6)
139#define RPKT_LOST_M BIT(7)
140#define AHB_ERR_M BIT(8)
141#define PHYSTS_CHG_M BIT(9)
142
143/* REG_MAC_MS_ADDRESS */
144#define MAC_MADR_MASK 0xffff /* 2 MSB MAC address */
145
146/* REG_INT_TIMER_CTRL */
147#define TXINT_TIME_SEL BIT(15) /* TX cycle time period */
148#define TXINT_THR_MASK 0x7000
149#define TXINT_CNT_MASK 0xf00
150#define RXINT_TIME_SEL BIT(7) /* RX cycle time period */
151#define RXINT_THR_MASK 0x70
152#define RXINT_CNT_MASK 0xF
153
154/* REG_APOLL_TIMER_CTRL */
155#define TXPOLL_TIME_SEL BIT(12) /* TX poll time period */
156#define TXPOLL_CNT_MASK 0xf00
157#define TXPOLL_CNT_SHIFT_BIT 8
158#define RXPOLL_TIME_SEL BIT(4) /* RX poll time period */
159#define RXPOLL_CNT_MASK 0xF
160#define RXPOLL_CNT_SHIFT_BIT 0
161
162/* REG_DMA_BLEN_CTRL */
163#define RX_THR_EN BIT(9) /* RX FIFO threshold arbitration */
164#define RXFIFO_HTHR_MASK 0x1c0
165#define RXFIFO_LTHR_MASK 0x38
166#define INCR16_EN BIT(2) /* AHB bus INCR16 burst command */
167#define INCR8_EN BIT(1) /* AHB bus INCR8 burst command */
168#define INCR4_EN BIT(0) /* AHB bus INCR4 burst command */
169
170/* REG_MAC_CTRL */
171#define RX_BROADPKT BIT(17) /* receive broadcast packets */
172#define RX_MULTIPKT BIT(16) /* receive all multicast packets */
173#define FULLDUP BIT(15) /* full duplex */
174#define CRC_APD BIT(14) /* append CRC to transmitted packet */
175#define RCV_ALL BIT(12) /* ignore incoming packet destination */
176#define RX_FTL BIT(11) /* accept packets larger than 1518 B */
177#define RX_RUNT BIT(10) /* accept packets smaller than 64 B */
178#define HT_MULTI_EN BIT(9) /* accept on hash and mcast pass */
179#define RCV_EN BIT(8) /* receiver enable */
180#define ENRX_IN_HALFTX BIT(6) /* enable receive in half duplex mode */
181#define XMT_EN BIT(5) /* transmit enable */
182#define CRC_DIS BIT(4) /* disable CRC check when receiving */
183#define LOOP_EN BIT(3) /* internal loop-back */
184#define SW_RST BIT(2) /* software reset, last 64 AHB clocks */
185#define RDMA_EN BIT(1) /* enable receive DMA chan */
186#define XDMA_EN BIT(0) /* enable transmit DMA chan */
187
188/* REG_MAC_STATUS */
189#define COL_EXCEED BIT(11) /* more than 16 collisions */
190#define LATE_COL BIT(10) /* transmit late collision detected */
191#define XPKT_LOST BIT(9) /* transmit to ethernet lost */
192#define XPKT_OK BIT(8) /* transmit to ethernet success */
193#define RUNT_MAC_STS BIT(7) /* receive runt detected */
194#define FTL_MAC_STS BIT(6) /* receive frame too long detected */
195#define CRC_ERR_MAC_STS BIT(5)
196#define RPKT_LOST BIT(4) /* RX FIFO full, receive failed */
197#define RPKT_SAVE BIT(3) /* RX FIFO receive success */
198#define COL BIT(2) /* collision, incoming packet dropped */
199#define MCPU_BROADCAST BIT(1)
200#define MCPU_MULTICAST BIT(0)
201
202/* REG_PHY_CTRL */
203#define MIIWR			BIT(27)	/* init write sequence (auto cleared) */
204#define MIIRD BIT(26)
205#define REGAD_MASK 0x3e00000
206#define PHYAD_MASK 0x1f0000
207#define MIIRDATA_MASK 0xffff
208
209/* REG_PHY_WRITE_DATA */
210#define MIIWDATA_MASK 0xffff
211
212/* REG_FLOW_CTRL */
213#define PAUSE_TIME_MASK 0xffff0000
214#define FC_HIGH_MASK 0xf000
215#define FC_LOW_MASK 0xf00
216#define RX_PAUSE BIT(4) /* receive pause frame */
217#define TX_PAUSED BIT(3) /* transmit pause due to receive */
218#define FCTHR_EN BIT(2) /* enable threshold mode. */
219#define TX_PAUSE BIT(1) /* transmit pause frame */
220#define FC_EN BIT(0) /* flow control mode enable */
221
222/* REG_BACK_PRESSURE */
223#define BACKP_LOW_MASK 0xf00
224#define BACKP_JAM_LEN_MASK 0xf0
225#define BACKP_MODE BIT(1) /* address mode */
226#define BACKP_ENABLE BIT(0)
227
228/* REG_TEST_SEED */
229#define TEST_SEED_MASK 0x3fff
230
231/* REG_DMA_FIFO_STATE */
232#define TX_DMA_REQUEST BIT(31)
233#define RX_DMA_REQUEST BIT(30)
234#define TX_DMA_GRANT BIT(29)
235#define RX_DMA_GRANT BIT(28)
236#define TX_FIFO_EMPTY BIT(27)
237#define RX_FIFO_EMPTY BIT(26)
238#define TX_DMA2_SM_MASK 0x7000
239#define TX_DMA1_SM_MASK 0xf00
240#define RX_DMA2_SM_MASK 0x70
241#define RX_DMA1_SM_MASK 0xF
242
243/* REG_TEST_MODE */
244#define SINGLE_PKT BIT(26) /* single packet mode */
245#define PTIMER_TEST BIT(25) /* automatic polling timer test mode */
246#define ITIMER_TEST BIT(24) /* interrupt timer test mode */
247#define TEST_SEED_SELECT BIT(22)
248#define SEED_SELECT BIT(21)
249#define TEST_MODE BIT(20)
250#define TEST_TIME_MASK 0xffc00
251#define TEST_EXCEL_MASK 0x3e0
252
253/* REG_TX_COL_COUNTER */
254#define TX_MCOL_MASK 0xffff0000
255#define TX_MCOL_SHIFT_BIT 16
256#define TX_SCOL_MASK 0xffff
257#define TX_SCOL_SHIFT_BIT 0
258
259/* REG_RPF_AEP_COUNTER */
260#define RPF_MASK 0xffff0000
261#define RPF_SHIFT_BIT 16
262#define AEP_MASK 0xffff
263#define AEP_SHIFT_BIT 0
264
265/* REG_XM_PG_COUNTER */
266#define XM_MASK 0xffff0000
267#define XM_SHIFT_BIT 16
268#define PG_MASK 0xffff
269#define PG_SHIFT_BIT 0
270
271/* REG_RUNT_TLC_COUNTER */
272#define RUNT_CNT_MASK 0xffff0000
273#define RUNT_CNT_SHIFT_BIT 16
274#define TLCC_MASK 0xffff
275#define TLCC_SHIFT_BIT 0
276
277/* REG_CRC_FTL_COUNTER */
278#define CRCER_CNT_MASK 0xffff0000
279#define CRCER_CNT_SHIFT_BIT 16
280#define FTL_CNT_MASK 0xffff
281#define FTL_CNT_SHIFT_BIT 0
282
283/* REG_RLC_RCC_COUNTER */
284#define RLC_MASK 0xffff0000
285#define RLC_SHIFT_BIT 16
286#define RCC_MASK 0xffff
287#define RCC_SHIFT_BIT 0
288
289/* REG_PHY_STATUS */
290#define AN_COMPLETE 0x20
291#define LINK_STATUS 0x4
292
293struct moxart_mac_priv_t {
294 void __iomem *base;
295 struct net_device_stats stats;
296 unsigned int reg_maccr;
297 unsigned int reg_imr;
298 struct napi_struct napi;
299 struct net_device *ndev;
300
301 dma_addr_t rx_base;
302 dma_addr_t rx_mapping[RX_DESC_NUM];
303 void __iomem *rx_desc_base;
304 unsigned char *rx_buf_base;
305 unsigned char *rx_buf[RX_DESC_NUM];
306 unsigned int rx_head;
307 unsigned int rx_buf_size;
308
309 dma_addr_t tx_base;
310 dma_addr_t tx_mapping[TX_DESC_NUM];
311 void __iomem *tx_desc_base;
312 unsigned char *tx_buf_base;
313 unsigned char *tx_buf[RX_DESC_NUM];
314 unsigned int tx_head;
315 unsigned int tx_buf_size;
316
317 spinlock_t txlock;
318 unsigned int tx_len[TX_DESC_NUM];
319 struct sk_buff *tx_skb[TX_DESC_NUM];
320 unsigned int tx_tail;
321};
322
323#if TX_BUF_SIZE >= TX_BUF_SIZE_MAX
324#error MOXA ART Ethernet device driver TX buffer is too large!
325#endif
326#if RX_BUF_SIZE >= RX_BUF_SIZE_MAX
327#error MOXA ART Ethernet device driver RX buffer is too large!
328#endif
329
330#endif
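moxart_rx_poll() above, and myri10ge_poll() below, both follow the standard NAPI contract: consume at most budget packets, and only if the ring drained early signal completion and unmask the interrupt. Schematically (stub helpers with illustrative names, not real driver code):

    #include <stdbool.h>

    static bool hw_has_packet(void)  { return false; }   /* stub */
    static void hw_consume_one(void) { }                 /* stub */
    static void hw_unmask_irq(void)  { }                 /* stub */
    static void napi_done(void)      { }                 /* stub */

    static int napi_poll(int budget)
    {
            int done = 0;

            while (done < budget && hw_has_packet()) {
                    hw_consume_one();
                    done++;
            }

            if (done < budget) {            /* drained before budget ran out */
                    napi_done();            /* leave polled mode */
                    hw_unmask_irq();        /* next packet raises an IRQ */
            }

            return done;    /* done == budget keeps the poll scheduled */
    }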
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 967bae8b85c5..149355b52ad0 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -74,6 +74,7 @@
74#ifdef CONFIG_MTRR 74#ifdef CONFIG_MTRR
75#include <asm/mtrr.h> 75#include <asm/mtrr.h>
76#endif 76#endif
77#include <net/busy_poll.h>
77 78
78#include "myri10ge_mcp.h" 79#include "myri10ge_mcp.h"
79#include "myri10ge_mcp_gen_header.h" 80#include "myri10ge_mcp_gen_header.h"
@@ -194,6 +195,21 @@ struct myri10ge_slice_state {
194 int cpu; 195 int cpu;
195 __be32 __iomem *dca_tag; 196 __be32 __iomem *dca_tag;
196#endif 197#endif
198#ifdef CONFIG_NET_RX_BUSY_POLL
199 unsigned int state;
200#define SLICE_STATE_IDLE 0
201#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */
202#define SLICE_STATE_POLL 2 /* poll owns this slice */
203#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
204#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */
205#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */
206#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
207 spinlock_t lock;
208 unsigned long lock_napi_yield;
209 unsigned long lock_poll_yield;
210 unsigned long busy_poll_miss;
211 unsigned long busy_poll_cnt;
212#endif /* CONFIG_NET_RX_BUSY_POLL */
197 char irq_desc[32]; 213 char irq_desc[32];
198}; 214};
199 215
@@ -244,7 +260,7 @@ struct myri10ge_priv {
244 int fw_ver_minor; 260 int fw_ver_minor;
245 int fw_ver_tiny; 261 int fw_ver_tiny;
246 int adopted_rx_filter_bug; 262 int adopted_rx_filter_bug;
247 u8 mac_addr[6]; /* eeprom mac address */ 263 u8 mac_addr[ETH_ALEN]; /* eeprom mac address */
248 unsigned long serial_number; 264 unsigned long serial_number;
249 int vendor_specific_offset; 265 int vendor_specific_offset;
250 int fw_multicast_support; 266 int fw_multicast_support;
@@ -909,6 +925,92 @@ abort:
909 return status; 925 return status;
910} 926}
911 927
928#ifdef CONFIG_NET_RX_BUSY_POLL
929static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
930{
931 spin_lock_init(&ss->lock);
932 ss->state = SLICE_STATE_IDLE;
933}
934
935static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
936{
937	bool rc = true;
938 spin_lock(&ss->lock);
939 if ((ss->state & SLICE_LOCKED)) {
940 WARN_ON((ss->state & SLICE_STATE_NAPI));
941 ss->state |= SLICE_STATE_NAPI_YIELD;
942 rc = false;
943 ss->lock_napi_yield++;
944 } else
945 ss->state = SLICE_STATE_NAPI;
946 spin_unlock(&ss->lock);
947 return rc;
948}
949
950static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
951{
952 spin_lock(&ss->lock);
953 WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
954 ss->state = SLICE_STATE_IDLE;
955 spin_unlock(&ss->lock);
956}
957
958static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
959{
960	bool rc = true;
961 spin_lock_bh(&ss->lock);
962 if ((ss->state & SLICE_LOCKED)) {
963 ss->state |= SLICE_STATE_POLL_YIELD;
964 rc = false;
965 ss->lock_poll_yield++;
966 } else
967 ss->state |= SLICE_STATE_POLL;
968 spin_unlock_bh(&ss->lock);
969 return rc;
970}
971
972static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
973{
974 spin_lock_bh(&ss->lock);
975 WARN_ON((ss->state & SLICE_STATE_NAPI));
976 ss->state = SLICE_STATE_IDLE;
977 spin_unlock_bh(&ss->lock);
978}
979
980static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
981{
982 WARN_ON(!(ss->state & SLICE_LOCKED));
983 return (ss->state & SLICE_USER_PEND);
984}
985#else /* CONFIG_NET_RX_BUSY_POLL */
986static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
987{
988}
989
990static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
991{
992 return false;
993}
994
995static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
996{
997}
998
999static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
1000{
1001 return false;
1002}
1003
1004static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
1005{
1006}
1007
1008static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
1009{
1010 return false;
1011}
1012#endif
1013
912static int myri10ge_reset(struct myri10ge_priv *mgp) 1014static int myri10ge_reset(struct myri10ge_priv *mgp)
913{ 1015{
914 struct myri10ge_cmd cmd; 1016 struct myri10ge_cmd cmd;
@@ -1300,6 +1402,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
1300 } 1402 }
1301} 1403}
1302 1404
1405#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
1406
1303static inline int 1407static inline int
1304myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) 1408myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1305{ 1409{
@@ -1311,6 +1415,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1311 struct pci_dev *pdev = mgp->pdev; 1415 struct pci_dev *pdev = mgp->pdev;
1312 struct net_device *dev = mgp->dev; 1416 struct net_device *dev = mgp->dev;
1313 u8 *va; 1417 u8 *va;
1418 bool polling;
1314 1419
1315 if (len <= mgp->small_bytes) { 1420 if (len <= mgp->small_bytes) {
1316 rx = &ss->rx_small; 1421 rx = &ss->rx_small;
@@ -1325,7 +1430,15 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1325 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; 1430 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
1326 prefetch(va); 1431 prefetch(va);
1327 1432
1328 skb = napi_get_frags(&ss->napi); 1433 /* When busy polling in user context, allocate skb and copy headers to
1434 * skb's linear memory ourselves. When not busy polling, use the napi
1435 * gro api.
1436 */
1437 polling = myri10ge_ss_busy_polling(ss);
1438 if (polling)
1439 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
1440 else
1441 skb = napi_get_frags(&ss->napi);
1329 if (unlikely(skb == NULL)) { 1442 if (unlikely(skb == NULL)) {
1330 ss->stats.rx_dropped++; 1443 ss->stats.rx_dropped++;
1331 for (i = 0, remainder = len; remainder > 0; i++) { 1444 for (i = 0, remainder = len; remainder > 0; i++) {
@@ -1364,8 +1477,29 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1364 } 1477 }
1365 myri10ge_vlan_rx(mgp->dev, va, skb); 1478 myri10ge_vlan_rx(mgp->dev, va, skb);
1366 skb_record_rx_queue(skb, ss - &mgp->ss[0]); 1479 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1480 skb_mark_napi_id(skb, &ss->napi);
1481
1482 if (polling) {
1483 int hlen;
1484
1485 /* myri10ge_vlan_rx might have moved the header, so compute
1486 * length and address again.
1487 */
1488 hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
1489 va = page_address(skb_frag_page(&rx_frags[0])) +
1490 rx_frags[0].page_offset;
1491 /* Copy header into the skb linear memory */
1492 skb_copy_to_linear_data(skb, va, hlen);
1493 rx_frags[0].page_offset += hlen;
1494 rx_frags[0].size -= hlen;
1495 skb->data_len -= hlen;
1496 skb->tail += hlen;
1497 skb->protocol = eth_type_trans(skb, dev);
1498 netif_receive_skb(skb);
1499 }
1500 else
1501 napi_gro_frags(&ss->napi);
1367 1502
1368 napi_gro_frags(&ss->napi);
1369 return 1; 1503 return 1;
1370} 1504}
1371 1505
@@ -1524,10 +1658,14 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1524 if (ss->mgp->dca_enabled) 1658 if (ss->mgp->dca_enabled)
1525 myri10ge_update_dca(ss); 1659 myri10ge_update_dca(ss);
1526#endif 1660#endif
1661 /* Try later if the busy_poll handler is running. */
1662 if (!myri10ge_ss_lock_napi(ss))
1663 return budget;
1527 1664
1528 /* process as many rx events as NAPI will allow */ 1665 /* process as many rx events as NAPI will allow */
1529 work_done = myri10ge_clean_rx_done(ss, budget); 1666 work_done = myri10ge_clean_rx_done(ss, budget);
1530 1667
1668 myri10ge_ss_unlock_napi(ss);
1531 if (work_done < budget) { 1669 if (work_done < budget) {
1532 napi_complete(napi); 1670 napi_complete(napi);
1533 put_be32(htonl(3), ss->irq_claim); 1671 put_be32(htonl(3), ss->irq_claim);
@@ -1535,6 +1673,34 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1535 return work_done; 1673 return work_done;
1536} 1674}
1537 1675
1676#ifdef CONFIG_NET_RX_BUSY_POLL
1677static int myri10ge_busy_poll(struct napi_struct *napi)
1678{
1679 struct myri10ge_slice_state *ss =
1680 container_of(napi, struct myri10ge_slice_state, napi);
1681 struct myri10ge_priv *mgp = ss->mgp;
1682 int work_done;
1683
1684 /* Poll only when the link is up */
1685 if (mgp->link_state != MXGEFW_LINK_UP)
1686 return LL_FLUSH_FAILED;
1687
1688 if (!myri10ge_ss_lock_poll(ss))
1689 return LL_FLUSH_BUSY;
1690
1691 /* Process a small number of packets */
1692 work_done = myri10ge_clean_rx_done(ss, 4);
1693 if (work_done)
1694 ss->busy_poll_cnt += work_done;
1695 else
1696 ss->busy_poll_miss++;
1697
1698 myri10ge_ss_unlock_poll(ss);
1699
1700 return work_done;
1701}
1702#endif /* CONFIG_NET_RX_BUSY_POLL */
1703
1538static irqreturn_t myri10ge_intr(int irq, void *arg) 1704static irqreturn_t myri10ge_intr(int irq, void *arg)
1539{ 1705{
1540 struct myri10ge_slice_state *ss = arg; 1706 struct myri10ge_slice_state *ss = arg;
@@ -1742,6 +1908,10 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
1742 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", 1908 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
1743 "rx_small_cnt", "rx_big_cnt", 1909 "rx_small_cnt", "rx_big_cnt",
1744 "wake_queue", "stop_queue", "tx_linearized", 1910 "wake_queue", "stop_queue", "tx_linearized",
1911#ifdef CONFIG_NET_RX_BUSY_POLL
1912 "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
1913 "rx_busy_poll_cnt",
1914#endif
1745}; 1915};
1746 1916
1747#define MYRI10GE_NET_STATS_LEN 21 1917#define MYRI10GE_NET_STATS_LEN 21
@@ -1842,6 +2012,12 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1842 data[i++] = (unsigned int)ss->tx.wake_queue; 2012 data[i++] = (unsigned int)ss->tx.wake_queue;
1843 data[i++] = (unsigned int)ss->tx.stop_queue; 2013 data[i++] = (unsigned int)ss->tx.stop_queue;
1844 data[i++] = (unsigned int)ss->tx.linearized; 2014 data[i++] = (unsigned int)ss->tx.linearized;
2015#ifdef CONFIG_NET_RX_BUSY_POLL
2016 data[i++] = ss->lock_napi_yield;
2017 data[i++] = ss->lock_poll_yield;
2018 data[i++] = ss->busy_poll_miss;
2019 data[i++] = ss->busy_poll_cnt;
2020#endif
1845 } 2021 }
1846} 2022}
1847 2023
@@ -2405,6 +2581,9 @@ static int myri10ge_open(struct net_device *dev)
2405 goto abort_with_rings; 2581 goto abort_with_rings;
2406 } 2582 }
2407 2583
2584 /* Initialize the slice spinlock and state used for polling */
2585 myri10ge_ss_init_lock(ss);
2586
2408 /* must happen prior to any irq */ 2587 /* must happen prior to any irq */
2409 napi_enable(&(ss)->napi); 2588 napi_enable(&(ss)->napi);
2410 } 2589 }
@@ -2481,9 +2660,19 @@ static int myri10ge_close(struct net_device *dev)
2481 2660
2482 del_timer_sync(&mgp->watchdog_timer); 2661 del_timer_sync(&mgp->watchdog_timer);
2483 mgp->running = MYRI10GE_ETH_STOPPING; 2662 mgp->running = MYRI10GE_ETH_STOPPING;
2663 local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
2484 for (i = 0; i < mgp->num_slices; i++) { 2664 for (i = 0; i < mgp->num_slices; i++) {
2485 napi_disable(&mgp->ss[i].napi); 2665 napi_disable(&mgp->ss[i].napi);
2666 /* Lock the slice to prevent the busy_poll handler from
2667 * accessing it. Later when we bring the NIC up, myri10ge_open
2668 * resets the slice including this lock.
2669 */
2670 while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
2671 pr_info("Slice %d locked\n", i);
2672 mdelay(1);
2673 }
2486 } 2674 }
2675 local_bh_enable();
2487 netif_carrier_off(dev); 2676 netif_carrier_off(dev);
2488 2677
2489 netif_tx_stop_all_queues(dev); 2678 netif_tx_stop_all_queues(dev);
@@ -3569,8 +3758,11 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
3569 ss->fw_stats, ss->fw_stats_bus); 3758 ss->fw_stats, ss->fw_stats_bus);
3570 ss->fw_stats = NULL; 3759 ss->fw_stats = NULL;
3571 } 3760 }
3761 napi_hash_del(&ss->napi);
3572 netif_napi_del(&ss->napi); 3762 netif_napi_del(&ss->napi);
3573 } 3763 }
3764 /* Wait till napi structs are no longer used, and then free ss. */
3765 synchronize_rcu();
3574 kfree(mgp->ss); 3766 kfree(mgp->ss);
3575 mgp->ss = NULL; 3767 mgp->ss = NULL;
3576} 3768}
@@ -3591,9 +3783,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3591 for (i = 0; i < mgp->num_slices; i++) { 3783 for (i = 0; i < mgp->num_slices; i++) {
3592 ss = &mgp->ss[i]; 3784 ss = &mgp->ss[i];
3593 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); 3785 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3594 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3786 ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
3595 &ss->rx_done.bus, 3787 &ss->rx_done.bus,
3596 GFP_KERNEL | __GFP_ZERO); 3788 GFP_KERNEL);
3597 if (ss->rx_done.entry == NULL) 3789 if (ss->rx_done.entry == NULL)
3598 goto abort; 3790 goto abort;
3599 bytes = sizeof(*ss->fw_stats); 3791 bytes = sizeof(*ss->fw_stats);
@@ -3606,6 +3798,7 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3606 ss->dev = mgp->dev; 3798 ss->dev = mgp->dev;
3607 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, 3799 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
3608 myri10ge_napi_weight); 3800 myri10ge_napi_weight);
3801 napi_hash_add(&ss->napi);
3609 } 3802 }
3610 return 0; 3803 return 0;
3611abort: 3804abort:
@@ -3625,13 +3818,12 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3625 struct pci_dev *pdev = mgp->pdev; 3818 struct pci_dev *pdev = mgp->pdev;
3626 char *old_fw; 3819 char *old_fw;
3627 bool old_allocated; 3820 bool old_allocated;
3628 int i, status, ncpus, msix_cap; 3821 int i, status, ncpus;
3629 3822
3630 mgp->num_slices = 1; 3823 mgp->num_slices = 1;
3631 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3632 ncpus = netif_get_num_default_rss_queues(); 3824 ncpus = netif_get_num_default_rss_queues();
3633 3825
3634 if (myri10ge_max_slices == 1 || msix_cap == 0 || 3826 if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
3635 (myri10ge_max_slices == -1 && ncpus < 2)) 3827 (myri10ge_max_slices == -1 && ncpus < 2))
3636 return; 3828 return;
3637 3829
@@ -3749,6 +3941,9 @@ static const struct net_device_ops myri10ge_netdev_ops = {
3749 .ndo_change_mtu = myri10ge_change_mtu, 3941 .ndo_change_mtu = myri10ge_change_mtu,
3750 .ndo_set_rx_mode = myri10ge_set_multicast_list, 3942 .ndo_set_rx_mode = myri10ge_set_multicast_list,
3751 .ndo_set_mac_address = myri10ge_set_mac_address, 3943 .ndo_set_mac_address = myri10ge_set_mac_address,
3944#ifdef CONFIG_NET_RX_BUSY_POLL
3945 .ndo_busy_poll = myri10ge_busy_poll,
3946#endif
3752}; 3947};
3753 3948
3754static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3949static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
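The myri10ge busy-poll plumbing above is essentially a hand-rolled ownership lock: SLICE_STATE_NAPI and SLICE_STATE_POLL record who currently owns the RX ring, while the *_YIELD bits only feed the new contention counters exported via ethtool -S. Stripped of the spinlock and statistics, the protocol reduces to roughly this sketch (not the driver code):

    #include <stdbool.h>

    static enum { IDLE, OWNED_BY_NAPI, OWNED_BY_POLL } state = IDLE;
    /* the real code guards every transition with ss->lock */

    static bool lock_napi(void)
    {
            if (state != IDLE)
                    return false;   /* busy poll owns the ring: NAPI yields */
            state = OWNED_BY_NAPI;
            return true;
    }

    static bool lock_poll(void)
    {
            if (state != IDLE)
                    return false;   /* NAPI owns the ring: syscall backs off */
            state = OWNED_BY_POLL;
            return true;
    }

    static void unlock(void)
    {
            state = IDLE;
    }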
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index dc2c6f561e9a..e6f0a4366f90 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -390,7 +390,7 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
390 390
391 priv = netdev_priv(ndev); 391 priv = netdev_priv(ndev);
392 392
393 pdata = (struct netxeth_platform_data *)pdev->dev.platform_data; 393 pdata = dev_get_platdata(&pdev->dev);
394 priv->xc = request_xc(pdata->xcno, &pdev->dev); 394 priv->xc = request_xc(pdata->xcno, &pdev->dev);
395 if (!priv->xc) { 395 if (!priv->xc) {
396 dev_err(&pdev->dev, "unable to request xc engine\n"); 396 dev_err(&pdev->dev, "unable to request xc engine\n");
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index e88bdb1aa669..79645f74b3a8 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -922,7 +922,7 @@ static void __init get_mac_address(struct net_device *dev)
922{ 922{
923 struct w90p910_ether *ether = netdev_priv(dev); 923 struct w90p910_ether *ether = netdev_priv(dev);
924 struct platform_device *pdev; 924 struct platform_device *pdev;
925 char addr[6]; 925 char addr[ETH_ALEN];
926 926
927 pdev = ether->pdev; 927 pdev = ether->pdev;
928 928
@@ -934,7 +934,7 @@ static void __init get_mac_address(struct net_device *dev)
934 addr[5] = 0xa8; 934 addr[5] = 0xa8;
935 935
936 if (is_valid_ether_addr(addr)) 936 if (is_valid_ether_addr(addr))
937 memcpy(dev->dev_addr, &addr, 0x06); 937 memcpy(dev->dev_addr, &addr, ETH_ALEN);
938 else 938 else
939 dev_err(&pdev->dev, "invalid mac address\n"); 939 dev_err(&pdev->dev, "invalid mac address\n");
940} 940}
@@ -1014,7 +1014,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
1014 if (ether->rxirq < 0) { 1014 if (ether->rxirq < 0) {
1015 dev_err(&pdev->dev, "failed to get ether rx irq\n"); 1015 dev_err(&pdev->dev, "failed to get ether rx irq\n");
1016 error = -ENXIO; 1016 error = -ENXIO;
1017 goto failed_free_txirq; 1017 goto failed_free_io;
1018 } 1018 }
1019 1019
1020 platform_set_drvdata(pdev, dev); 1020 platform_set_drvdata(pdev, dev);
@@ -1023,7 +1023,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
1023 if (IS_ERR(ether->clk)) { 1023 if (IS_ERR(ether->clk)) {
1024 dev_err(&pdev->dev, "failed to get ether clock\n"); 1024 dev_err(&pdev->dev, "failed to get ether clock\n");
1025 error = PTR_ERR(ether->clk); 1025 error = PTR_ERR(ether->clk);
1026 goto failed_free_rxirq; 1026 goto failed_free_io;
1027 } 1027 }
1028 1028
1029 ether->rmiiclk = clk_get(&pdev->dev, "RMII"); 1029 ether->rmiiclk = clk_get(&pdev->dev, "RMII");
@@ -1049,10 +1049,6 @@ failed_put_rmiiclk:
1049 clk_put(ether->rmiiclk); 1049 clk_put(ether->rmiiclk);
1050failed_put_clk: 1050failed_put_clk:
1051 clk_put(ether->clk); 1051 clk_put(ether->clk);
1052failed_free_rxirq:
1053 free_irq(ether->rxirq, pdev);
1054failed_free_txirq:
1055 free_irq(ether->txirq, pdev);
1056failed_free_io: 1052failed_free_io:
1057 iounmap(ether->reg); 1053 iounmap(ether->reg);
1058failed_free_mem: 1054failed_free_mem:
@@ -1075,9 +1071,6 @@ static int w90p910_ether_remove(struct platform_device *pdev)
1075 iounmap(ether->reg); 1071 iounmap(ether->reg);
1076 release_mem_region(ether->res->start, resource_size(ether->res)); 1072 release_mem_region(ether->res->start, resource_size(ether->res));
1077 1073
1078 free_irq(ether->txirq, dev);
1079 free_irq(ether->rxirq, dev);
1080
1081 del_timer_sync(&ether->check_timer); 1074 del_timer_sync(&ether->check_timer);
1082 1075
1083 free_netdev(dev); 1076 free_netdev(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 7779036690cc..6797b1075874 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -582,6 +582,19 @@ struct pch_gbe_hw_stats {
582}; 582};
583 583
584/** 584/**
585 * struct pch_gbe_privdata - PCI Device ID driver data
586 * @phy_tx_clk_delay: Bool, configure the PHY TX delay in software
587 * @phy_disable_hibernate: Bool, disable PHY hibernation
588 * @platform_init: Platform initialization callback, called from
589 * probe, prior to PHY initialization.
590 */
591struct pch_gbe_privdata {
592 bool phy_tx_clk_delay;
593 bool phy_disable_hibernate;
594 int (*platform_init)(struct pci_dev *pdev);
595};
596
597/**
585 * struct pch_gbe_adapter - board specific private data structure 598 * struct pch_gbe_adapter - board specific private data structure
586 * @stats_lock: Spinlock structure for status 599 * @stats_lock: Spinlock structure for status
587 * @ethtool_lock: Spinlock structure for ethtool 600 * @ethtool_lock: Spinlock structure for ethtool
@@ -604,6 +617,7 @@ struct pch_gbe_hw_stats {
604 * @rx_buffer_len: Receive buffer length 617 * @rx_buffer_len: Receive buffer length
605 * @tx_queue_len: Transmit queue length 618 * @tx_queue_len: Transmit queue length
606 * @have_msi: PCI MSI mode flag 619 * @have_msi: PCI MSI mode flag
620 * @pdata:	PCI Device ID driver_data
607 */ 621 */
608 622
609struct pch_gbe_adapter { 623struct pch_gbe_adapter {
@@ -631,6 +645,7 @@ struct pch_gbe_adapter {
631 int hwts_tx_en; 645 int hwts_tx_en;
632 int hwts_rx_en; 646 int hwts_rx_en;
633 struct pci_dev *ptp_pdev; 647 struct pci_dev *ptp_pdev;
648 struct pch_gbe_privdata *pdata;
634}; 649};
635 650
636#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw) 651#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 1129db0cdf82..f0ceb89af931 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -118,6 +118,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
118 * filled by get_settings() on a down link, speed is -1: */ 118 * filled by get_settings() on a down link, speed is -1: */
119 if (speed == UINT_MAX) { 119 if (speed == UINT_MAX) {
120 speed = SPEED_1000; 120 speed = SPEED_1000;
121 ethtool_cmd_speed_set(ecmd, speed);
121 ecmd->duplex = DUPLEX_FULL; 122 ecmd->duplex = DUPLEX_FULL;
122 } 123 }
123 ret = mii_ethtool_sset(&adapter->mii, ecmd); 124 ret = mii_ethtool_sset(&adapter->mii, ecmd);
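The one-line pch_gbe ethtool fix above matters because ecmd is handed on to mii_ethtool_sset(): patching only the local speed variable left the stale value inside the struct. ethtool_cmd_speed_set() exists because, if memory serves, the 32-bit speed is split across two 16-bit fields in the ethtool ABI; a toy sketch of that shape (layout illustrative only):

    struct ethtool_cmd_sketch {
            unsigned short speed;           /* low 16 bits */
            unsigned short speed_hi;        /* high 16 bits */
    };

    static void cmd_speed_set(struct ethtool_cmd_sketch *ep, unsigned int speed)
    {
            ep->speed    = speed & 0xffff;
            ep->speed_hi = (speed >> 16) & 0xffff;
    }
    /* a plain "ep->speed = speed;" would silently truncate any rate
     * above 65535 Mb/s */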
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ab1039a95bf9..5a0f04c2c813 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/net_tstamp.h> 24#include <linux/net_tstamp.h>
25#include <linux/ptp_classify.h> 25#include <linux/ptp_classify.h>
26#include <linux/gpio.h>
26 27
27#define DRV_VERSION "1.01" 28#define DRV_VERSION "1.01"
28const char pch_driver_version[] = DRV_VERSION; 29const char pch_driver_version[] = DRV_VERSION;
@@ -111,6 +112,8 @@ const char pch_driver_version[] = DRV_VERSION;
111#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81" 112#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
112#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00" 113#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
113 114
115#define MINNOW_PHY_RESET_GPIO 13
116
114static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 117static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
115 118
116static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); 119static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
@@ -682,7 +685,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
682 } 685 }
683 adapter->hw.phy.addr = adapter->mii.phy_id; 686 adapter->hw.phy.addr = adapter->mii.phy_id;
684 netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id); 687 netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
685 if (addr == 32) 688 if (addr == PCH_GBE_PHY_REGS_LEN)
686 return -EAGAIN; 689 return -EAGAIN;
687 /* Select the PHY and isolate the rest */ 690 /* Select the PHY and isolate the rest */
688 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) { 691 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
@@ -1488,9 +1491,9 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1488 bufsz = adapter->rx_buffer_len; 1491 bufsz = adapter->rx_buffer_len;
1489 1492
1490 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1493 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1491 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, 1494 rx_ring->rx_buff_pool =
1492 &rx_ring->rx_buff_pool_logic, 1495 dma_zalloc_coherent(&pdev->dev, size,
1493 GFP_KERNEL | __GFP_ZERO); 1496 &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
1494 if (!rx_ring->rx_buff_pool) 1497 if (!rx_ring->rx_buff_pool)
1495 return -ENOMEM; 1498 return -ENOMEM;
1496 1499
@@ -1804,9 +1807,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1804 1807
1805 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1808 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1806 1809
1807 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 1810 tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
1808 &tx_ring->dma, 1811 &tx_ring->dma, GFP_KERNEL);
1809 GFP_KERNEL | __GFP_ZERO);
1810 if (!tx_ring->desc) { 1812 if (!tx_ring->desc) {
1811 vfree(tx_ring->buffer_info); 1813 vfree(tx_ring->buffer_info);
1812 return -ENOMEM; 1814 return -ENOMEM;
@@ -1849,9 +1851,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1849 return -ENOMEM; 1851 return -ENOMEM;
1850 1852
1851 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1853 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1852 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1854 rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
1853 &rx_ring->dma, 1855 &rx_ring->dma, GFP_KERNEL);
1854 GFP_KERNEL | __GFP_ZERO);
1855 if (!rx_ring->desc) { 1856 if (!rx_ring->desc) {
1856 vfree(rx_ring->buffer_info); 1857 vfree(rx_ring->buffer_info);
1857 return -ENOMEM; 1858 return -ENOMEM;
@@ -2635,6 +2636,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2635 adapter->pdev = pdev; 2636 adapter->pdev = pdev;
2636 adapter->hw.back = adapter; 2637 adapter->hw.back = adapter;
2637 adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; 2638 adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
2639 adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
2640 if (adapter->pdata && adapter->pdata->platform_init)
2641 adapter->pdata->platform_init(pdev);
2638 2642
2639 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, 2643 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2640 PCI_DEVFN(12, 4)); 2644 PCI_DEVFN(12, 4));
@@ -2710,6 +2714,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2710 2714
2711 dev_dbg(&pdev->dev, "PCH Network Connection\n"); 2715 dev_dbg(&pdev->dev, "PCH Network Connection\n");
2712 2716
2717 /* Disable hibernation on certain platforms */
2718 if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
2719 pch_gbe_phy_disable_hibernate(&adapter->hw);
2720
2713 device_set_wakeup_enable(&pdev->dev, 1); 2721 device_set_wakeup_enable(&pdev->dev, 1);
2714 return 0; 2722 return 0;
2715 2723
@@ -2720,9 +2728,48 @@ err_free_netdev:
2720 return ret; 2728 return ret;
2721} 2729}
2722 2730
2731/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
2732 * ensure it is awake for probe and init. Request the line and reset the PHY.
2733 */
2734static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
2735{
2736 unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
2737 unsigned gpio = MINNOW_PHY_RESET_GPIO;
2738 int ret;
2739
2740 ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
2741 "minnow_phy_reset");
2742 if (ret) {
2743 dev_err(&pdev->dev,
2744 "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
2745 return ret;
2746 }
2747
2748 gpio_set_value(gpio, 0);
2749 usleep_range(1250, 1500);
2750 gpio_set_value(gpio, 1);
2751 usleep_range(1250, 1500);
2752
2753 return ret;
2754}
2755
2756static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
2757 .phy_tx_clk_delay = true,
2758 .phy_disable_hibernate = true,
2759 .platform_init = pch_gbe_minnow_platform_init,
2760};
2761
2723static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { 2762static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
2724 {.vendor = PCI_VENDOR_ID_INTEL, 2763 {.vendor = PCI_VENDOR_ID_INTEL,
2725 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, 2764 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2765 .subvendor = PCI_VENDOR_ID_CIRCUITCO,
2766 .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
2767 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2768 .class_mask = (0xFFFF00),
2769 .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
2770 },
2771 {.vendor = PCI_VENDOR_ID_INTEL,
2772 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2726 .subvendor = PCI_ANY_ID, 2773 .subvendor = PCI_ANY_ID,
2727 .subdevice = PCI_ANY_ID, 2774 .subdevice = PCI_ANY_ID,
2728 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), 2775 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
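
The ordering of the two entries above matters: the PCI core scans the ID table in order and treats PCI_ANY_ID as a wildcard, so the MinnowBoard entry with its specific subvendor/subdevice must precede the catch-all entry. A simplified sketch of the per-entry test (assumed to mirror the core's pci_match_one_device(); example_match_one is a hypothetical name):

static int example_match_one(const struct pci_device_id *id,
			     const struct pci_dev *dev)
{
	return (id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
	       (id->device == PCI_ANY_ID || id->device == dev->device) &&
	       (id->subvendor == PCI_ANY_ID ||
		id->subvendor == dev->subsystem_vendor) &&
	       (id->subdevice == PCI_ANY_ID ||
		id->subdevice == dev->subsystem_device) &&
	       !((id->class ^ dev->class) & id->class_mask);
}
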
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index da079073a6c6..8b7ff75fc8e0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -74,6 +74,15 @@
74#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ 74#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
75#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ 75#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
76 76
77/* AR8031 PHY Debug Registers */
78#define PHY_AR803X_ID 0x00001374
79#define PHY_AR8031_DBG_OFF 0x1D
80#define PHY_AR8031_DBG_DAT 0x1E
81#define PHY_AR8031_SERDES 0x05
82#define PHY_AR8031_HIBERNATE 0x0B
83#define PHY_AR8031_SERDES_TX_CLK_DLY 0x0100 /* TX clock delay of 2.0ns */
84#define PHY_AR8031_PS_HIB_EN 0x8000 /* Hibernate enable */
85
77/* Phy Id Register (word 2) */ 86/* Phy Id Register (word 2) */
78#define PHY_REVISION_MASK 0x000F 87#define PHY_REVISION_MASK 0x000F
79 88
@@ -249,6 +258,51 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
249} 258}
250 259
251/** 260/**
261 * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY
262 * @hw: Pointer to the HW structure
263 * Returns
264 * 0: Successful.
265 * -EINVAL: Invalid argument.
266 */
267static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw)
268{
269 /* The RGMII interface requires a ~2ns TX clock delay. This is typically
270 * done in layout with a longer trace or via PHY strapping, but can also
271 * be done via PHY configuration registers.
272 */
273 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
274 u16 mii_reg;
275 int ret = 0;
276
277 switch (hw->phy.id) {
278 case PHY_AR803X_ID:
279 netdev_dbg(adapter->netdev,
280 "Configuring AR803X PHY for 2ns TX clock delay\n");
281 pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg);
282 ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
283 PHY_AR8031_SERDES);
284 if (ret)
285 break;
286
287 pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
288 mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY;
289 ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
290 mii_reg);
291 break;
292 default:
293 netdev_err(adapter->netdev,
294 "Unknown PHY (%x), could not set TX clock delay\n",
295 hw->phy.id);
296 return -EINVAL;
297 }
298
299 if (ret)
300 netdev_err(adapter->netdev,
301 "Could not configure tx clock delay for PHY\n");
302 return ret;
303}
304
305/**
252 * pch_gbe_phy_init_setting - PHY initial setting 306 * pch_gbe_phy_init_setting - PHY initial setting
253 * @hw: Pointer to the HW structure 307 * @hw: Pointer to the HW structure
254 */ 308 */
@@ -277,4 +331,48 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
277 pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); 331 pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
278 mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; 332 mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
279 pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); 333 pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
334
335 /* Setup a TX clock delay on certain platforms */
336 if (adapter->pdata && adapter->pdata->phy_tx_clk_delay)
337 pch_gbe_phy_tx_clk_delay(hw);
338}
339
340/**
341 * pch_gbe_phy_disable_hibernate - Disable the PHY low power state
342 * @hw: Pointer to the HW structure
343 * Returns
344 * 0: Successful.
345 * -EINVAL: Invalid argument.
346 */
347int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw)
348{
349 struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
350 u16 mii_reg;
351 int ret = 0;
352
353 switch (hw->phy.id) {
354 case PHY_AR803X_ID:
355 netdev_dbg(adapter->netdev,
356 "Disabling hibernation for AR803X PHY\n");
357 ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
358 PHY_AR8031_HIBERNATE);
359 if (ret)
360 break;
361
362 pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
363 mii_reg &= ~PHY_AR8031_PS_HIB_EN;
364 ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
365 mii_reg);
366 break;
367 default:
368 netdev_err(adapter->netdev,
369 "Unknown PHY (%x), could not disable hibernation\n",
370 hw->phy.id);
371 return -EINVAL;
372 }
373
374 if (ret)
375 netdev_err(adapter->netdev,
376 "Could not disable PHY hibernation\n");
377 return ret;
280} 378}
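
Both new helpers in this file use the same indirect access idiom: write the index of an AR803x debug register to PHY_AR8031_DBG_OFF (MII register 0x1D), then read or write its contents through PHY_AR8031_DBG_DAT (MII register 0x1E). A hedged sketch of that read-modify-write sequence factored into one helper (example_ar803x_dbg_rmw is a hypothetical name; the driver open-codes the sequence):

static int example_ar803x_dbg_rmw(struct pch_gbe_hw *hw, u16 dbg_reg,
				  u16 set, u16 clear)
{
	u16 val;
	int ret;

	/* Select the debug register by index... */
	ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, dbg_reg);
	if (ret)
		return ret;

	/* ...then read-modify-write its value through the data port. */
	pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &val);
	val = (val | set) & ~clear;
	return pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT, val);
}
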
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 03264dc7b5ec..0cbe69206e04 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -33,5 +33,6 @@ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
33void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); 33void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
34void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw); 34void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
35void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw); 35void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
36int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw);
36 37
37#endif /* _PCH_GBE_PHY_H_ */ 38#endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index a5f0b5da6149..c498181a9aa8 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -191,7 +191,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
191 struct device_node *dn = pci_device_to_OF_node(pdev); 191 struct device_node *dn = pci_device_to_OF_node(pdev);
192 int len; 192 int len;
193 const u8 *maddr; 193 const u8 *maddr;
194 u8 addr[6]; 194 u8 addr[ETH_ALEN];
195 195
196 if (!dn) { 196 if (!dn) {
197 dev_dbg(&pdev->dev, 197 dev_dbg(&pdev->dev,
@@ -201,8 +201,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
201 201
202 maddr = of_get_property(dn, "local-mac-address", &len); 202 maddr = of_get_property(dn, "local-mac-address", &len);
203 203
204 if (maddr && len == 6) { 204 if (maddr && len == ETH_ALEN) {
205 memcpy(mac->mac_addr, maddr, 6); 205 memcpy(mac->mac_addr, maddr, ETH_ALEN);
206 return 0; 206 return 0;
207 } 207 }
208 208
@@ -219,14 +219,15 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
219 return -ENOENT; 219 return -ENOENT;
220 } 220 }
221 221
222 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], 222 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
223 &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { 223 &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
224 != ETH_ALEN) {
224 dev_warn(&pdev->dev, 225 dev_warn(&pdev->dev,
225 "can't parse mac address, not configuring\n"); 226 "can't parse mac address, not configuring\n");
226 return -EINVAL; 227 return -EINVAL;
227 } 228 }
228 229
229 memcpy(mac->mac_addr, addr, 6); 230 memcpy(mac->mac_addr, addr, ETH_ALEN);
230 231
231 return 0; 232 return 0;
232} 233}
@@ -439,10 +440,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
439 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) 440 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
440 goto out_ring_desc; 441 goto out_ring_desc;
441 442
442 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, 443 ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
443 RX_RING_SIZE * sizeof(u64), 444 RX_RING_SIZE * sizeof(u64),
444 &ring->buf_dma, 445 &ring->buf_dma, GFP_KERNEL);
445 GFP_KERNEL | __GFP_ZERO);
446 if (!ring->buffers) 446 if (!ring->buffers)
447 goto out_ring_desc; 447 goto out_ring_desc;
448 448
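
The dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) call sites converted here and in pch_gbe_main.c above are behavior-preserving cleanups: at the time of this merge, dma_zalloc_coherent() is a thin wrapper in include/linux/dma-mapping.h, essentially:

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}
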
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index e2f4efa8ad46..f2749d46c125 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -83,7 +83,7 @@ struct pasemi_mac {
83#define MAC_TYPE_GMAC 1 83#define MAC_TYPE_GMAC 1
84#define MAC_TYPE_XAUI 2 84#define MAC_TYPE_XAUI 2
85 85
86 u8 mac_addr[6]; 86 u8 mac_addr[ETH_ALEN];
87 87
88 struct net_lro_mgr lro_mgr; 88 struct net_lro_mgr lro_mgr;
89 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; 89 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0e1797295a48..f59e6be4a66e 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -45,6 +45,17 @@ config QLCNIC_SRIOV
45 This allows for virtual function acceleration in virtualized 45 This allows for virtual function acceleration in virtualized
46 environments. 46 environments.
47 47
48config QLCNIC_DCB
49 bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
50 depends on QLCNIC && DCB
51 default y
52 ---help---
53 This configuration parameter enables DCB support in QLE83XX
54 and QLE82XX Converged Ethernet devices. It allows DCB get
55 operations to be served through the rtnetlink interface. Only
56 the CEE mode of DCB is supported. PG and PFC values apply
57 only to Tx.
58
48config QLGE 59config QLGE
49 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 60 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
50 depends on PCI 61 depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 9fbb1cdbfa47..8375cbde9969 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -536,10 +536,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
536{ 536{
537 struct netxen_adapter *adapter = netdev_priv(netdev); 537 struct netxen_adapter *adapter = netdev_priv(netdev);
538 struct netdev_hw_addr *ha; 538 struct netdev_hw_addr *ha;
539 u8 null_addr[6]; 539 u8 null_addr[ETH_ALEN];
540 int i; 540 int i;
541 541
542 memset(null_addr, 0, 6); 542 memset(null_addr, 0, ETH_ALEN);
543 543
544 if (netdev->flags & IFF_PROMISC) { 544 if (netdev->flags & IFF_PROMISC) {
545 545
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index ec4cf7fd4123..cbd75f97ffb3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -459,16 +459,14 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
459static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) 459static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
460{ 460{
461 u32 control; 461 u32 control;
462 int pos;
463 462
464 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 463 if (pdev->msix_cap) {
465 if (pos) { 464 pci_read_config_dword(pdev, pdev->msix_cap, &control);
466 pci_read_config_dword(pdev, pos, &control);
467 if (enable) 465 if (enable)
468 control |= PCI_MSIX_FLAGS_ENABLE; 466 control |= PCI_MSIX_FLAGS_ENABLE;
469 else 467 else
470 control = 0; 468 control = 0;
471 pci_write_config_dword(pdev, pos, control); 469 pci_write_config_dword(pdev, pdev->msix_cap, control);
472 } 470 }
473} 471}
474 472
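
The hunk above drops a per-call capability walk in favor of pdev->msix_cap, which the PCI core fills in once during device enumeration. Assuming that caching, the two lookups are equivalent for any device that exposes MSI-X (example_msix_cap is a hypothetical name):

static int example_msix_cap(struct pci_dev *pdev)
{
	/* the lookup the core caches into pdev->msix_cap */
	return pci_find_capability(pdev, PCI_CAP_ID_MSIX);
}
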
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 4b1fb3faa3b7..a848d2979722 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -11,3 +11,5 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
11 qlcnic_minidump.o qlcnic_sriov_common.o 11 qlcnic_minidump.o qlcnic_sriov_common.o
12 12
13qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o 13qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
14
15qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 221645e9f182..88349b8fa39a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -20,7 +20,6 @@
20#include <linux/tcp.h> 20#include <linux/tcp.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23
24#include <linux/ethtool.h> 23#include <linux/ethtool.h>
25#include <linux/mii.h> 24#include <linux/mii.h>
26#include <linux/timer.h> 25#include <linux/timer.h>
@@ -35,11 +34,12 @@
35#include "qlcnic_hdr.h" 34#include "qlcnic_hdr.h"
36#include "qlcnic_hw.h" 35#include "qlcnic_hw.h"
37#include "qlcnic_83xx_hw.h" 36#include "qlcnic_83xx_hw.h"
37#include "qlcnic_dcb.h"
38 38
39#define _QLCNIC_LINUX_MAJOR 5 39#define _QLCNIC_LINUX_MAJOR 5
40#define _QLCNIC_LINUX_MINOR 2 40#define _QLCNIC_LINUX_MINOR 3
41#define _QLCNIC_LINUX_SUBVERSION 44 41#define _QLCNIC_LINUX_SUBVERSION 50
42#define QLCNIC_LINUX_VERSIONID "5.2.44" 42#define QLCNIC_LINUX_VERSIONID "5.3.50"
43#define QLCNIC_DRV_IDC_VER 0x01 43#define QLCNIC_DRV_IDC_VER 0x01
44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,6 +98,9 @@
98#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ 98#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
99 + MGMT_CMD_DESC_RESV) 99 + MGMT_CMD_DESC_RESV)
100#define QLCNIC_MAX_TX_TIMEOUTS 2 100#define QLCNIC_MAX_TX_TIMEOUTS 2
101#define QLCNIC_MAX_TX_RINGS 8
102#define QLCNIC_MAX_SDS_RINGS 8
103
101/* 104/*
102 * Following are the states of the Phantom. Phantom will set them and 105 * Following are the states of the Phantom. Phantom will set them and
103 * Host will read to check if the fields are correct. 106 * Host will read to check if the fields are correct.
@@ -389,7 +392,7 @@ struct qlcnic_dump_template_hdr {
389 392
390struct qlcnic_fw_dump { 393struct qlcnic_fw_dump {
391 u8 clr; /* flag to indicate if dump is cleared */ 394 u8 clr; /* flag to indicate if dump is cleared */
392 u8 enable; /* enable/disable dump */ 395 bool enable; /* enable/disable dump */
393 u32 size; /* total size of the dump */ 396 u32 size; /* total size of the dump */
394 void *data; /* dump data area */ 397 void *data; /* dump data area */
395 struct qlcnic_dump_template_hdr *tmpl_hdr; 398 struct qlcnic_dump_template_hdr *tmpl_hdr;
@@ -460,14 +463,16 @@ struct qlcnic_hardware_context {
460 struct qlcnic_fdt fdt; 463 struct qlcnic_fdt fdt;
461 struct qlc_83xx_reset reset; 464 struct qlc_83xx_reset reset;
462 struct qlc_83xx_idc idc; 465 struct qlc_83xx_idc idc;
463 struct qlc_83xx_fw_info fw_info; 466 struct qlc_83xx_fw_info *fw_info;
464 struct qlcnic_intrpt_config *intr_tbl; 467 struct qlcnic_intrpt_config *intr_tbl;
465 struct qlcnic_sriov *sriov; 468 struct qlcnic_sriov *sriov;
466 u32 *reg_tbl; 469 u32 *reg_tbl;
467 u32 *ext_reg_tbl; 470 u32 *ext_reg_tbl;
468 u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; 471 u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
469 u32 mbox_reg[4]; 472 u32 mbox_reg[4];
470 spinlock_t mbx_lock; 473 struct qlcnic_mailbox *mailbox;
474 u8 extend_lb_time;
475 u8 phys_port_id[ETH_ALEN];
471}; 476};
472 477
473struct qlcnic_adapter_stats { 478struct qlcnic_adapter_stats {
@@ -515,6 +520,7 @@ struct qlcnic_host_sds_ring {
515 u32 num_desc; 520 u32 num_desc;
516 void __iomem *crb_sts_consumer; 521 void __iomem *crb_sts_consumer;
517 522
523 struct qlcnic_host_tx_ring *tx_ring;
518 struct status_desc *desc_head; 524 struct status_desc *desc_head;
519 struct qlcnic_adapter *adapter; 525 struct qlcnic_adapter *adapter;
520 struct napi_struct napi; 526 struct napi_struct napi;
@@ -532,9 +538,17 @@ struct qlcnic_host_tx_ring {
532 void __iomem *crb_intr_mask; 538 void __iomem *crb_intr_mask;
533 char name[IFNAMSIZ + 12]; 539 char name[IFNAMSIZ + 12];
534 u16 ctx_id; 540 u16 ctx_id;
541
542 u32 state;
535 u32 producer; 543 u32 producer;
536 u32 sw_consumer; 544 u32 sw_consumer;
537 u32 num_desc; 545 u32 num_desc;
546
547 u64 xmit_on;
548 u64 xmit_off;
549 u64 xmit_called;
550 u64 xmit_finished;
551
538 void __iomem *crb_cmd_producer; 552 void __iomem *crb_cmd_producer;
539 struct cmd_desc_type0 *desc_head; 553 struct cmd_desc_type0 *desc_head;
540 struct qlcnic_adapter *adapter; 554 struct qlcnic_adapter *adapter;
@@ -559,7 +573,6 @@ struct qlcnic_recv_context {
559 u32 state; 573 u32 state;
560 u16 context_id; 574 u16 context_id;
561 u16 virt_port; 575 u16 virt_port;
562
563}; 576};
564 577
565/* HW context creation */ 578/* HW context creation */
@@ -604,6 +617,7 @@ struct qlcnic_recv_context {
604#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) 617#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
605#define QLCNIC_CAP0_VALIDOFF (1 << 11) 618#define QLCNIC_CAP0_VALIDOFF (1 << 11)
606#define QLCNIC_CAP0_LRO_MSS (1 << 21) 619#define QLCNIC_CAP0_LRO_MSS (1 << 21)
620#define QLCNIC_CAP0_TX_MULTI (1 << 22)
607 621
608/* 622/*
609 * Context state 623 * Context state
@@ -631,7 +645,7 @@ struct qlcnic_hostrq_rds_ring {
631 645
632struct qlcnic_hostrq_rx_ctx { 646struct qlcnic_hostrq_rx_ctx {
633 __le64 host_rsp_dma_addr; /* Response dma'd here */ 647 __le64 host_rsp_dma_addr; /* Response dma'd here */
634 __le32 capabilities[4]; /* Flag bit vector */ 648 __le32 capabilities[4]; /* Flag bit vector */
635 __le32 host_int_crb_mode; /* Interrupt crb usage */ 649 __le32 host_int_crb_mode; /* Interrupt crb usage */
636 __le32 host_rds_crb_mode; /* RDS crb usage */ 650 __le32 host_rds_crb_mode; /* RDS crb usage */
637 /* These ring offsets are relative to data[0] below */ 651 /* These ring offsets are relative to data[0] below */
@@ -802,6 +816,7 @@ struct qlcnic_mac_list_s {
802 816
803#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f 817#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
804#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D 818#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
819#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90
805 820
806#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ 821#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
807#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ 822#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -814,6 +829,7 @@ struct qlcnic_mac_list_s {
814#define QLCNIC_FW_CAPABILITY_BDG BIT_8 829#define QLCNIC_FW_CAPABILITY_BDG BIT_8
815#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 830#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
816#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 831#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
832#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
817#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 833#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
818#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31 834#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
819 835
@@ -821,6 +837,7 @@ struct qlcnic_mac_list_s {
821#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 837#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
822#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5 838#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
823#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 839#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
840#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
824 841
825/* module types */ 842/* module types */
826#define LINKEVENT_MODULE_NOT_PRESENT 1 843#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -913,6 +930,8 @@ struct qlcnic_ipaddr {
913#define QLCNIC_FW_LRO_MSS_CAP 0x8000 930#define QLCNIC_FW_LRO_MSS_CAP 0x8000
914#define QLCNIC_TX_INTR_SHARED 0x10000 931#define QLCNIC_TX_INTR_SHARED 0x10000
915#define QLCNIC_APP_CHANGED_FLAGS 0x20000 932#define QLCNIC_APP_CHANGED_FLAGS 0x20000
933#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
934
916#define QLCNIC_IS_MSI_FAMILY(adapter) \ 935#define QLCNIC_IS_MSI_FAMILY(adapter) \
917 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 936 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
918#define QLCNIC_IS_TSO_CAPABLE(adapter) \ 937#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -922,11 +941,11 @@ struct qlcnic_ipaddr {
922#define QLCNIC_BEACON_DISABLE 0xD 941#define QLCNIC_BEACON_DISABLE 0xD
923 942
924#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 943#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
944#define QLCNIC_DEF_NUM_TX_RINGS 4
925#define QLCNIC_MSIX_TBL_SPACE 8192 945#define QLCNIC_MSIX_TBL_SPACE 8192
926#define QLCNIC_PCI_REG_MSIX_TBL 0x44 946#define QLCNIC_PCI_REG_MSIX_TBL 0x44
927#define QLCNIC_MSIX_TBL_PGSIZE 4096 947#define QLCNIC_MSIX_TBL_PGSIZE 4096
928 948
929#define QLCNIC_NETDEV_WEIGHT 128
930#define QLCNIC_ADAPTER_UP_MAGIC 777 949#define QLCNIC_ADAPTER_UP_MAGIC 777
931 950
932#define __QLCNIC_FW_ATTACHED 0 951#define __QLCNIC_FW_ATTACHED 0
@@ -937,10 +956,13 @@ struct qlcnic_ipaddr {
937#define __QLCNIC_DIAG_RES_ALLOC 6 956#define __QLCNIC_DIAG_RES_ALLOC 6
938#define __QLCNIC_LED_ENABLE 7 957#define __QLCNIC_LED_ENABLE 7
939#define __QLCNIC_ELB_INPROGRESS 8 958#define __QLCNIC_ELB_INPROGRESS 8
959#define __QLCNIC_MULTI_TX_UNIQUE 9
940#define __QLCNIC_SRIOV_ENABLE 10 960#define __QLCNIC_SRIOV_ENABLE 10
941#define __QLCNIC_SRIOV_CAPABLE 11 961#define __QLCNIC_SRIOV_CAPABLE 11
942#define __QLCNIC_MBX_POLL_ENABLE 12 962#define __QLCNIC_MBX_POLL_ENABLE 12
943#define __QLCNIC_DIAG_MODE 13 963#define __QLCNIC_DIAG_MODE 13
964#define __QLCNIC_DCB_STATE 14
965#define __QLCNIC_DCB_IN_AEN 15
944 966
945#define QLCNIC_INTERRUPT_TEST 1 967#define QLCNIC_INTERRUPT_TEST 1
946#define QLCNIC_LOOPBACK_TEST 2 968#define QLCNIC_LOOPBACK_TEST 2
@@ -950,12 +972,6 @@ struct qlcnic_ipaddr {
950#define QLCNIC_READD_AGE 20 972#define QLCNIC_READD_AGE 20
951#define QLCNIC_LB_MAX_FILTERS 64 973#define QLCNIC_LB_MAX_FILTERS 64
952#define QLCNIC_LB_BUCKET_SIZE 32 974#define QLCNIC_LB_BUCKET_SIZE 32
953
954/* QLCNIC Driver Error Code */
955#define QLCNIC_FW_NOT_RESPOND 51
956#define QLCNIC_TEST_IN_PROGRESS 52
957#define QLCNIC_UNDEFINED_ERROR 53
958#define QLCNIC_LB_CABLE_NOT_CONN 54
959#define QLCNIC_ILB_MAX_RCV_LOOP 10 975#define QLCNIC_ILB_MAX_RCV_LOOP 10
960 976
961struct qlcnic_filter { 977struct qlcnic_filter {
@@ -972,6 +988,21 @@ struct qlcnic_filter_hash {
972 u16 fbucket_size; 988 u16 fbucket_size;
973}; 989};
974 990
991/* Mailbox specific data structures */
992struct qlcnic_mailbox {
993 struct workqueue_struct *work_q;
994 struct qlcnic_adapter *adapter;
995 struct qlcnic_mbx_ops *ops;
996 struct work_struct work;
997 struct completion completion;
998 struct list_head cmd_q;
999 unsigned long status;
1000 spinlock_t queue_lock; /* Mailbox queue lock */
1001 spinlock_t aen_lock; /* Mailbox response/AEN lock */
1002 atomic_t rsp_status;
1003 u32 num_cmds;
1004};
1005
975struct qlcnic_adapter { 1006struct qlcnic_adapter {
976 struct qlcnic_hardware_context *ahw; 1007 struct qlcnic_hardware_context *ahw;
977 struct qlcnic_recv_context *recv_ctx; 1008 struct qlcnic_recv_context *recv_ctx;
@@ -1035,6 +1066,7 @@ struct qlcnic_adapter {
1035 struct delayed_work fw_work; 1066 struct delayed_work fw_work;
1036 struct delayed_work idc_aen_work; 1067 struct delayed_work idc_aen_work;
1037 struct delayed_work mbx_poll_work; 1068 struct delayed_work mbx_poll_work;
1069 struct qlcnic_dcb *dcb;
1038 1070
1039 struct qlcnic_filter_hash fhash; 1071 struct qlcnic_filter_hash fhash;
1040 struct qlcnic_filter_hash rx_fhash; 1072 struct qlcnic_filter_hash rx_fhash;
@@ -1152,6 +1184,7 @@ struct qlcnic_pci_info {
1152}; 1184};
1153 1185
1154struct qlcnic_npar_info { 1186struct qlcnic_npar_info {
1187 bool eswitch_status;
1155 u16 pvid; 1188 u16 pvid;
1156 u16 min_bw; 1189 u16 min_bw;
1157 u16 max_bw; 1190 u16 max_bw;
@@ -1371,7 +1404,6 @@ struct qlcnic_esw_statistics {
1371 struct __qlcnic_esw_statistics tx; 1404 struct __qlcnic_esw_statistics tx;
1372}; 1405};
1373 1406
1374#define QLCNIC_DUMP_MASK_DEF 0x1f
1375#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed 1407#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
1376#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed 1408#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
1377#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed 1409#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
@@ -1385,9 +1417,20 @@ struct _cdrp_cmd {
1385}; 1417};
1386 1418
1387struct qlcnic_cmd_args { 1419struct qlcnic_cmd_args {
1388 struct _cdrp_cmd req; 1420 struct completion completion;
1389 struct _cdrp_cmd rsp; 1421 struct list_head list;
1390 int op_type; 1422 struct _cdrp_cmd req;
1423 struct _cdrp_cmd rsp;
1424 atomic_t rsp_status;
1425 int pay_size;
1426 u32 rsp_opcode;
1427 u32 total_cmds;
1428 u32 op_type;
1429 u32 type;
1430 u32 cmd_op;
1431 u32 *hdr; /* Back channel message header */
1432 u32 *pay; /* Back channel message payload */
1433 u8 func_num;
1391}; 1434};
1392 1435
1393int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); 1436int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1435,6 +1478,12 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1435void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); 1478void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
1436void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); 1479void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
1437int qlcnic_dump_fw(struct qlcnic_adapter *); 1480int qlcnic_dump_fw(struct qlcnic_adapter *);
1481int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
1482bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
1483pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
1484 pci_channel_state_t);
1485pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
1486void qlcnic_82xx_io_resume(struct pci_dev *);
1438 1487
1439/* Functions from qlcnic_init.c */ 1488/* Functions from qlcnic_init.c */
1440void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int); 1489void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1462,7 +1511,8 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
1462 1511
1463void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); 1512void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
1464void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); 1513void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1465void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); 1514void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
1515 struct qlcnic_host_tx_ring *);
1466 1516
1467int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); 1517int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
1468void qlcnic_watchdog_task(struct work_struct *work); 1518void qlcnic_watchdog_task(struct work_struct *work);
@@ -1474,6 +1524,7 @@ void __qlcnic_set_multi(struct net_device *, u16);
1474int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16); 1524int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
1475int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); 1525int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
1476void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter); 1526void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
1527int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
1477 1528
1478int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); 1529int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1479int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32); 1530int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
@@ -1495,8 +1546,9 @@ int qlcnic_reset_context(struct qlcnic_adapter *);
1495void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); 1546void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1496int qlcnic_diag_alloc_res(struct net_device *netdev, int test); 1547int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1497netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1548netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1498int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t); 1549int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
1499int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); 1550int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
1551int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
1500void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); 1552void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
1501void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); 1553void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
1502int qlcnic_enable_msix(struct qlcnic_adapter *, u32); 1554int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1523,6 +1575,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
1523void qlcnic_advert_link_change(struct qlcnic_adapter *, int); 1575void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
1524void qlcnic_free_tx_rings(struct qlcnic_adapter *); 1576void qlcnic_free_tx_rings(struct qlcnic_adapter *);
1525int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *); 1577int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
1578void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
1526 1579
1527void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); 1580void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
1528void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); 1581void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
@@ -1585,6 +1638,26 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1585 tx_ring->producer; 1638 tx_ring->producer;
1586} 1639}
1587 1640
1641static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
1642 struct net_device *netdev)
1643{
1644 int err, tx_q;
1645
1646 tx_q = adapter->max_drv_tx_rings;
1647
1648 netdev->num_tx_queues = tx_q;
1649 netdev->real_num_tx_queues = tx_q;
1650
1651 err = netif_set_real_num_tx_queues(netdev, tx_q);
1652 if (err)
1653 dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
1654 tx_q);
1655 else
1656 dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
1657
1658 return err;
1659}
1660
1588struct qlcnic_nic_template { 1661struct qlcnic_nic_template {
1589 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1662 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1590 int (*config_led) (struct qlcnic_adapter *, u32, u32); 1663 int (*config_led) (struct qlcnic_adapter *, u32, u32);
@@ -1600,6 +1673,20 @@ struct qlcnic_nic_template {
1600 int (*resume)(struct qlcnic_adapter *); 1673 int (*resume)(struct qlcnic_adapter *);
1601}; 1674};
1602 1675
1676struct qlcnic_mbx_ops {
1677 int (*enqueue_cmd) (struct qlcnic_adapter *,
1678 struct qlcnic_cmd_args *, unsigned long *);
1679 void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
1680 void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
1681 void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
1682 void (*notify_fw) (struct qlcnic_adapter *, u8);
1683};
1684
1685int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
1686void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
1687void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
1688void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
1689
1603/* Adapter hardware abstraction */ 1690/* Adapter hardware abstraction */
1604struct qlcnic_hardware_ops { 1691struct qlcnic_hardware_ops {
1605 void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); 1692 void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
@@ -1607,8 +1694,8 @@ struct qlcnic_hardware_ops {
1607 int (*read_reg) (struct qlcnic_adapter *, ulong, int *); 1694 int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
1608 int (*write_reg) (struct qlcnic_adapter *, ulong, u32); 1695 int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
1609 void (*get_ocm_win) (struct qlcnic_hardware_context *); 1696 void (*get_ocm_win) (struct qlcnic_hardware_context *);
1610 int (*get_mac_address) (struct qlcnic_adapter *, u8 *); 1697 int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
1611 int (*setup_intr) (struct qlcnic_adapter *, u8); 1698 int (*setup_intr) (struct qlcnic_adapter *, u8, int);
1612 int (*alloc_mbx_args)(struct qlcnic_cmd_args *, 1699 int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
1613 struct qlcnic_adapter *, u32); 1700 struct qlcnic_adapter *, u32);
1614 int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); 1701 int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1641,6 +1728,11 @@ struct qlcnic_hardware_ops {
1641 int (*get_board_info) (struct qlcnic_adapter *); 1728 int (*get_board_info) (struct qlcnic_adapter *);
1642 void (*set_mac_filter_count) (struct qlcnic_adapter *); 1729 void (*set_mac_filter_count) (struct qlcnic_adapter *);
1643 void (*free_mac_list) (struct qlcnic_adapter *); 1730 void (*free_mac_list) (struct qlcnic_adapter *);
1731 int (*read_phys_port_id) (struct qlcnic_adapter *);
1732 pci_ers_result_t (*io_error_detected) (struct pci_dev *,
1733 pci_channel_state_t);
1734 pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
1735 void (*io_resume) (struct pci_dev *);
1644}; 1736};
1645 1737
1646extern struct qlcnic_nic_template qlcnic_vf_ops; 1738extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1669,14 +1761,15 @@ static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
1669} 1761}
1670 1762
1671static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, 1763static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
1672 u8 *mac) 1764 u8 *mac, u8 function)
1673{ 1765{
1674 return adapter->ahw->hw_ops->get_mac_address(adapter, mac); 1766 return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
1675} 1767}
1676 1768
1677static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) 1769static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
1770 u8 num_intr, int txq)
1678{ 1771{
1679 return adapter->ahw->hw_ops->setup_intr(adapter, num_intr); 1772 return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
1680} 1773}
1681 1774
1682static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx, 1775static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -1867,6 +1960,12 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
1867 adapter->ahw->hw_ops->set_mac_filter_count(adapter); 1960 adapter->ahw->hw_ops->set_mac_filter_count(adapter);
1868} 1961}
1869 1962
1963static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
1964{
1965 if (adapter->ahw->hw_ops->read_phys_port_id)
1966 adapter->ahw->hw_ops->read_phys_port_id(adapter);
1967}
1968
1870static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, 1969static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
1871 u32 key) 1970 u32 key)
1872{ 1971{
@@ -1898,16 +1997,45 @@ static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
1898 adapter->nic_ops->config_ipaddr(adapter, ip, cmd); 1997 adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
1899} 1998}
1900 1999
2000static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
2001{
2002 return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
2003}
2004
2005static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
2006{
2007 test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
2008 adapter->max_drv_tx_rings = 1;
2009}
2010
2011/* When operating in multi-Tx mode, the driver needs to write 0x1
2012 * to the src register, instead of 0x0, to disable the interrupt; see the summary after this file's diff.
2013 */
1901static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) 2014static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
1902{ 2015{
1903 writel(0, sds_ring->crb_intr_mask); 2016 struct qlcnic_adapter *adapter = sds_ring->adapter;
2017
2018 if (qlcnic_check_multi_tx(adapter) &&
2019 !adapter->ahw->diag_test &&
2020 (adapter->flags & QLCNIC_MSIX_ENABLED))
2021 writel(0x1, sds_ring->crb_intr_mask);
2022 else
2023 writel(0, sds_ring->crb_intr_mask);
1904} 2024}
1905 2025
2026/* When operating in multi-Tx mode, the driver needs to write 0x0
2027 * to the src register, instead of 0x1, to enable interrupts.
2028 */
1906static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) 2029static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
1907{ 2030{
1908 struct qlcnic_adapter *adapter = sds_ring->adapter; 2031 struct qlcnic_adapter *adapter = sds_ring->adapter;
1909 2032
1910 writel(0x1, sds_ring->crb_intr_mask); 2033 if (qlcnic_check_multi_tx(adapter) &&
2034 !adapter->ahw->diag_test &&
2035 (adapter->flags & QLCNIC_MSIX_ENABLED))
2036 writel(0, sds_ring->crb_intr_mask);
2037 else
2038 writel(0x1, sds_ring->crb_intr_mask);
1911 2039
1912 if (!QLCNIC_IS_MSI_FAMILY(adapter)) 2040 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1913 writel(0xfbff, adapter->tgt_mask_reg); 2041 writel(0xfbff, adapter->tgt_mask_reg);
@@ -1939,9 +2067,11 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1939 __func__, ##_args); \ 2067 __func__, ##_args); \
1940 } while (0) 2068 } while (0)
1941 2069
1942#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 2070#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
2071#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
1943#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 2072#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
1944#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 2073#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
2074#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
1945 2075
1946static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) 2076static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1947{ 2077{
@@ -1949,12 +2079,22 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1949 return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false; 2079 return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
1950} 2080}
1951 2081
2082static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
2083{
2084 unsigned short device = adapter->pdev->device;
2085
2086 return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
2087 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
2088}
2089
1952static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) 2090static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
1953{ 2091{
1954 unsigned short device = adapter->pdev->device; 2092 unsigned short device = adapter->pdev->device;
1955 bool status; 2093 bool status;
1956 2094
1957 status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || 2095 status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
2096 (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
2097 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
1958 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; 2098 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
1959 2099
1960 return status; 2100 return status;
@@ -1968,7 +2108,105 @@ static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
1968static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) 2108static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
1969{ 2109{
1970 unsigned short device = adapter->pdev->device; 2110 unsigned short device = adapter->pdev->device;
2111 bool status;
2112
2113 status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
2114 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
2115
2116 return status;
2117}
2118
2119static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
2120{
2121 struct qlcnic_dcb *dcb = adapter->dcb;
2122
2123 if (dcb && dcb->ops->get_hw_capability)
2124 return dcb->ops->get_hw_capability(adapter);
2125
2126 return 0;
2127}
2128
2129static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
2130{
2131 struct qlcnic_dcb *dcb = adapter->dcb;
2132
2133 if (dcb && dcb->ops->free)
2134 dcb->ops->free(adapter);
2135}
2136
2137static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
2138{
2139 struct qlcnic_dcb *dcb = adapter->dcb;
2140
2141 if (dcb && dcb->ops->attach)
2142 return dcb->ops->attach(adapter);
2143
2144 return 0;
2145}
2146
2147static inline int
2148qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
2149{
2150 struct qlcnic_dcb *dcb = adapter->dcb;
2151
2152 if (dcb && dcb->ops->query_hw_capability)
2153 return dcb->ops->query_hw_capability(adapter, buf);
2154
2155 return 0;
2156}
2157
2158static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
2159{
2160 struct qlcnic_dcb *dcb = adapter->dcb;
2161
2162 if (dcb && dcb->ops->get_info)
2163 dcb->ops->get_info(adapter);
2164}
2165
2166static inline int
2167qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
2168{
2169 struct qlcnic_dcb *dcb = adapter->dcb;
2170
2171 if (dcb && dcb->ops->query_cee_param)
2172 return dcb->ops->query_cee_param(adapter, buf, type);
2173
2174 return 0;
2175}
2176
2177static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
2178{
2179 struct qlcnic_dcb *dcb = adapter->dcb;
2180
2181 if (dcb && dcb->ops->get_cee_cfg)
2182 return dcb->ops->get_cee_cfg(adapter);
2183
2184 return 0;
2185}
2186
2187static inline void
2188qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
2189{
2190 struct qlcnic_dcb *dcb = adapter->dcb;
2191
2192 if (dcb && dcb->ops->register_aen)
2193 dcb->ops->register_aen(adapter, flag);
2194}
2195
2196static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
2197 void *msg)
2198{
2199 struct qlcnic_dcb *dcb = adapter->dcb;
2200
2201 if (dcb && dcb->ops->handle_aen)
2202 dcb->ops->handle_aen(adapter, msg);
2203}
2204
2205static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
2206{
2207 struct qlcnic_dcb *dcb = adapter->dcb;
1971 2208
1972 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; 2209 if (dcb && dcb->ops->init_dcbnl_ops)
2210 dcb->ops->init_dcbnl_ops(adapter);
1973} 2211}
1974#endif /* __QLCNIC_H_ */ 2212#endif /* __QLCNIC_H_ */
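
As flagged in the qlcnic_disable_int()/qlcnic_enable_int() comments above, multi-Tx mode inverts the sense of the interrupt-mask write when MSI-X is enabled and no diag test is running. The resulting register writes, summarized from the code above:

/*
 * mode                                  disable_int    enable_int
 * ------------------------------------  -------------  ------------
 * default                               writel(0)      writel(0x1)
 * multi-Tx + MSI-X, outside diag test   writel(0x1)    writel(0)
 */
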
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 9d4bb7f83904..a1818dae47b6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -11,6 +11,7 @@
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <linux/ethtool.h> 12#include <linux/ethtool.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/aer.h>
14 15
15#define QLCNIC_MAX_TX_QUEUES 1 16#define QLCNIC_MAX_TX_QUEUES 1
16#define RSS_HASHTYPE_IP_TCP 0x3 17#define RSS_HASHTYPE_IP_TCP 0x3
@@ -67,6 +68,8 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
67 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, 68 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
68 {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, 69 {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
69 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, 70 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
71 {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
72 {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
70}; 73};
71 74
72const u32 qlcnic_83xx_ext_reg_tbl[] = { 75const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -149,7 +152,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
149 .get_mac_address = qlcnic_83xx_get_mac_address, 152 .get_mac_address = qlcnic_83xx_get_mac_address,
150 .setup_intr = qlcnic_83xx_setup_intr, 153 .setup_intr = qlcnic_83xx_setup_intr,
151 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, 154 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
152 .mbx_cmd = qlcnic_83xx_mbx_op, 155 .mbx_cmd = qlcnic_83xx_issue_cmd,
153 .get_func_no = qlcnic_83xx_get_func_no, 156 .get_func_no = qlcnic_83xx_get_func_no,
154 .api_lock = qlcnic_83xx_cam_lock, 157 .api_lock = qlcnic_83xx_cam_lock,
155 .api_unlock = qlcnic_83xx_cam_unlock, 158 .api_unlock = qlcnic_83xx_cam_unlock,
@@ -175,6 +178,10 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
175 .get_board_info = qlcnic_83xx_get_port_info, 178 .get_board_info = qlcnic_83xx_get_port_info,
176 .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count, 179 .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
177 .free_mac_list = qlcnic_82xx_free_mac_list, 180 .free_mac_list = qlcnic_82xx_free_mac_list,
181 .io_error_detected = qlcnic_83xx_io_error_detected,
182 .io_slot_reset = qlcnic_83xx_io_slot_reset,
183 .io_resume = qlcnic_83xx_io_resume,
184
178}; 185};
179 186
180static struct qlcnic_nic_template qlcnic_83xx_ops = { 187static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@ -261,7 +268,7 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
261 } 268 }
262} 269}
263 270
264int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) 271int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
265{ 272{
266 int err, i, num_msix; 273 int err, i, num_msix;
267 struct qlcnic_hardware_context *ahw = adapter->ahw; 274 struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -362,6 +369,10 @@ static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
362 struct qlcnic_cmd_args *cmd) 369 struct qlcnic_cmd_args *cmd)
363{ 370{
364 int i; 371 int i;
372
373 if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
374 return;
375
365 for (i = 0; i < cmd->rsp.num; i++) 376 for (i = 0; i < cmd->rsp.num; i++)
366 cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i)); 377 cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
367} 378}
@@ -398,24 +409,33 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
398 return IRQ_HANDLED; 409 return IRQ_HANDLED;
399} 410}
400 411
412static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
413{
414 atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
415 complete(&mbx->completion);
416}
417
401static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) 418static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
402{ 419{
403 u32 resp, event; 420 u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
421 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
404 unsigned long flags; 422 unsigned long flags;
405 423
406 spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); 424 spin_lock_irqsave(&mbx->aen_lock, flags);
407
408 resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); 425 resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
409 if (!(resp & QLCNIC_SET_OWNER)) 426 if (!(resp & QLCNIC_SET_OWNER))
410 goto out; 427 goto out;
411 428
412 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); 429 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
413 if (event & QLCNIC_MBX_ASYNC_EVENT) 430 if (event & QLCNIC_MBX_ASYNC_EVENT) {
414 __qlcnic_83xx_process_aen(adapter); 431 __qlcnic_83xx_process_aen(adapter);
415 432 } else {
433 if (atomic_read(&mbx->rsp_status) != rsp_status)
434 qlcnic_83xx_notify_mbx_response(mbx);
435 }
416out: 436out:
417 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); 437 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
418 spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); 438 spin_unlock_irqrestore(&mbx->aen_lock, flags);
419} 439}
420 440
421irqreturn_t qlcnic_83xx_intr(int irq, void *data) 441irqreturn_t qlcnic_83xx_intr(int irq, void *data)
@@ -515,7 +535,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
515 } 535 }
516 536
517 /* Enable mailbox interrupt */ 537 /* Enable mailbox interrupt */
518 qlcnic_83xx_enable_mbx_intrpt(adapter); 538 qlcnic_83xx_enable_mbx_interrupt(adapter);
519 539
520 return err; 540 return err;
521} 541}
@@ -628,7 +648,7 @@ void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
628 ahw->max_uc_count = count; 648 ahw->max_uc_count = count;
629} 649}
630 650
631void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter) 651void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
632{ 652{
633 u32 val; 653 u32 val;
634 654
@@ -682,11 +702,14 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
682static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, 702static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
683 u32 data[]); 703 u32 data[]);
684 704
685static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, 705void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
686 struct qlcnic_cmd_args *cmd) 706 struct qlcnic_cmd_args *cmd)
687{ 707{
688 int i; 708 int i;
689 709
710 if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
711 return;
712
690 dev_info(&adapter->pdev->dev, 713 dev_info(&adapter->pdev->dev,
691 "Host MBX regs(%d)\n", cmd->req.num); 714 "Host MBX regs(%d)\n", cmd->req.num);
692 for (i = 0; i < cmd->req.num; i++) { 715 for (i = 0; i < cmd->req.num; i++) {
@@ -705,120 +728,73 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
     pr_info("\n");
 }
 
-/* Mailbox response for mac rcode */
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
-{
-    u32 fw_data;
-    u8 mac_cmd_rcode;
-
-    fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
-    mac_cmd_rcode = (u8)fw_data;
-    if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
-        mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
-        mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
-        return QLCNIC_RCODE_SUCCESS;
-    return 1;
-}
-
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
-{
-    u32 data;
-    struct qlcnic_hardware_context *ahw = adapter->ahw;
-    /* wait for mailbox completion */
-    do {
-        data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
-        if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
-            data = QLCNIC_RCODE_TIMEOUT;
-            break;
-        }
-        mdelay(1);
-    } while (!data);
-    return data;
-}
-
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
-                       struct qlcnic_cmd_args *cmd)
-{
-    int i;
-    u16 opcode;
-    u8 mbx_err_code;
-    unsigned long flags;
-    struct qlcnic_hardware_context *ahw = adapter->ahw;
-    u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
-
-    opcode = LSW(cmd->req.arg[0]);
-    if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
-        dev_info(&adapter->pdev->dev,
-                 "Mailbox cmd attempted, 0x%x\n", opcode);
-        dev_info(&adapter->pdev->dev, "Mailbox detached\n");
-        return 0;
-    }
-
-    spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
-    mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-
-    if (mbx_val) {
-        QLCDB(adapter, DRV,
-              "Mailbox cmd attempted, 0x%x\n", opcode);
-        QLCDB(adapter, DRV,
-              "Mailbox not available, 0x%x, collect FW dump\n",
-              mbx_val);
-        cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
-        spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-        return cmd->rsp.arg[0];
-    }
-
-    /* Fill in mailbox registers */
-    mbx_cmd = cmd->req.arg[0];
-    writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
-    for (i = 1; i < cmd->req.num; i++)
-        writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
-
-    /* Signal FW about the impending command */
-    QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-poll:
-    rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
-    if (rsp != QLCNIC_RCODE_TIMEOUT) {
-        /* Get the FW response data */
-        fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
-        if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
-            __qlcnic_83xx_process_aen(adapter);
-            goto poll;
-        }
-        mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
-        rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
-        opcode = QLCNIC_MBX_RSP(fw_data);
-        qlcnic_83xx_get_mbx_data(adapter, cmd);
-
-        switch (mbx_err_code) {
-        case QLCNIC_MBX_RSP_OK:
-        case QLCNIC_MBX_PORT_RSP_OK:
-            rsp = QLCNIC_RCODE_SUCCESS;
-            break;
-        default:
-            if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
-                rsp = qlcnic_83xx_mac_rcode(adapter);
-                if (!rsp)
-                    goto out;
-            }
-            dev_err(&adapter->pdev->dev,
-                    "MBX command 0x%x failed with err:0x%x\n",
-                    opcode, mbx_err_code);
-            rsp = mbx_err_code;
-            qlcnic_dump_mbx(adapter, cmd);
-            break;
-        }
-        goto out;
-    }
-
-    dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
-            QLCNIC_MBX_RSP(mbx_cmd));
-    rsp = QLCNIC_RCODE_TIMEOUT;
-out:
-    /* clear fw mbx control register */
-    QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
-    spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-    return rsp;
-}
+static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+                                                struct qlcnic_cmd_args *cmd)
+{
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    int opcode = LSW(cmd->req.arg[0]);
+    unsigned long max_loops;
+
+    max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
+
+    for (; max_loops; max_loops--) {
+        if (atomic_read(&cmd->rsp_status) ==
+            QLC_83XX_MBX_RESPONSE_ARRIVED)
+            return;
+
+        udelay(1);
+    }
+
+    dev_err(&adapter->pdev->dev,
+            "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+            __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+    flush_workqueue(ahw->mailbox->work_q);
+    return;
+}
+
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+                          struct qlcnic_cmd_args *cmd)
+{
+    struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    int cmd_type, err, opcode;
+    unsigned long timeout;
+
+    opcode = LSW(cmd->req.arg[0]);
+    cmd_type = cmd->type;
+    err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+    if (err) {
+        dev_err(&adapter->pdev->dev,
+                "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                __func__, opcode, cmd->type, ahw->pci_func,
+                ahw->op_mode);
+        return err;
+    }
+
+    switch (cmd_type) {
+    case QLC_83XX_MBX_CMD_WAIT:
+        if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
+            dev_err(&adapter->pdev->dev,
+                    "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                    __func__, opcode, cmd_type, ahw->pci_func,
+                    ahw->op_mode);
+            flush_workqueue(mbx->work_q);
+        }
+        break;
+    case QLC_83XX_MBX_CMD_NO_WAIT:
+        return 0;
+    case QLC_83XX_MBX_CMD_BUSY_WAIT:
+        qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+        break;
+    default:
+        dev_err(&adapter->pdev->dev,
+                "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                __func__, opcode, cmd_type, ahw->pci_func,
+                ahw->op_mode);
+        qlcnic_83xx_detach_mailbox_work(adapter);
+    }
+
+    return cmd->rsp_opcode;
+}
 
 int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
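
The rewritten qlcnic_83xx_issue_cmd() above no longer busy-polls the control register inline; it hands the command to the mailbox worker and then picks one of three completion strategies per command. A rough userspace model of that dispatch follows — POSIX primitives stand in for the kernel's completion and atomic APIs, and every name in it is hypothetical, not part of the driver:

    /* Sketch only: models WAIT / NO_WAIT / BUSY_WAIT completion handling.
     * pthread cond vars stand in for struct completion, C11 atomics for
     * cmd->rsp_status. Build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    enum cmd_type { CMD_WAIT, CMD_NO_WAIT, CMD_BUSY_WAIT };

    struct mbx_cmd {
        enum cmd_type type;
        atomic_int done;            /* stands in for cmd->rsp_status */
        pthread_mutex_t lock;
        pthread_cond_t completion;  /* stands in for cmd->completion */
    };

    static void *firmware_side(void *arg)   /* models the mailbox worker */
    {
        struct mbx_cmd *cmd = arg;

        usleep(1000);                       /* pretend firmware latency */
        atomic_store(&cmd->done, 1);
        pthread_mutex_lock(&cmd->lock);
        pthread_cond_signal(&cmd->completion);
        pthread_mutex_unlock(&cmd->lock);
        return NULL;
    }

    static int issue_cmd(struct mbx_cmd *cmd)
    {
        struct timespec ts;

        switch (cmd->type) {
        case CMD_WAIT:                      /* sleep until response or timeout */
            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += 5;
            pthread_mutex_lock(&cmd->lock);
            while (!atomic_load(&cmd->done))
                if (pthread_cond_timedwait(&cmd->completion, &cmd->lock, &ts))
                    break;                  /* timed out */
            pthread_mutex_unlock(&cmd->lock);
            return atomic_load(&cmd->done) ? 0 : -1;
        case CMD_NO_WAIT:                   /* fire and forget */
            return 0;
        case CMD_BUSY_WAIT:                 /* spin; usable where sleeping isn't */
            while (!atomic_load(&cmd->done))
                ;
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        struct mbx_cmd cmd = { .type = CMD_WAIT,
                               .lock = PTHREAD_MUTEX_INITIALIZER,
                               .completion = PTHREAD_COND_INITIALIZER };
        pthread_t fw;

        pthread_create(&fw, NULL, firmware_side, &cmd);
        printf("issue_cmd -> %d\n", issue_cmd(&cmd));
        pthread_join(fw, NULL);
        return 0;
    }

The same split shows up throughout the patch: sleepable callers use WAIT, atomic-context callers use NO_WAIT, and the diagnostic paths use BUSY_WAIT.
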
@@ -828,6 +804,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
     u32 temp;
     const struct qlcnic_mailbox_metadata *mbx_tbl;
 
+    memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
     mbx_tbl = qlcnic_83xx_mbx_tbl;
     size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
     for (i = 0; i < size; i++) {
@@ -850,6 +827,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
             memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
             temp = adapter->ahw->fw_hal_version << 29;
             mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+            mbx->cmd_op = type;
             return 0;
         }
     }
@@ -888,9 +866,9 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
 
 void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 {
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
     u32 event[QLC_83XX_MBX_AEN_CNT];
     int i;
-    struct qlcnic_hardware_context *ahw = adapter->ahw;
 
     for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
         event[i] = readl(QLCNIC_MBX_FW(ahw, i));
@@ -910,6 +888,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
                           &adapter->idc_aen_work, 0);
         break;
     case QLCNIC_MBX_TIME_EXTEND_EVENT:
+        ahw->extend_lb_time = event[1] >> 8 & 0xf;
         break;
     case QLCNIC_MBX_BC_EVENT:
         qlcnic_sriov_handle_bc_event(adapter, event[1]);
@@ -922,6 +901,9 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
         dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
                  QLCNIC_MBX_RSP(event[0]));
         break;
+    case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
+        qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+        break;
     default:
         dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
                 QLCNIC_MBX_RSP(event[0]));
@@ -933,20 +915,23 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 
 static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 {
+    u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
     struct qlcnic_hardware_context *ahw = adapter->ahw;
-    u32 resp, event;
+    struct qlcnic_mailbox *mbx = ahw->mailbox;
     unsigned long flags;
 
-    spin_lock_irqsave(&ahw->mbx_lock, flags);
-
+    spin_lock_irqsave(&mbx->aen_lock, flags);
     resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
     if (resp & QLCNIC_SET_OWNER) {
         event = readl(QLCNIC_MBX_FW(ahw, 0));
-        if (event & QLCNIC_MBX_ASYNC_EVENT)
+        if (event & QLCNIC_MBX_ASYNC_EVENT) {
             __qlcnic_83xx_process_aen(adapter);
+        } else {
+            if (atomic_read(&mbx->rsp_status) != rsp_status)
+                qlcnic_83xx_notify_mbx_response(mbx);
+        }
     }
-
-    spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+    spin_unlock_irqrestore(&mbx->aen_lock, flags);
 }
 
 static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
@@ -969,6 +954,7 @@ void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
         return;
 
     INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+    queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
 }
 
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
@@ -1355,8 +1341,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
 
     if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
         /* disable and free mailbox interrupt */
-        if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+        if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+            qlcnic_83xx_enable_mbx_poll(adapter);
             qlcnic_83xx_free_mbx_intr(adapter);
+        }
         adapter->ahw->loopback_state = 0;
         adapter->ahw->hw_ops->setup_link_event(adapter, 1);
     }
@@ -1377,6 +1365,8 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
         for (ring = 0; ring < adapter->max_sds_rings; ring++) {
             sds_ring = &adapter->recv_ctx->sds_rings[ring];
             qlcnic_83xx_disable_intr(adapter, sds_ring);
+            if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+                qlcnic_83xx_enable_mbx_poll(adapter);
         }
     }
 
@@ -1386,6 +1376,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
     if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
         if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
             err = qlcnic_83xx_setup_mbx_intr(adapter);
+            qlcnic_83xx_disable_mbx_poll(adapter);
             if (err) {
                 dev_err(&adapter->pdev->dev,
                         "%s: failed to setup mbx interrupt\n",
@@ -1402,6 +1393,10 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
 
     if (netif_running(netdev))
         __qlcnic_up(adapter, netdev);
+
+    if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
+        !(adapter->flags & QLCNIC_MSIX_ENABLED))
+        qlcnic_83xx_disable_mbx_poll(adapter);
 out:
     netif_device_attach(netdev);
 }
@@ -1619,26 +1614,33 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
 
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
 {
-    int err;
+    struct qlcnic_cmd_args *cmd = NULL;
     u32 temp = 0;
-    struct qlcnic_cmd_args cmd;
+    int err;
 
     if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
         return -EIO;
 
-    err = qlcnic_alloc_mbx_args(&cmd, adapter,
+    cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+    if (!cmd)
+        return -ENOMEM;
+
+    err = qlcnic_alloc_mbx_args(cmd, adapter,
                                 QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
     if (err)
-        return err;
+        goto out;
 
+    cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
     qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
-    cmd.req.arg[1] = (mode ? 1 : 0) | temp;
-    err = qlcnic_issue_cmd(adapter, &cmd);
-    if (err)
-        dev_info(&adapter->pdev->dev,
-                 "Promiscous mode config failed\n");
+    cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+    err = qlcnic_issue_cmd(adapter, cmd);
+    if (!err)
+        return err;
 
-    qlcnic_free_mbx_args(&cmd);
+    qlcnic_free_mbx_args(cmd);
+
+out:
+    kfree(cmd);
     return err;
 }
 
@@ -1651,7 +1653,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
     if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
         netdev_warn(netdev,
                     "Loopback test not supported in non privileged mode\n");
-        return ret;
+        return -ENOTSUPP;
     }
 
     if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
@@ -1679,19 +1681,17 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
     /* Poll for link up event before running traffic */
     do {
         msleep(QLC_83XX_LB_MSLEEP_COUNT);
-        if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-            qlcnic_83xx_process_aen(adapter);
 
         if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
             netdev_info(netdev,
                         "Device is resetting, free LB test resources\n");
-            ret = -EIO;
+            ret = -EBUSY;
             goto free_diag_res;
         }
         if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
             netdev_info(netdev,
                         "Firmware didn't sent link up event to loopback request\n");
-            ret = -QLCNIC_FW_NOT_RESPOND;
+            ret = -ETIMEDOUT;
             qlcnic_83xx_clear_lb_mode(adapter, mode);
             goto free_diag_res;
         }
@@ -1700,7 +1700,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
     /* Make sure carrier is off and queue is stopped during loopback */
     if (netif_running(netdev)) {
         netif_carrier_off(netdev);
-        netif_stop_queue(netdev);
+        netif_tx_stop_all_queues(netdev);
     }
 
     ret = qlcnic_do_lb_test(adapter, mode);
@@ -1716,18 +1716,42 @@ fail_diag_alloc:
     return ret;
 }
 
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+                                             u32 *max_wait_count)
+{
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    int temp;
+
+    netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
+                ahw->extend_lb_time);
+    temp = ahw->extend_lb_time * 1000;
+    *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+    ahw->extend_lb_time = 0;
+}
+
 int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
     struct qlcnic_hardware_context *ahw = adapter->ahw;
     struct net_device *netdev = adapter->netdev;
+    u32 config, max_wait_count;
     int status = 0, loop = 0;
-    u32 config;
 
+    ahw->extend_lb_time = 0;
+    max_wait_count = QLC_83XX_LB_WAIT_COUNT;
     status = qlcnic_83xx_get_port_config(adapter);
     if (status)
         return status;
 
     config = ahw->port_config;
+
+    /* Check if port is already in loopback mode */
+    if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+        (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+        netdev_err(netdev,
+                   "Port already in Loopback mode.\n");
+        return -EINPROGRESS;
+    }
+
     set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 
     if (mode == QLCNIC_ILB_MODE)
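
qlcnic_extend_lb_idc_cmpltn_wait() converts a firmware-requested extension, reported in seconds by the TIME_EXTEND AEN, into extra iterations of the msleep() poll loop. The arithmetic is easy to sanity-check in isolation; the constants below are illustrative stand-ins, not the driver's real values:

    /* Worked example of the wait-budget extension. 500 and 30 are made-up
     * stand-ins for QLC_83XX_LB_MSLEEP_COUNT and QLC_83XX_LB_WAIT_COUNT. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int msleep_count = 500;     /* ms slept per loop iteration */
        unsigned int max_wait_count = 30;    /* initial iteration budget */
        unsigned int extend_lb_time = 8;     /* seconds requested by firmware */

        /* seconds -> milliseconds -> extra loop iterations */
        max_wait_count += (extend_lb_time * 1000) / msleep_count;
        printf("new budget: %u iterations\n", max_wait_count); /* 30 + 16 = 46 */
        return 0;
    }
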
@@ -1748,21 +1772,24 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
     /* Wait for Link and IDC Completion AEN */
     do {
         msleep(QLC_83XX_LB_MSLEEP_COUNT);
-        if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-            qlcnic_83xx_process_aen(adapter);
 
         if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
             netdev_info(netdev,
                         "Device is resetting, free LB test resources\n");
             clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
-            return -EIO;
+            return -EBUSY;
         }
-        if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
-            netdev_err(netdev,
-                       "Did not receive IDC completion AEN\n");
+
+        if (ahw->extend_lb_time)
+            qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+                                             &max_wait_count);
+
+        if (loop++ > max_wait_count) {
+            netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+                       __func__);
             clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
             qlcnic_83xx_clear_lb_mode(adapter, mode);
-            return -EIO;
+            return -ETIMEDOUT;
         }
     } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
 
@@ -1774,10 +1801,12 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
     struct qlcnic_hardware_context *ahw = adapter->ahw;
+    u32 config = ahw->port_config, max_wait_count;
     struct net_device *netdev = adapter->netdev;
     int status = 0, loop = 0;
-    u32 config = ahw->port_config;
 
+    ahw->extend_lb_time = 0;
+    max_wait_count = QLC_83XX_LB_WAIT_COUNT;
     set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
     if (mode == QLCNIC_ILB_MODE)
         ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
@@ -1797,21 +1826,23 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
     /* Wait for Link and IDC Completion AEN */
     do {
         msleep(QLC_83XX_LB_MSLEEP_COUNT);
-        if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-            qlcnic_83xx_process_aen(adapter);
 
         if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
             netdev_info(netdev,
                         "Device is resetting, free LB test resources\n");
             clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
-            return -EIO;
+            return -EBUSY;
         }
 
-        if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
-            netdev_err(netdev,
-                       "Did not receive IDC completion AEN\n");
+        if (ahw->extend_lb_time)
+            qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+                                             &max_wait_count);
+
+        if (loop++ > max_wait_count) {
+            netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+                       __func__);
             clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
-            return -EIO;
+            return -ETIMEDOUT;
         }
     } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
 
@@ -1950,25 +1981,31 @@ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
 int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
                                    u16 vlan_id, u8 op)
 {
-    int err;
-    u32 *buf, temp = 0;
-    struct qlcnic_cmd_args cmd;
+    struct qlcnic_cmd_args *cmd = NULL;
     struct qlcnic_macvlan_mbx mv;
+    u32 *buf, temp = 0;
+    int err;
 
     if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
         return -EIO;
 
-    err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+    cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+    if (!cmd)
+        return -ENOMEM;
+
+    err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
     if (err)
-        return err;
+        goto out;
+
+    cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
 
     if (vlan_id)
         op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
              QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
 
-    cmd.req.arg[1] = op | (1 << 8);
+    cmd->req.arg[1] = op | (1 << 8);
     qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
-    cmd.req.arg[1] |= temp;
+    cmd->req.arg[1] |= temp;
     mv.vlan = vlan_id;
     mv.mac_addr0 = addr[0];
     mv.mac_addr1 = addr[1];
@@ -1976,14 +2013,15 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
     mv.mac_addr3 = addr[3];
     mv.mac_addr4 = addr[4];
     mv.mac_addr5 = addr[5];
-    buf = &cmd.req.arg[2];
+    buf = &cmd->req.arg[2];
     memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
-    err = qlcnic_issue_cmd(adapter, &cmd);
-    if (err)
-        dev_err(&adapter->pdev->dev,
-                "MAC-VLAN %s to CAM failed, err=%d.\n",
-                ((op == 1) ? "add " : "delete "), err);
-    qlcnic_free_mbx_args(&cmd);
+    err = qlcnic_issue_cmd(adapter, cmd);
+    if (!err)
+        return err;
+
+    qlcnic_free_mbx_args(cmd);
+out:
+    kfree(cmd);
     return err;
 }
 
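
Both converted callers above follow the same ownership rule for QLC_83XX_MBX_CMD_NO_WAIT commands: once qlcnic_issue_cmd() accepts the command, the mailbox completion path frees both the args and the struct, so the caller unwinds only on its own failure paths. Condensed into a hypothetical helper (the wrapper itself is invented for illustration; the calls inside it are the driver's own, so this compiles only against the driver's headers):

    /* Sketch of the fire-and-forget ownership rule for NO_WAIT commands. */
    static int send_no_wait_cmd(struct qlcnic_adapter *adapter, u32 op)
    {
        struct qlcnic_cmd_args *cmd;
        int err;

        cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);  /* may run in atomic context */
        if (!cmd)
            return -ENOMEM;

        err = qlcnic_alloc_mbx_args(cmd, adapter, op);
        if (err)
            goto out;                /* args were never allocated */

        cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
        err = qlcnic_issue_cmd(adapter, cmd);
        if (!err)
            return 0;                /* ownership passed to the mailbox worker */

        qlcnic_free_mbx_args(cmd);   /* issue failed: caller still owns cmd */
    out:
        kfree(cmd);
        return err;
    }
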
@@ -2008,12 +2046,14 @@ void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
     cmd->req.arg[1] = type;
 }
 
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+                                u8 function)
 {
     int err, i;
     struct qlcnic_cmd_args cmd;
     u32 mac_low, mac_high;
 
+    function = 0;
     err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
     if (err)
         return err;
@@ -2099,10 +2139,12 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
 irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
 {
     struct qlcnic_adapter *adapter = data;
-    unsigned long flags;
+    struct qlcnic_mailbox *mbx;
     u32 mask, resp, event;
+    unsigned long flags;
 
-    spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+    mbx = adapter->ahw->mailbox;
+    spin_lock_irqsave(&mbx->aen_lock, flags);
     resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
     if (!(resp & QLCNIC_SET_OWNER))
         goto out;
@@ -2110,11 +2152,13 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
     event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
     if (event & QLCNIC_MBX_ASYNC_EVENT)
         __qlcnic_83xx_process_aen(adapter);
+    else
+        qlcnic_83xx_notify_mbx_response(mbx);
+
 out:
     mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
     writel(0, adapter->ahw->pci_base0 + mask);
-    spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-
+    spin_unlock_irqrestore(&mbx->aen_lock, flags);
     return IRQ_HANDLED;
 }
 
@@ -2287,7 +2331,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
                  pci_info->tx_max_bw, pci_info->mac);
     }
     if (ahw->op_mode == QLCNIC_MGMT_FUNC)
-        dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n",
+        dev_info(dev, "Max functions = %d, active functions = %d\n",
                  ahw->max_pci_func, ahw->act_pci_func);
 
     } else {
@@ -3477,3 +3521,360 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
                    idc->delay);
     return err;
 }
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+    INIT_COMPLETION(mbx->completion);
+    set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+    destroy_workqueue(mbx->work_q);
+    kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+                                  struct qlcnic_cmd_args *cmd)
+{
+    atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+    if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+        qlcnic_free_mbx_args(cmd);
+        kfree(cmd);
+        return;
+    }
+    complete(&cmd->completion);
+}
+
+static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+    struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+    struct list_head *head = &mbx->cmd_q;
+    struct qlcnic_cmd_args *cmd = NULL;
+
+    spin_lock(&mbx->queue_lock);
+
+    while (!list_empty(head)) {
+        cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+        dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+                 __func__, cmd->cmd_op);
+        list_del(&cmd->list);
+        mbx->num_cmds--;
+        qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+    }
+
+    spin_unlock(&mbx->queue_lock);
+}
+
+static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    struct qlcnic_mailbox *mbx = ahw->mailbox;
+    u32 host_mbx_ctrl;
+
+    if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+        return -EBUSY;
+
+    host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+    if (host_mbx_ctrl) {
+        clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+        ahw->idc.collect_dump = 1;
+        return -EIO;
+    }
+
+    return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+                                              u8 issue_cmd)
+{
+    if (issue_cmd)
+        QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+    else
+        QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+                                        struct qlcnic_cmd_args *cmd)
+{
+    struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+    spin_lock(&mbx->queue_lock);
+
+    list_del(&cmd->list);
+    mbx->num_cmds--;
+
+    spin_unlock(&mbx->queue_lock);
+
+    qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_cmd_args *cmd)
+{
+    u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    int i, j;
+
+    if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+        mbx_cmd = cmd->req.arg[0];
+        writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+        for (i = 1; i < cmd->req.num; i++)
+            writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+    } else {
+        fw_hal_version = ahw->fw_hal_version;
+        hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+        total_size = cmd->pay_size + hdr_size;
+        tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+        mbx_cmd = tmp | fw_hal_version << 29;
+        writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+        /* Back channel specific operations bits */
+        mbx_cmd = 0x1 | 1 << 4;
+
+        if (qlcnic_sriov_pf_check(adapter))
+            mbx_cmd |= cmd->func_num << 5;
+
+        writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+        for (i = 2, j = 0; j < hdr_size; i++, j++)
+            writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+        for (j = 0; j < cmd->pay_size; j++, i++)
+            writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+    }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+    struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+    clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+    complete(&mbx->completion);
+    cancel_work_sync(&mbx->work);
+    flush_workqueue(mbx->work_q);
+    qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_cmd_args *cmd,
+                                       unsigned long *timeout)
+{
+    struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+    if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+        atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+        init_completion(&cmd->completion);
+        cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+        spin_lock(&mbx->queue_lock);
+
+        list_add_tail(&cmd->list, &mbx->cmd_q);
+        mbx->num_cmds++;
+        cmd->total_cmds = mbx->num_cmds;
+        *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+        queue_work(mbx->work_q, &mbx->work);
+
+        spin_unlock(&mbx->queue_lock);
+
+        return 0;
+    }
+
+    return -EBUSY;
+}
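
Note the timeout computed by qlcnic_83xx_enqueue_mbx_cmd(): the wait budget scales linearly with the queue depth at insertion time, so a command queued behind others is not timed out merely for waiting its turn. A back-of-the-envelope check (the HZ value is assumed for illustration; it is configuration-dependent):

    /* Worked example of the queue-depth timeout scaling. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long hz = 1000;             /* assumed CONFIG_HZ */
        unsigned long mbx_timeout = 5 * hz;  /* QLC_83XX_MBX_TIMEOUT = 5 * HZ */
        unsigned long queue_depth = 3;       /* cmd->total_cmds after insertion */

        /* third in line: may legitimately wait behind two predecessors */
        printf("wait budget: %lu jiffies (%lu s)\n",
               queue_depth * mbx_timeout, queue_depth * 5);
        return 0;
    }
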
+
+static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_cmd_args *cmd)
+{
+    u8 mac_cmd_rcode;
+    u32 fw_data;
+
+    if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+        fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+        mac_cmd_rcode = (u8)fw_data;
+        if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+            mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+            mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+            cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+            return QLCNIC_RCODE_SUCCESS;
+        }
+    }
+
+    return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_cmd_args *cmd)
+{
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    struct device *dev = &adapter->pdev->dev;
+    u8 mbx_err_code;
+    u32 fw_data;
+
+    fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+    mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+    qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+    switch (mbx_err_code) {
+    case QLCNIC_MBX_RSP_OK:
+    case QLCNIC_MBX_PORT_RSP_OK:
+        cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+        break;
+    default:
+        if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+            break;
+
+        dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+                __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+                ahw->op_mode, mbx_err_code);
+        cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+        qlcnic_dump_mbx(adapter, cmd);
+    }
+
+    return;
+}
+
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+    struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+                                              work);
+    struct qlcnic_adapter *adapter = mbx->adapter;
+    struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+    struct device *dev = &adapter->pdev->dev;
+    atomic_t *rsp_status = &mbx->rsp_status;
+    struct list_head *head = &mbx->cmd_q;
+    struct qlcnic_hardware_context *ahw;
+    struct qlcnic_cmd_args *cmd = NULL;
+
+    ahw = adapter->ahw;
+
+    while (true) {
+        if (qlcnic_83xx_check_mbx_status(adapter)) {
+            qlcnic_83xx_flush_mbx_queue(adapter);
+            return;
+        }
+
+        atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+
+        spin_lock(&mbx->queue_lock);
+
+        if (list_empty(head)) {
+            spin_unlock(&mbx->queue_lock);
+            return;
+        }
+        cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+        spin_unlock(&mbx->queue_lock);
+
+        mbx_ops->encode_cmd(adapter, cmd);
+        mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+        if (wait_for_completion_timeout(&mbx->completion,
+                                        QLC_83XX_MBX_TIMEOUT)) {
+            mbx_ops->decode_resp(adapter, cmd);
+            mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+        } else {
+            dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+                    __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+                    ahw->op_mode);
+            clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+            qlcnic_dump_mbx(adapter, cmd);
+            qlcnic_83xx_idc_request_reset(adapter,
+                                          QLCNIC_FORCE_FW_DUMP_KEY);
+            cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+        }
+        mbx_ops->dequeue_cmd(adapter, cmd);
+    }
+}
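
The worker above drains the queue one command at a time and, importantly, only peeks at the head while processing: the command stays on the list until decode finishes and dequeue_cmd runs, so a concurrent flush can still find and complete an in-flight command. A stripped-down sketch of that consumer pattern — the struct and the two helpers are invented for illustration, while the list, spinlock, and workqueue calls are standard kernel API:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct my_cmd {
        struct list_head list;
        /* request/response payload would live here */
    };

    /* hypothetical helpers: encode/wait/decode, and waking the waiter */
    void process_one(struct my_cmd *cmd);
    void complete_one(struct my_cmd *cmd);

    struct mbx_like {
        struct workqueue_struct *work_q;   /* single-threaded workqueue */
        struct work_struct work;
        struct list_head cmd_q;            /* guarded by queue_lock */
        spinlock_t queue_lock;
    };

    static void mbx_like_worker(struct work_struct *work)
    {
        struct mbx_like *mbx = container_of(work, struct mbx_like, work);
        struct my_cmd *cmd;

        for (;;) {
            spin_lock(&mbx->queue_lock);
            if (list_empty(&mbx->cmd_q)) {
                spin_unlock(&mbx->queue_lock);
                return;                    /* re-queued by the next enqueue */
            }
            cmd = list_first_entry(&mbx->cmd_q, struct my_cmd, list);
            spin_unlock(&mbx->queue_lock); /* cmd deliberately stays listed */

            process_one(cmd);

            spin_lock(&mbx->queue_lock);
            list_del(&cmd->list);          /* dequeue only after processing */
            spin_unlock(&mbx->queue_lock);
            complete_one(cmd);
        }
    }
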
+
+static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+    .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
+    .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
+    .decode_resp = qlcnic_83xx_decode_mbx_rsp,
+    .encode_cmd  = qlcnic_83xx_encode_mbx_cmd,
+    .nofity_fw   = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    struct qlcnic_mailbox *mbx;
+
+    ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+    if (!ahw->mailbox)
+        return -ENOMEM;
+
+    mbx = ahw->mailbox;
+    mbx->ops = &qlcnic_83xx_mbx_ops;
+    mbx->adapter = adapter;
+
+    spin_lock_init(&mbx->queue_lock);
+    spin_lock_init(&mbx->aen_lock);
+    INIT_LIST_HEAD(&mbx->cmd_q);
+    init_completion(&mbx->completion);
+
+    mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+    if (mbx->work_q == NULL) {
+        kfree(mbx);
+        return -ENOMEM;
+    }
+
+    INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+    set_bit(QLC_83XX_MBX_READY, &mbx->status);
+    return 0;
+}
+
+pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+                                               pci_channel_state_t state)
+{
+    struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+    if (state == pci_channel_io_perm_failure)
+        return PCI_ERS_RESULT_DISCONNECT;
+
+    if (state == pci_channel_io_normal)
+        return PCI_ERS_RESULT_RECOVERED;
+
+    set_bit(__QLCNIC_AER, &adapter->state);
+    set_bit(__QLCNIC_RESETTING, &adapter->state);
+
+    qlcnic_83xx_aer_stop_poll_work(adapter);
+
+    pci_save_state(pdev);
+    pci_disable_device(pdev);
+
+    return PCI_ERS_RESULT_NEED_RESET;
+}
+
+pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+{
+    struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+    int err = 0;
+
+    pdev->error_state = pci_channel_io_normal;
+    err = pci_enable_device(pdev);
+    if (err)
+        goto disconnect;
+
+    pci_set_power_state(pdev, PCI_D0);
+    pci_set_master(pdev);
+    pci_restore_state(pdev);
+
+    err = qlcnic_83xx_aer_reset(adapter);
+    if (err == 0)
+        return PCI_ERS_RESULT_RECOVERED;
+disconnect:
+    clear_bit(__QLCNIC_AER, &adapter->state);
+    clear_bit(__QLCNIC_RESETTING, &adapter->state);
+    return PCI_ERS_RESULT_DISCONNECT;
+}
+
+void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+{
+    struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+    pci_cleanup_aer_uncorrect_error_status(pdev);
+    if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+        qlcnic_83xx_aer_start_poll_work(adapter);
+}
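
The three AER callbacks added above plug into the PCI core's error-recovery state machine through struct pci_error_handlers. A sketch of the typical wiring, assuming the driver's existing pci_driver instance ("qlcnic_driver" is illustrative shorthand; only the relevant fields are shown, and this compiles only inside the driver):

    #include <linux/pci.h>

    static const struct pci_error_handlers qlcnic_83xx_err_handler = {
        .error_detected = qlcnic_83xx_io_error_detected, /* triage: fatal vs. resettable */
        .slot_reset     = qlcnic_83xx_io_slot_reset,     /* re-enable + reinit after reset */
        .resume         = qlcnic_83xx_io_resume,         /* clear AER state, restart polling */
    };

    static struct pci_driver qlcnic_driver = {
        .name        = "qlcnic",
        .err_handler = &qlcnic_83xx_err_handler,
        /* .id_table, .probe, .remove, etc. as in the existing driver */
    };
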
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 272f56a2e14b..533e150503af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -84,11 +84,20 @@
 /* Firmware image definitions */
 #define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
 #define QLC_83XX_FW_FILE_NAME          "83xx_fw.bin"
+#define QLC_84XX_FW_FILE_NAME          "84xx_fw.bin"
 #define QLC_83XX_BOOT_FROM_FLASH       0
 #define QLC_83XX_BOOT_FROM_FILE        0x12345678
 
+#define QLC_FW_FILE_NAME_LEN           20
 #define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
 
+#define QLC_83XX_MBX_POST_BC_OP        0x1
+#define QLC_83XX_MBX_COMPLETION        0x0
+#define QLC_83XX_MBX_REQUEST           0x1
+
+#define QLC_83XX_MBX_TIMEOUT           (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP          5000000
+
 /* status descriptor mailbox data
  * @phy_addr_{low|high}: physical address of buffer
  * @sds_ring_size: buffer size
@@ -265,11 +274,7 @@ struct qlcnic_macvlan_mbx {
 
 struct qlc_83xx_fw_info {
     const struct firmware *fw;
-    u16 major_fw_version;
-    u8  minor_fw_version;
-    u8  sub_fw_version;
-    u8  fw_build_num;
-    u8  load_from_file;
+    char fw_file_name[QLC_FW_FILE_NAME_LEN];
 };
 
 struct qlc_83xx_reset {
@@ -288,6 +293,7 @@ struct qlc_83xx_reset {
 
 #define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
 #define QLC_83XX_IDC_GRACEFULL_RESET           0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP           0x4
 #define QLC_83XX_IDC_TIMESTAMP                 0
 #define QLC_83XX_IDC_DURATION                  1
 #define QLC_83XX_IDC_INIT_TIMEOUT_SECS         30
@@ -397,6 +403,7 @@ enum qlcnic_83xx_states {
 #define QLC_83XX_MAX_MC_COUNT 38
 #define QLC_83XX_MAX_UC_COUNT 4096
 
+#define QLC_83XX_PVID_STRIP_CAPABILITY          BIT_22
 #define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
 #define QLC_83XX_GET_LRO_CAPABILITY(val)        (val & 0x20)
 #define QLC_83XX_GET_LSO_CAPABILITY(val)        (val & 0x40)
@@ -404,6 +411,7 @@ enum qlcnic_83xx_states {
 #define QLC_83XX_GET_HW_LRO_CAPABILITY(val)     (val & 0x400)
 #define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
 #define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_ESWITCH_CAPABILITY             BIT_23
 #define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
 #define QLC_83XX_DEFAULT_MODE     0x0
 #define QLC_83XX_SRIOV_MODE       0x1
@@ -449,6 +457,20 @@ enum qlcnic_83xx_states {
 #define QLC_83xx_FLASH_MAX_WAIT_USEC 100
 #define QLC_83XX_FLASH_LOCK_TIMEOUT  10000
 
+enum qlc_83xx_mbx_cmd_type {
+    QLC_83XX_MBX_CMD_WAIT = 0,
+    QLC_83XX_MBX_CMD_NO_WAIT,
+    QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+    QLC_83XX_MBX_RESPONSE_WAIT = 0,
+    QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED  0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
+
 /* Additional registers in 83xx */
 enum qlc_83xx_ext_regs {
     QLCNIC_GLOBAL_RESET = 0,
@@ -498,8 +520,8 @@ enum qlc_83xx_ext_regs {
 
 /* 83xx functions */
 int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
 void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
 int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
 void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -540,7 +562,7 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
 void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
 int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
 int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
 void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
                                struct qlcnic_cmd_args *);
 int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
@@ -551,7 +573,7 @@ void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
 void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
 irqreturn_t qlcnic_83xx_handle_aen(int, void *);
 int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
 irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
 irqreturn_t qlcnic_83xx_intr(int, void *);
@@ -604,6 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
 int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
                                     struct qlcnic_info *, u8);
 int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
@@ -623,8 +646,6 @@ int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
 int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
 int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
 void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
@@ -634,4 +655,11 @@ int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
 int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
 int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
 int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+                                               pci_channel_state_t);
+pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+void qlcnic_83xx_io_resume(struct pci_dev *);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 345d987aede4..f09e787af0b2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -399,6 +399,7 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
     struct net_device *netdev = adapter->netdev;
 
     netif_device_detach(netdev);
+    qlcnic_83xx_detach_mailbox_work(adapter);
 
     /* Disable mailbox interrupt */
     qlcnic_83xx_disable_mbx_intr(adapter);
@@ -610,6 +611,9 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 {
     int err;
 
+    qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+    qlcnic_83xx_enable_mbx_interrupt(adapter);
+
     /* register for NIC IDC AEN Events */
     qlcnic_83xx_register_nic_idc_func(adapter, 1);
 
@@ -617,7 +621,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
     if (err)
         return err;
 
-    qlcnic_83xx_enable_mbx_intrpt(adapter);
+    qlcnic_83xx_enable_mbx_interrupt(adapter);
 
     if (qlcnic_83xx_configure_opmode(adapter)) {
         qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -631,6 +635,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 
     if (adapter->portnum == 0)
         qlcnic_set_drv_version(adapter);
+
+    qlcnic_dcb_get_info(adapter);
     qlcnic_83xx_idc_attach_driver(adapter);
 
     return 0;
@@ -641,7 +647,6 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
     struct qlcnic_hardware_context *ahw = adapter->ahw;
 
     qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
-    set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
     qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
     set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
 
@@ -792,7 +797,6 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
         ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
     } else {
         ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
-        return ret;
     }
 
     return ret;
@@ -811,9 +815,10 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
 **/
 static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
 {
-    u32 val;
     struct qlcnic_hardware_context *ahw = adapter->ahw;
+    struct qlcnic_mailbox *mbx = ahw->mailbox;
     int ret = 0;
+    u32 val;
 
     /* Perform NIC configuration based ready state entry actions */
     if (ahw->idc.state_entry(adapter))
@@ -825,7 +830,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
         dev_err(&adapter->pdev->dev,
                 "Error: device temperature %d above limits\n",
                 adapter->ahw->temp);
-        clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+        clear_bit(QLC_83XX_MBX_READY, &mbx->status);
         set_bit(__QLCNIC_RESETTING, &adapter->state);
         qlcnic_83xx_idc_detach_driver(adapter);
         qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -838,7 +843,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
     if (ret) {
         adapter->flags |= QLCNIC_FW_HANG;
         if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
-            clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+            clear_bit(QLC_83XX_MBX_READY, &mbx->status);
             set_bit(__QLCNIC_RESETTING, &adapter->state);
             qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
         }
@@ -846,6 +851,8 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
     }
 
     if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+        clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
         /* Move to need reset state and prepare for reset */
         qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
         return ret;
@@ -883,12 +890,13 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
 **/
 static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
 {
+    struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
     int ret = 0;
 
     if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
         qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
         set_bit(__QLCNIC_RESETTING, &adapter->state);
-        clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+        clear_bit(QLC_83XX_MBX_READY, &mbx->status);
         if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
             qlcnic_83xx_disable_vnic_mode(adapter, 1);
 
@@ -1080,7 +1088,6 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
     adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
 
     clear_bit(__QLCNIC_RESETTING, &adapter->state);
-    set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
    set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
 
     /* Check if reset recovery is disabled */
@@ -1191,6 +1198,9 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
 {
     u32 val;
 
+    if (qlcnic_sriov_vf_check(adapter))
+        return;
+
     if (qlcnic_83xx_lock_driver(adapter)) {
         dev_err(&adapter->pdev->dev,
                 "%s:failed, please retry\n", __func__);
@@ -1257,31 +1267,33 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
1257 1267
1258static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) 1268static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1259{ 1269{
1270 struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
1271 const struct firmware *fw = fw_info->fw;
1260 u32 dest, *p_cache; 1272 u32 dest, *p_cache;
1261 u64 addr; 1273 int i, ret = -EIO;
1262 u8 data[16]; 1274 u8 data[16];
1263 size_t size; 1275 size_t size;
1264 int i, ret = -EIO; 1276 u64 addr;
1265 1277
1266 dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); 1278 dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
1267 size = (adapter->ahw->fw_info.fw->size & ~0xF); 1279 size = (fw->size & ~0xF);
1268 p_cache = (u32 *)adapter->ahw->fw_info.fw->data; 1280 p_cache = (u32 *)fw->data;
1269 addr = (u64)dest; 1281 addr = (u64)dest;
1270 1282
1271 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1283 ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
1272 (u32 *)p_cache, size / 16); 1284 (u32 *)p_cache, size / 16);
1273 if (ret) { 1285 if (ret) {
1274 dev_err(&adapter->pdev->dev, "MS memory write failed\n"); 1286 dev_err(&adapter->pdev->dev, "MS memory write failed\n");
1275 release_firmware(adapter->ahw->fw_info.fw); 1287 release_firmware(fw);
1276 adapter->ahw->fw_info.fw = NULL; 1288 fw_info->fw = NULL;
1277 return -EIO; 1289 return -EIO;
1278 } 1290 }
1279 1291
1280 /* alignment check */ 1292 /* alignment check */
1281 if (adapter->ahw->fw_info.fw->size & 0xF) { 1293 if (fw->size & 0xF) {
1282 addr = dest + size; 1294 addr = dest + size;
1283 for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++) 1295 for (i = 0; i < (fw->size & 0xF); i++)
1284 data[i] = adapter->ahw->fw_info.fw->data[size + i]; 1296 data[i] = fw->data[size + i];
1285 for (; i < 16; i++) 1297 for (; i < 16; i++)
1286 data[i] = 0; 1298 data[i] = 0;
1287 ret = qlcnic_83xx_ms_mem_write128(adapter, addr, 1299 ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
@@ -1289,13 +1301,13 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1289 if (ret) { 1301 if (ret) {
1290 dev_err(&adapter->pdev->dev, 1302 dev_err(&adapter->pdev->dev,
1291 "MS memory write failed\n"); 1303 "MS memory write failed\n");
1292 release_firmware(adapter->ahw->fw_info.fw); 1304 release_firmware(fw);
1293 adapter->ahw->fw_info.fw = NULL; 1305 fw_info->fw = NULL;
1294 return -EIO; 1306 return -EIO;
1295 } 1307 }
1296 } 1308 }
1297 release_firmware(adapter->ahw->fw_info.fw); 1309 release_firmware(fw);
1298 adapter->ahw->fw_info.fw = NULL; 1310 fw_info->fw = NULL;
1299 1311
1300 return 0; 1312 return 0;
1301} 1313}
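
Note: the hunk above streams the firmware image to card memory in 16-byte units
(qlcnic_83xx_ms_mem_write128) and zero-pads the sub-16-byte tail into a scratch
buffer. A minimal userspace sketch of that tail handling follows; write128() is
a stand-in for the MS-memory writer and every name here is illustrative, not
the driver's:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    static void write128(uint64_t addr, const uint8_t *chunk)
    {
        (void)chunk;                    /* a real sink would consume it */
        printf("write 16 bytes @ 0x%llx\n", (unsigned long long)addr);
    }

    static void copy_image(uint64_t dest, const uint8_t *data, size_t size)
    {
        size_t aligned = size & ~(size_t)0xF;   /* bulk, multiple of 16 */
        size_t tail = size & 0xF;               /* 0..15 leftover bytes */
        uint8_t pad[16] = { 0 };

        for (size_t off = 0; off < aligned; off += 16)
            write128(dest + off, data + off);

        if (tail) {                             /* zero-pad the remainder */
            memcpy(pad, data + aligned, tail);
            write128(dest + aligned, pad);
        }
    }

    int main(void)
    {
        uint8_t img[37] = { 0 };

        copy_image(0x100000, img, sizeof(img)); /* 2 full chunks + 5-byte tail */
        return 0;
    }
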
@@ -1941,10 +1953,11 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
1941 1953
1942static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) 1954static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
1943{ 1955{
1956 struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
1944 int err = -EIO; 1957 int err = -EIO;
1945 1958
1946 if (request_firmware(&adapter->ahw->fw_info.fw, 1959 if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
1947 QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) { 1960 &(adapter->pdev->dev))) {
1948 dev_err(&adapter->pdev->dev, 1961 dev_err(&adapter->pdev->dev,
1949 "No file FW image, loading flash FW image.\n"); 1962 "No file FW image, loading flash FW image.\n");
1950 QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, 1963 QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
@@ -1990,36 +2003,6 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
1990 return 0; 2003 return 0;
1991} 2004}
1992 2005
1993/**
1994* qlcnic_83xx_config_default_opmode
1995*
1996* @adapter: adapter structure
1997*
1998* Configure default driver operating mode
1999*
2000* Returns: Error code or Success(0)
2001* */
2002int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
2003{
2004 u32 op_mode;
2005 struct qlcnic_hardware_context *ahw = adapter->ahw;
2006
2007 qlcnic_get_func_no(adapter);
2008 op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
2009
2010 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
2011 op_mode = QLC_83XX_DEFAULT_OPMODE;
2012
2013 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
2014 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2015 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2016 } else {
2017 return -EIO;
2018 }
2019
2020 return 0;
2021}
2022
2023int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) 2006int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2024{ 2007{
2025 int err; 2008 int err;
@@ -2039,26 +2022,26 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2039 ahw->max_mac_filters = nic_info.max_mac_filters; 2022 ahw->max_mac_filters = nic_info.max_mac_filters;
2040 ahw->max_mtu = nic_info.max_mtu; 2023 ahw->max_mtu = nic_info.max_mtu;
2041 2024
2042 /* VNIC mode is detected by BIT_23 in capabilities. This bit is also 2025 /* eSwitch capability indicates vNIC mode.
2043 * set in case device is SRIOV capable. VNIC and SRIOV are mutually 2026 * vNIC and SRIOV are mutually exclusive operational modes.
2044 * exclusive. So in case of sriov capable device load driver in 2027 * If SR-IOV capability is detected, SR-IOV physical function
2045 * default mode 2028 * will get initialized in default mode.
2029 * SR-IOV virtual function initialization follows a
2030 * different code path and opmode.
2031 * SRIOV mode has precedence over vNIC mode.
2046 */ 2032 */
2047 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) { 2033 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
2048 ahw->nic_mode = QLC_83XX_DEFAULT_MODE; 2034 return QLC_83XX_DEFAULT_OPMODE;
2049 return ahw->nic_mode;
2050 }
2051 2035
2052 if (ahw->capabilities & BIT_23) 2036 if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
2053 ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; 2037 return QLC_83XX_VIRTUAL_NIC_MODE;
2054 else
2055 ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
2056 2038
2057 return ahw->nic_mode; 2039 return QLC_83XX_DEFAULT_OPMODE;
2058} 2040}
2059 2041
2060int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) 2042int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2061{ 2043{
2044 struct qlcnic_hardware_context *ahw = adapter->ahw;
2062 int ret; 2045 int ret;
2063 2046
2064 ret = qlcnic_83xx_get_nic_configuration(adapter); 2047 ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2066,11 +2049,16 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2066 return -EIO; 2049 return -EIO;
2067 2050
2068 if (ret == QLC_83XX_VIRTUAL_NIC_MODE) { 2051 if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
2052 ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
2069 if (qlcnic_83xx_config_vnic_opmode(adapter)) 2053 if (qlcnic_83xx_config_vnic_opmode(adapter))
2070 return -EIO; 2054 return -EIO;
2071 } else if (ret == QLC_83XX_DEFAULT_MODE) { 2055
2072 if (qlcnic_83xx_config_default_opmode(adapter)) 2056 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
2073 return -EIO; 2057 ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
2058 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2059 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2060 } else {
2061 return -EIO;
2074 } 2062 }
2075 2063
2076 return 0; 2064 return 0;
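
Note: after this refactor qlcnic_83xx_get_nic_configuration() returns the
operating mode instead of caching it in ahw->nic_mode, and
qlcnic_83xx_configure_opmode() owns the dispatch. The precedence it encodes,
reduced to a pure function (constants illustrative; QLC_83XX_ESWITCH_CAPABILITY
is the newly named BIT_23):

    #include <stdio.h>

    enum { OP_DEFAULT = 1, OP_VNIC = 2 };
    #define CAP_ESWITCH (1u << 23)

    static int pick_opmode(int sriov_capable, unsigned int caps)
    {
        if (sriov_capable)          /* SR-IOV PF: always default mode */
            return OP_DEFAULT;
        if (caps & CAP_ESWITCH)     /* eSwitch capability => vNIC mode */
            return OP_VNIC;
        return OP_DEFAULT;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               pick_opmode(1, CAP_ESWITCH),  /* 1: SR-IOV wins */
               pick_opmode(0, CAP_ESWITCH),  /* 2: vNIC */
               pick_opmode(0, 0));           /* 1: default */
        return 0;
    }
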
@@ -2139,20 +2127,82 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
2139 } 2127 }
2140} 2128}
2141 2129
2130static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
2131{
2132 struct qlcnic_hardware_context *ahw = adapter->ahw;
2133 struct pci_dev *pdev = adapter->pdev;
2134 struct qlc_83xx_fw_info *fw_info;
2135 int err = 0;
2136
2137 ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
2138 if (!ahw->fw_info) {
2139 err = -ENOMEM;
2140 } else {
2141 fw_info = ahw->fw_info;
2142 switch (pdev->device) {
2143 case PCI_DEVICE_ID_QLOGIC_QLE834X:
2144 strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
2145 QLC_FW_FILE_NAME_LEN);
2146 break;
2147 case PCI_DEVICE_ID_QLOGIC_QLE844X:
2148 strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
2149 QLC_FW_FILE_NAME_LEN);
2150 break;
2151 default:
2152 dev_err(&pdev->dev, "%s: Invalid device id\n",
2153 __func__);
2154 err = -EINVAL;
2155 break;
2156 }
2157 }
2158
2159 return err;
2160}
2161
2162
2142int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) 2163int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2143{ 2164{
2144 struct qlcnic_hardware_context *ahw = adapter->ahw; 2165 struct qlcnic_hardware_context *ahw = adapter->ahw;
2166 int err = 0;
2145 2167
2146 if (qlcnic_sriov_vf_check(adapter)) 2168 ahw->msix_supported = !!qlcnic_use_msi_x;
2147 return qlcnic_sriov_vf_init(adapter, pci_using_dac); 2169 err = qlcnic_83xx_init_mailbox_work(adapter);
2170 if (err)
2171 goto exit;
2148 2172
2149 if (qlcnic_83xx_check_hw_status(adapter)) 2173 if (qlcnic_sriov_vf_check(adapter)) {
2150 return -EIO; 2174 err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
2175 if (err)
2176 goto detach_mbx;
2177 else
2178 return err;
2179 }
2180
2181 err = qlcnic_83xx_check_hw_status(adapter);
2182 if (err)
2183 goto detach_mbx;
2184
2185 if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
2186 qlcnic_83xx_read_flash_mfg_id(adapter);
2187
2188 err = qlcnic_83xx_get_fw_info(adapter);
2189 if (err)
2190 goto detach_mbx;
2151 2191
2152 /* Initialize 83xx mailbox spinlock */ 2192 err = qlcnic_83xx_idc_init(adapter);
2153 spin_lock_init(&ahw->mbx_lock); 2193 if (err)
2194 goto clear_fw_info;
2195
2196 err = qlcnic_setup_intr(adapter, 0, 0);
2197 if (err) {
2198 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
2199 goto disable_intr;
2200 }
2201
2202 err = qlcnic_83xx_setup_mbx_intr(adapter);
2203 if (err)
2204 goto disable_mbx_intr;
2154 2205
2155 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
2156 qlcnic_83xx_clear_function_resources(adapter); 2206 qlcnic_83xx_clear_function_resources(adapter);
2157 2207
2158 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); 2208 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
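
Note: qlcnic_83xx_init() now unwinds on failure through a goto ladder instead
of returning -EIO directly, so each label releases exactly what the preceding
steps acquired, in reverse order. The generic shape of that pattern, as a
standalone sketch (resource names illustrative):

    #include <stdio.h>

    static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
    static void release(const char *what) { printf("release %s\n", what); }

    static int init_example(void)
    {
        int err;

        err = acquire("mailbox");
        if (err)
            goto out;
        err = acquire("fw_info");
        if (err)
            goto detach_mbx;
        err = acquire("interrupts");
        if (err)
            goto free_fw_info;
        return 0;

    free_fw_info:
        release("fw_info");
    detach_mbx:
        release("mailbox");
    out:
        return err;
    }

    int main(void) { return init_example(); }
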
@@ -2160,22 +2210,90 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2160 /* register for NIC IDC AEN Events */ 2210 /* register for NIC IDC AEN Events */
2161 qlcnic_83xx_register_nic_idc_func(adapter, 1); 2211 qlcnic_83xx_register_nic_idc_func(adapter, 1);
2162 2212
2163 if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
2164 qlcnic_83xx_read_flash_mfg_id(adapter);
2165
2166 if (qlcnic_83xx_idc_init(adapter))
2167 return -EIO;
2168
2169 /* Configure default, SR-IOV or Virtual NIC mode of operation */ 2213 /* Configure default, SR-IOV or Virtual NIC mode of operation */
2170 if (qlcnic_83xx_configure_opmode(adapter)) 2214 err = qlcnic_83xx_configure_opmode(adapter);
2171 return -EIO; 2215 if (err)
2216 goto disable_mbx_intr;
2172 2217
2173 /* Perform operating mode specific initialization */ 2218 /* Perform operating mode specific initialization */
2174 if (adapter->nic_ops->init_driver(adapter)) 2219 err = adapter->nic_ops->init_driver(adapter);
2175 return -EIO; 2220 if (err)
2221 goto disable_mbx_intr;
2222
2223 if (adapter->dcb && qlcnic_dcb_attach(adapter))
2224 qlcnic_clear_dcb_ops(adapter);
2176 2225
2177 /* Periodically monitor device status */ 2226 /* Periodically monitor device status */
2178 qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); 2227 qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
2228 return 0;
2229
2230disable_mbx_intr:
2231 qlcnic_83xx_free_mbx_intr(adapter);
2232
2233disable_intr:
2234 qlcnic_teardown_intr(adapter);
2235
2236clear_fw_info:
2237 kfree(ahw->fw_info);
2238
2239detach_mbx:
2240 qlcnic_83xx_detach_mailbox_work(adapter);
2241 qlcnic_83xx_free_mailbox(ahw->mailbox);
2242exit:
2243 return err;
2244}
2245
2246void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
2247{
2248 struct qlcnic_hardware_context *ahw = adapter->ahw;
2249 struct qlc_83xx_idc *idc = &ahw->idc;
2250
2251 clear_bit(QLC_83XX_MBX_READY, &idc->status);
2252 cancel_delayed_work_sync(&adapter->fw_work);
2253
2254 if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
2255 qlcnic_83xx_disable_vnic_mode(adapter, 1);
2256
2257 qlcnic_83xx_idc_detach_driver(adapter);
2258 qlcnic_83xx_register_nic_idc_func(adapter, 0);
2259
2260 cancel_delayed_work_sync(&adapter->idc_aen_work);
2261}
2262
2263int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
2264{
2265 struct qlcnic_hardware_context *ahw = adapter->ahw;
2266 struct qlc_83xx_idc *idc = &ahw->idc;
2267 int ret = 0;
2268 u32 owner;
2269
2270 /* Mark the previous IDC state as NEED_RESET so
2271 * that state_entry() will perform the reattachment
2272 * and bringup the device
2273 */
2274 idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
2275 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
2276 if (ahw->pci_func == owner) {
2277 ret = qlcnic_83xx_restart_hw(adapter);
2278 if (ret < 0)
2279 return ret;
2280 qlcnic_83xx_idc_clear_registers(adapter, 0);
2281 }
2282
2283 ret = idc->state_entry(adapter);
2284 return ret;
2285}
2286
2287void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
2288{
2289 struct qlcnic_hardware_context *ahw = adapter->ahw;
2290 struct qlc_83xx_idc *idc = &ahw->idc;
2291 u32 owner;
2292
2293 idc->prev_state = QLC_83XX_IDC_DEV_READY;
2294 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
2295 if (ahw->pci_func == owner)
2296 qlcnic_83xx_idc_enter_ready_state(adapter, 0);
2179 2297
2180 return adapter->ahw->idc.err_code; 2298 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
2181} 2299}
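
Note: the three new AER helpers map naturally onto the kernel's PCI
error-recovery callbacks: stop IDC polling on error_detected, re-initialize
hardware on slot_reset, restart polling on resume. A sketch of that assumed
wiring follows; the callback names and the error mapping here are hypothetical,
and the real hookup lives elsewhere in the driver:

    #include <linux/pci.h>

    static pci_ers_result_t qlc_io_error_detected(struct pci_dev *pdev,
                                                  pci_channel_state_t state)
    {
        struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);

        qlcnic_83xx_aer_stop_poll_work(adapter);    /* quiesce IDC polling */
        return PCI_ERS_RESULT_NEED_RESET;
    }

    static pci_ers_result_t qlc_io_slot_reset(struct pci_dev *pdev)
    {
        struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);

        /* reset owner restarts hw; everyone re-enters the IDC state */
        return qlcnic_83xx_aer_reset(adapter) ?
               PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
    }

    static void qlc_io_resume(struct pci_dev *pdev)
    {
        qlcnic_83xx_aer_start_poll_work(pci_get_drvdata(pdev));
    }

    static const struct pci_error_handlers qlc_err_handler = {
        .error_detected = qlc_io_error_detected,
        .slot_reset     = qlc_io_slot_reset,
        .resume         = qlc_io_resume,
    };
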
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 599d1fda52f2..0248a4c2f5dd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -208,7 +208,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
208 return -EIO; 208 return -EIO;
209 } 209 }
210 210
211 if (ahw->capabilities & BIT_23) 211 if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
212 adapter->flags |= QLCNIC_ESWITCH_ENABLED; 212 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
213 else 213 else
214 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; 214 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
@@ -239,3 +239,41 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
239 239
240 return 0; 240 return 0;
241} 241}
242
243static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
244 int func, int *port_id)
245{
246 struct qlcnic_info nic_info;
247 int err = 0;
248
249 memset(&nic_info, 0, sizeof(struct qlcnic_info));
250
251 err = qlcnic_get_nic_info(adapter, &nic_info, func);
252 if (err)
253 return err;
254
255 if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
256 *port_id = nic_info.phys_port;
257 else
258 err = -EIO;
259
260 return err;
261}
262
263int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
264{
265 int id, err = 0;
266
267 err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
268 if (err)
269 return err;
270
271 if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
272 if (!qlcnic_enable_eswitch(adapter, id, 1))
273 adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
274 else
275 err = -EIO;
276 }
277
278 return err;
279}
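
Note: qlcnic_83xx_enable_port_eswitch() above is idempotent: QLCNIC_SWITCH_ENABLE
in the per-eswitch flags records a successful bring-up, so repeat calls become
no-ops. Standalone sketch of the pattern (all names illustrative; hw_enable()
stands in for the firmware call):

    #include <stdio.h>

    #define SWITCH_ENABLE 0x1

    struct eswitch { unsigned int flags; };

    static int hw_enable(struct eswitch *sw) { (void)sw; return 0; }

    static int enable_port_eswitch(struct eswitch *sw)
    {
        if (sw->flags & SWITCH_ENABLE)
            return 0;                   /* already enabled */
        if (hw_enable(sw))
            return -1;
        sw->flags |= SWITCH_ENABLE;     /* record success */
        return 0;
    }

    int main(void)
    {
        struct eswitch sw = { 0 };

        printf("%d %d\n", enable_port_eswitch(&sw), enable_port_eswitch(&sw));
        return 0;
    }
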
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d09389b33474..86850dd633a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -38,6 +38,9 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, 38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
39 {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, 39 {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
40 {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, 40 {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
41 {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
42 {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
43 {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
41}; 44};
42 45
43static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) 46static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
@@ -171,6 +174,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
171 break; 174 break;
172 } 175 }
173 dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]); 176 dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
177 qlcnic_dump_mbx(adapter, cmd);
174 } else if (rsp == QLCNIC_CDRP_RSP_OK) 178 } else if (rsp == QLCNIC_CDRP_RSP_OK)
175 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; 179 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
176 180
@@ -243,40 +247,38 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
243 247
244int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) 248int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
245{ 249{
246 void *addr; 250 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
247 struct qlcnic_hostrq_rx_ctx *prq; 251 struct qlcnic_hardware_context *ahw = adapter->ahw;
248 struct qlcnic_cardrsp_rx_ctx *prsp; 252 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
249 struct qlcnic_hostrq_rds_ring *prq_rds; 253 struct net_device *netdev = adapter->netdev;
250 struct qlcnic_hostrq_sds_ring *prq_sds; 254 u32 temp_intr_crb_mode, temp_rds_crb_mode;
251 struct qlcnic_cardrsp_rds_ring *prsp_rds; 255 struct qlcnic_cardrsp_rds_ring *prsp_rds;
252 struct qlcnic_cardrsp_sds_ring *prsp_sds; 256 struct qlcnic_cardrsp_sds_ring *prsp_sds;
257 struct qlcnic_hostrq_rds_ring *prq_rds;
258 struct qlcnic_hostrq_sds_ring *prq_sds;
253 struct qlcnic_host_rds_ring *rds_ring; 259 struct qlcnic_host_rds_ring *rds_ring;
254 struct qlcnic_host_sds_ring *sds_ring; 260 struct qlcnic_host_sds_ring *sds_ring;
255 struct qlcnic_cmd_args cmd; 261 struct qlcnic_cardrsp_rx_ctx *prsp;
256 262 struct qlcnic_hostrq_rx_ctx *prq;
257 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
258 u64 phys_addr;
259
260 u8 i, nrds_rings, nsds_rings; 263 u8 i, nrds_rings, nsds_rings;
261 u16 temp_u16; 264 struct qlcnic_cmd_args cmd;
262 size_t rq_size, rsp_size; 265 size_t rq_size, rsp_size;
263 u32 cap, reg, val, reg2; 266 u32 cap, reg, val, reg2;
267 u64 phys_addr;
268 u16 temp_u16;
269 void *addr;
264 int err; 270 int err;
265 271
266 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
267
268 nrds_rings = adapter->max_rds_rings; 272 nrds_rings = adapter->max_rds_rings;
269 nsds_rings = adapter->max_sds_rings; 273 nsds_rings = adapter->max_sds_rings;
270 274
271 rq_size = 275 rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
272 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, 276 nsds_rings);
273 nsds_rings); 277 rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
274 rsp_size = 278 nsds_rings);
275 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
276 nsds_rings);
277 279
278 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 280 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
279 &hostrq_phys_addr, GFP_KERNEL); 281 &hostrq_phys_addr, GFP_KERNEL);
280 if (addr == NULL) 282 if (addr == NULL)
281 return -ENOMEM; 283 return -ENOMEM;
282 prq = addr; 284 prq = addr;
@@ -295,15 +297,20 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
295 | QLCNIC_CAP0_VALIDOFF); 297 | QLCNIC_CAP0_VALIDOFF);
296 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); 298 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
297 299
298 temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); 300 if (qlcnic_check_multi_tx(adapter) &&
299 prq->valid_field_offset = cpu_to_le16(temp_u16); 301 !adapter->ahw->diag_test) {
300 prq->txrx_sds_binding = nsds_rings - 1; 302 cap |= QLCNIC_CAP0_TX_MULTI;
303 } else {
304 temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
305 prq->valid_field_offset = cpu_to_le16(temp_u16);
306 prq->txrx_sds_binding = nsds_rings - 1;
307 temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
308 prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
309 temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
310 prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
311 }
301 312
302 prq->capabilities[0] = cpu_to_le32(cap); 313 prq->capabilities[0] = cpu_to_le32(cap);
303 prq->host_int_crb_mode =
304 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
305 prq->host_rds_crb_mode =
306 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
307 314
308 prq->num_rds_rings = cpu_to_le16(nrds_rings); 315 prq->num_rds_rings = cpu_to_le16(nrds_rings);
309 prq->num_sds_rings = cpu_to_le16(nsds_rings); 316 prq->num_sds_rings = cpu_to_le16(nsds_rings);
@@ -317,10 +324,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
317 le32_to_cpu(prq->rds_ring_offset)); 324 le32_to_cpu(prq->rds_ring_offset));
318 325
319 for (i = 0; i < nrds_rings; i++) { 326 for (i = 0; i < nrds_rings; i++) {
320
321 rds_ring = &recv_ctx->rds_rings[i]; 327 rds_ring = &recv_ctx->rds_rings[i];
322 rds_ring->producer = 0; 328 rds_ring->producer = 0;
323
324 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); 329 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
325 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); 330 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
326 prq_rds[i].ring_kind = cpu_to_le32(i); 331 prq_rds[i].ring_kind = cpu_to_le32(i);
@@ -331,14 +336,16 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
331 le32_to_cpu(prq->sds_ring_offset)); 336 le32_to_cpu(prq->sds_ring_offset));
332 337
333 for (i = 0; i < nsds_rings; i++) { 338 for (i = 0; i < nsds_rings; i++) {
334
335 sds_ring = &recv_ctx->sds_rings[i]; 339 sds_ring = &recv_ctx->sds_rings[i];
336 sds_ring->consumer = 0; 340 sds_ring->consumer = 0;
337 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); 341 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
338
339 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); 342 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
340 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); 343 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
341 prq_sds[i].msi_index = cpu_to_le16(i); 344 if (qlcnic_check_multi_tx(adapter) &&
345 !adapter->ahw->diag_test)
346 prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
347 else
348 prq_sds[i].msi_index = cpu_to_le16(i);
342 } 349 }
343 350
344 phys_addr = hostrq_phys_addr; 351 phys_addr = hostrq_phys_addr;
@@ -361,9 +368,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
361 368
362 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { 369 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
363 rds_ring = &recv_ctx->rds_rings[i]; 370 rds_ring = &recv_ctx->rds_rings[i];
364
365 reg = le32_to_cpu(prsp_rds[i].host_producer_crb); 371 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
366 rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg; 372 rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
367 } 373 }
368 374
369 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) 375 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -371,24 +377,30 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
371 377
372 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { 378 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
373 sds_ring = &recv_ctx->sds_rings[i]; 379 sds_ring = &recv_ctx->sds_rings[i];
374
375 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); 380 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
376 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); 381 if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
382 reg2 = ahw->intr_tbl[i].src;
383 else
384 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
377 385
378 sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg; 386 sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
379 sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2; 387 sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
380 } 388 }
381 389
382 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); 390 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
383 recv_ctx->context_id = le16_to_cpu(prsp->context_id); 391 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
384 recv_ctx->virt_port = prsp->virt_port; 392 recv_ctx->virt_port = prsp->virt_port;
385 393
394 netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
395 recv_ctx->context_id, recv_ctx->state);
386 qlcnic_free_mbx_args(&cmd); 396 qlcnic_free_mbx_args(&cmd);
397
387out_free_rsp: 398out_free_rsp:
388 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, 399 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
389 cardrsp_phys_addr); 400 cardrsp_phys_addr);
390out_free_rq: 401out_free_rq:
391 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); 402 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
403
392 return err; 404 return err;
393} 405}
394 406
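
Note: the rx-context hunk above follows the driver's usual firmware handshake:
allocate coherent request and response buffers, fill the request, hand both DMA
addresses to firmware, decode the response, and free both buffers on every
path. A kernel-context sketch of that shape (the qlc_-prefixed name is
illustrative; the mailbox call itself is elided):

    #include <linux/dma-mapping.h>

    static int qlc_fw_exchange(struct device *dev, size_t rq_size,
                               size_t rsp_size)
    {
        dma_addr_t rq_dma, rsp_dma;
        void *rq, *rsp;
        int err = 0;

        rq = dma_alloc_coherent(dev, rq_size, &rq_dma, GFP_KERNEL);
        if (!rq)
            return -ENOMEM;

        rsp = dma_alloc_coherent(dev, rsp_size, &rsp_dma, GFP_KERNEL);
        if (!rsp) {
            err = -ENOMEM;
            goto out_free_rq;
        }

        /* ... fill *rq, pass rq_dma/rsp_dma to firmware, decode *rsp ... */

        dma_free_coherent(dev, rsp_size, rsp, rsp_dma);
    out_free_rq:
        dma_free_coherent(dev, rq_size, rq, rq_dma);
        return err;
    }
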
@@ -416,16 +428,19 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
416 struct qlcnic_host_tx_ring *tx_ring, 428 struct qlcnic_host_tx_ring *tx_ring,
417 int ring) 429 int ring)
418{ 430{
431 struct qlcnic_hardware_context *ahw = adapter->ahw;
432 struct net_device *netdev = adapter->netdev;
419 struct qlcnic_hostrq_tx_ctx *prq; 433 struct qlcnic_hostrq_tx_ctx *prq;
420 struct qlcnic_hostrq_cds_ring *prq_cds; 434 struct qlcnic_hostrq_cds_ring *prq_cds;
421 struct qlcnic_cardrsp_tx_ctx *prsp; 435 struct qlcnic_cardrsp_tx_ctx *prsp;
422 void *rq_addr, *rsp_addr;
423 size_t rq_size, rsp_size;
424 u32 temp;
425 struct qlcnic_cmd_args cmd; 436 struct qlcnic_cmd_args cmd;
426 int err; 437 u32 temp, intr_mask, temp_int_crb_mode;
427 u64 phys_addr; 438 dma_addr_t rq_phys_addr, rsp_phys_addr;
428 dma_addr_t rq_phys_addr, rsp_phys_addr; 439 int temp_nsds_rings, index, err;
440 void *rq_addr, *rsp_addr;
441 size_t rq_size, rsp_size;
442 u64 phys_addr;
443 u16 msix_id;
429 444
430 /* reset host resources */ 445 /* reset host resources */
431 tx_ring->producer = 0; 446 tx_ring->producer = 0;
@@ -433,32 +448,42 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
433 *(tx_ring->hw_consumer) = 0; 448 *(tx_ring->hw_consumer) = 0;
434 449
435 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 450 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
436 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 451 rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
437 &rq_phys_addr, GFP_KERNEL | __GFP_ZERO); 452 &rq_phys_addr, GFP_KERNEL);
438 if (!rq_addr) 453 if (!rq_addr)
439 return -ENOMEM; 454 return -ENOMEM;
440 455
441 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 456 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
442 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, 457 rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
443 &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO); 458 &rsp_phys_addr, GFP_KERNEL);
444 if (!rsp_addr) { 459 if (!rsp_addr) {
445 err = -ENOMEM; 460 err = -ENOMEM;
446 goto out_free_rq; 461 goto out_free_rq;
447 } 462 }
448 463
449 prq = rq_addr; 464 prq = rq_addr;
450
451 prsp = rsp_addr; 465 prsp = rsp_addr;
452 466
453 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); 467 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
454 468
455 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | 469 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
456 QLCNIC_CAP0_LSO); 470 QLCNIC_CAP0_LSO);
471 if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
472 temp |= QLCNIC_CAP0_TX_MULTI;
473
457 prq->capabilities[0] = cpu_to_le32(temp); 474 prq->capabilities[0] = cpu_to_le32(temp);
458 475
459 prq->host_int_crb_mode = 476 if (qlcnic_check_multi_tx(adapter) &&
460 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); 477 !adapter->ahw->diag_test) {
461 prq->msi_index = 0; 478 temp_nsds_rings = adapter->max_sds_rings;
479 index = temp_nsds_rings + ring;
480 msix_id = ahw->intr_tbl[index].id;
481 prq->msi_index = cpu_to_le16(msix_id);
482 } else {
483 temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
484 prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
485 prq->msi_index = 0;
486 }
462 487
463 prq->interrupt_ctl = 0; 488 prq->interrupt_ctl = 0;
464 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); 489 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
@@ -480,15 +505,25 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
480 err = qlcnic_issue_cmd(adapter, &cmd); 505 err = qlcnic_issue_cmd(adapter, &cmd);
481 506
482 if (err == QLCNIC_RCODE_SUCCESS) { 507 if (err == QLCNIC_RCODE_SUCCESS) {
508 tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
483 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 509 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
484 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; 510 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
485 tx_ring->ctx_id = le16_to_cpu(prsp->context_id); 511 tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
512 if (qlcnic_check_multi_tx(adapter) &&
513 !adapter->ahw->diag_test &&
514 (adapter->flags & QLCNIC_MSIX_ENABLED)) {
515 index = adapter->max_sds_rings + ring;
516 intr_mask = ahw->intr_tbl[index].src;
517 tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
518 }
519
520 netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
521 tx_ring->ctx_id, tx_ring->state);
486 } else { 522 } else {
487 dev_err(&adapter->pdev->dev, 523 netdev_err(netdev, "Failed to create tx ctx in firmware%d\n",
488 "Failed to create tx ctx in firmware%d\n", err); 524 err);
489 err = -EIO; 525 err = -EIO;
490 } 526 }
491
492 qlcnic_free_mbx_args(&cmd); 527 qlcnic_free_mbx_args(&cmd);
493 528
494out_free_rsp: 529out_free_rsp:
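
Note: with multi-TX enabled, each TX ring's MSI-X entry sits after the SDS (RX)
entries in ahw->intr_tbl, which is why both tx-context hunks above compute
index = max_sds_rings + ring. Trivial demonstration of that vector layout:

    #include <stdio.h>

    int main(void)
    {
        int max_sds_rings = 4;      /* RX vectors occupy intr_tbl[0..3] */

        for (int ring = 0; ring < 4; ring++)
            printf("tx ring %d -> intr_tbl[%d]\n", ring,
                   max_sds_rings + ring);
        return 0;
    }
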
@@ -618,6 +653,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
618 } 653 }
619 } 654 }
620 655
656 if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
657 qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
658 err = qlcnic_82xx_mq_intrpt(dev, 1);
659 if (err)
660 return err;
661 }
662
621 err = qlcnic_fw_cmd_create_rx_ctx(dev); 663 err = qlcnic_fw_cmd_create_rx_ctx(dev);
622 if (err) 664 if (err)
623 goto err_out; 665 goto err_out;
@@ -639,13 +681,19 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
639 } 681 }
640 682
641 set_bit(__QLCNIC_FW_ATTACHED, &dev->state); 683 set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
684
642 return 0; 685 return 0;
643 686
644err_out: 687err_out:
688 if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
689 qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
690 qlcnic_82xx_config_intrpt(dev, 0);
691
645 if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { 692 if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
646 if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 693 if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
647 qlcnic_83xx_config_intrpt(dev, 0); 694 qlcnic_83xx_config_intrpt(dev, 0);
648 } 695 }
696
649 return err; 697 return err;
650} 698}
651 699
@@ -659,6 +707,12 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
659 qlcnic_fw_cmd_del_tx_ctx(adapter, 707 qlcnic_fw_cmd_del_tx_ctx(adapter,
660 &adapter->tx_ring[ring]); 708 &adapter->tx_ring[ring]);
661 709
710 if (qlcnic_82xx_check(adapter) &&
711 (adapter->flags & QLCNIC_MSIX_ENABLED) &&
712 qlcnic_check_multi_tx(adapter) &&
713 !adapter->ahw->diag_test)
714 qlcnic_82xx_config_intrpt(adapter, 0);
715
662 if (qlcnic_83xx_check(adapter) && 716 if (qlcnic_83xx_check(adapter) &&
663 (adapter->flags & QLCNIC_MSIX_ENABLED)) { 717 (adapter->flags & QLCNIC_MSIX_ENABLED)) {
664 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 718 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
@@ -723,8 +777,54 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
723 } 777 }
724} 778}
725 779
780int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
781{
782 struct qlcnic_hardware_context *ahw = adapter->ahw;
783 struct net_device *netdev = adapter->netdev;
784 struct qlcnic_cmd_args cmd;
785 u32 type, val;
786 int i, err = 0;
787
788 for (i = 0; i < ahw->num_msix; i++) {
789 qlcnic_alloc_mbx_args(&cmd, adapter,
790 QLCNIC_CMD_MQ_TX_CONFIG_INTR);
791 type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
792 val = type | (ahw->intr_tbl[i].type << 4);
793 if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
794 val |= (ahw->intr_tbl[i].id << 16);
795 cmd.req.arg[1] = val;
796 err = qlcnic_issue_cmd(adapter, &cmd);
797 if (err) {
798 netdev_err(netdev, "Failed to %s interrupts %d\n",
799 op_type == QLCNIC_INTRPT_ADD ? "Add" :
800 "Delete", err);
801 qlcnic_free_mbx_args(&cmd);
802 return err;
803 }
804 val = cmd.rsp.arg[1];
805 if (LSB(val)) {
806 netdev_info(netdev,
807 "failed to configure interrupt for %d\n",
808 ahw->intr_tbl[i].id);
809 continue;
810 }
811 if (op_type) {
812 ahw->intr_tbl[i].id = MSW(val);
813 ahw->intr_tbl[i].enabled = 1;
814 ahw->intr_tbl[i].src = cmd.rsp.arg[2];
815 } else {
816 ahw->intr_tbl[i].id = i;
817 ahw->intr_tbl[i].enabled = 0;
818 ahw->intr_tbl[i].src = 0;
819 }
820 qlcnic_free_mbx_args(&cmd);
821 }
822
823 return err;
824}
726 825
727int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) 826int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
827 u8 function)
728{ 828{
729 int err, i; 829 int err, i;
730 struct qlcnic_cmd_args cmd; 830 struct qlcnic_cmd_args cmd;
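
Note: qlcnic_82xx_config_intrpt() above packs one 32-bit config word per
vector: the op type in the low nibble, the interrupt type shifted left by 4,
and (for MSI-X) the vector id in the upper 16 bits; on success the firmware
echoes status in the low byte and the assigned id in the upper half. Sketch of
the request-word packing (field positions from the code; the macro values here
are assumed, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define INTRPT_ADD  1u          /* op type, low nibble (value assumed) */
    #define TYPE_MSIX   3u          /* interrupt type, bits 4.. (value assumed) */

    int main(void)
    {
        uint32_t id = 7;            /* MSI-X vector id, bits 16..31 */
        uint32_t val = INTRPT_ADD | (TYPE_MSIX << 4) | (id << 16);

        printf("config word: 0x%08x\n", val);   /* -> 0x00070031 */
        return 0;
    }
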
@@ -734,7 +834,7 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
734 if (err) 834 if (err)
735 return err; 835 return err;
736 836
737 cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8; 837 cmd.req.arg[1] = function | BIT_8;
738 err = qlcnic_issue_cmd(adapter, &cmd); 838 err = qlcnic_issue_cmd(adapter, &cmd);
739 839
740 if (err == QLCNIC_RCODE_SUCCESS) { 840 if (err == QLCNIC_RCODE_SUCCESS) {
@@ -765,8 +865,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
765 struct qlcnic_cmd_args cmd; 865 struct qlcnic_cmd_args cmd;
766 size_t nic_size = sizeof(struct qlcnic_info_le); 866 size_t nic_size = sizeof(struct qlcnic_info_le);
767 867
768 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 868 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
769 &nic_dma_t, GFP_KERNEL | __GFP_ZERO); 869 &nic_dma_t, GFP_KERNEL);
770 if (!nic_info_addr) 870 if (!nic_info_addr)
771 return -ENOMEM; 871 return -ENOMEM;
772 872
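
Note: this and the following hunks convert dma_alloc_coherent(..., GFP_KERNEL |
__GFP_ZERO) callers to the dma_zalloc_coherent() helper: same semantics, less
boilerplate. The helper of this era is roughly:

    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flag)
    {
        return dma_alloc_coherent(dev, size, dma_handle,
                                  flag | __GFP_ZERO);
    }
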
@@ -819,8 +919,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
819 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 919 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
820 return err; 920 return err;
821 921
822 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 922 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
823 &nic_dma_t, GFP_KERNEL | __GFP_ZERO); 923 &nic_dma_t, GFP_KERNEL);
824 if (!nic_info_addr) 924 if (!nic_info_addr)
825 return -ENOMEM; 925 return -ENOMEM;
826 926
@@ -872,9 +972,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
872 size_t npar_size = sizeof(struct qlcnic_pci_info_le); 972 size_t npar_size = sizeof(struct qlcnic_pci_info_le);
873 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; 973 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
874 974
875 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, 975 pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
876 &pci_info_dma_t, 976 &pci_info_dma_t, GFP_KERNEL);
877 GFP_KERNEL | __GFP_ZERO);
878 if (!pci_info_addr) 977 if (!pci_info_addr)
879 return -ENOMEM; 978 return -ENOMEM;
880 979
@@ -974,8 +1073,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
974 return -EIO; 1073 return -EIO;
975 } 1074 }
976 1075
977 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 1076 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
978 &stats_dma_t, GFP_KERNEL | __GFP_ZERO); 1077 &stats_dma_t, GFP_KERNEL);
979 if (!stats_addr) 1078 if (!stats_addr)
980 return -ENOMEM; 1079 return -ENOMEM;
981 1080
@@ -1030,8 +1129,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
1030 if (mac_stats == NULL) 1129 if (mac_stats == NULL)
1031 return -ENOMEM; 1130 return -ENOMEM;
1032 1131
1033 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 1132 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
1034 &stats_dma_t, GFP_KERNEL | __GFP_ZERO); 1133 &stats_dma_t, GFP_KERNEL);
1035 if (!stats_addr) 1134 if (!stats_addr)
1036 return -ENOMEM; 1135 return -ENOMEM;
1037 1136
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644
index 000000000000..d62d5ce432ec
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -0,0 +1,1179 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/types.h>
9#include "qlcnic.h"
10
11#define QLC_DCB_NUM_PARAM 3
12#define QLC_DCB_LOCAL_IDX 0
13#define QLC_DCB_OPER_IDX 1
14#define QLC_DCB_PEER_IDX 2
15
16#define QLC_DCB_GET_MAP(V) (1 << V)
17
18#define QLC_DCB_AEN_BIT 0x2
19#define QLC_DCB_FW_VER 0x2
20#define QLC_DCB_MAX_TC 0x8
21#define QLC_DCB_MAX_APP 0x8
22#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC
23#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC
24
25#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1)
26#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1)
27#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf)
28#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf)
29#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf)
30#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf)
31#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7)
32#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff)
33#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff)
34#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff)
35#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1)
36#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff)
37#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff)
38
39#define QLC_DCB_LOCAL_PARAM_FWID 0x3
40#define QLC_DCB_OPER_PARAM_FWID 0x1
41#define QLC_DCB_PEER_PARAM_FWID 0x2
42
43#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf)
44#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1)
45#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1)
46#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24)
47
48#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf)
49#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1)
50#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1)
51#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7)
52#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X)
53#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210)
54
55static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
56
57static void qlcnic_dcb_aen_work(struct work_struct *);
58static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
59
60static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
61static void __qlcnic_dcb_free(struct qlcnic_adapter *);
62static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
63static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
64static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
65
66static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
67static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
68static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
69static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
70
71static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
72static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
73static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
74static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
75static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
76
77struct qlcnic_dcb_capability {
78 bool tsa_capability;
79 bool ets_capability;
80 u8 max_num_tc;
81 u8 max_ets_tc;
82 u8 max_pfc_tc;
83 u8 dcb_capability;
84};
85
86struct qlcnic_dcb_param {
87 u32 hdr_prio_pfc_map[2];
88 u32 prio_pg_map[2];
89 u32 pg_bw_map[2];
90 u32 pg_tsa_map[2];
91 u32 app[QLC_DCB_MAX_APP];
92};
93
94struct qlcnic_dcb_mbx_params {
 95 /* 1st local, 2nd operational, 3rd remote */
96 struct qlcnic_dcb_param type[3];
97 u32 prio_tc_map;
98};
99
100struct qlcnic_82xx_dcb_param_mbx_le {
101 __le32 hdr_prio_pfc_map[2];
102 __le32 prio_pg_map[2];
103 __le32 pg_bw_map[2];
104 __le32 pg_tsa_map[2];
105 __le32 app[QLC_DCB_MAX_APP];
106};
107
108enum qlcnic_dcb_selector {
109 QLC_SELECTOR_DEF = 0x0,
110 QLC_SELECTOR_ETHER,
111 QLC_SELECTOR_TCP,
112 QLC_SELECTOR_UDP,
113};
114
115enum qlcnic_dcb_prio_type {
116 QLC_PRIO_NONE = 0,
117 QLC_PRIO_GROUP,
118 QLC_PRIO_LINK,
119};
120
121enum qlcnic_dcb_pfc_type {
122 QLC_PFC_DISABLED = 0,
123 QLC_PFC_FULL,
124 QLC_PFC_TX,
125 QLC_PFC_RX
126};
127
128struct qlcnic_dcb_prio_cfg {
129 bool valid;
130 enum qlcnic_dcb_pfc_type pfc_type;
131};
132
133struct qlcnic_dcb_pg_cfg {
134 bool valid;
135 u8 total_bw_percent; /* of Link/ port BW */
136 u8 prio_count;
137 u8 tsa_type;
138};
139
140struct qlcnic_dcb_tc_cfg {
141 bool valid;
142 struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
143 enum qlcnic_dcb_prio_type prio_type; /* always prio_link */
144 u8 link_percent; /* % of link bandwidth */
145 u8 bwg_percent; /* % of BWG's bandwidth */
146 u8 up_tc_map;
147 u8 pgid;
148};
149
150struct qlcnic_dcb_app {
151 bool valid;
152 enum qlcnic_dcb_selector selector;
153 u16 protocol;
154 u8 priority;
155};
156
157struct qlcnic_dcb_cee {
158 struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
159 struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
160 struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
161 bool tc_param_valid;
162 bool pfc_mode_enable;
163};
164
165struct qlcnic_dcb_cfg {
166 /* 0 - local, 1 - operational, 2 - remote */
167 struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
168 struct qlcnic_dcb_capability capability;
169 u32 version;
170};
171
172static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
173 .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
174 .free = __qlcnic_dcb_free,
175 .attach = __qlcnic_dcb_attach,
176 .query_hw_capability = __qlcnic_dcb_query_hw_capability,
177 .get_info = __qlcnic_dcb_get_info,
178
179 .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
180 .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
181 .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
182 .register_aen = qlcnic_83xx_dcb_register_aen,
183 .handle_aen = qlcnic_83xx_dcb_handle_aen,
184};
185
186static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
187 .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
188 .free = __qlcnic_dcb_free,
189 .attach = __qlcnic_dcb_attach,
190 .query_hw_capability = __qlcnic_dcb_query_hw_capability,
191 .get_info = __qlcnic_dcb_get_info,
192
193 .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
194 .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
195 .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
196 .handle_aen = qlcnic_82xx_dcb_handle_aen,
197};
198
199static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
200{
201 if (qlcnic_82xx_check(adapter))
202 return QLC_82XX_DCB_GET_NUMAPP(val);
203 else
204 return QLC_83XX_DCB_GET_NUMAPP(val);
205}
206
207static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
208 u32 val)
209{
210 if (qlcnic_82xx_check(adapter))
211 return QLC_82XX_DCB_PFC_VALID(val);
212 else
213 return QLC_83XX_DCB_PFC_VALID(val);
214}
215
216static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
217 u32 val)
218{
219 if (qlcnic_82xx_check(adapter))
220 return QLC_82XX_DCB_TSA_VALID(val);
221 else
222 return QLC_83XX_DCB_TSA_VALID(val);
223}
224
225static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
226 u32 val)
227{
228 if (qlcnic_82xx_check(adapter))
229 return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
230 else
231 return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
232}
233
234static int qlcnic_dcb_prio_count(u8 up_tc_map)
235{
236 int j;
237
238 for (j = 0; j < QLC_DCB_MAX_TC; j++)
239 if (up_tc_map & QLC_DCB_GET_MAP(j))
240 break;
241
242 return j;
243}
244
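
Note: qlcnic_dcb_prio_count() above returns the index of the lowest set bit in
an up->TC map (or QLC_DCB_MAX_TC when the map is empty), i.e. ffs(map) - 1 for
nonzero maps:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        unsigned char map = 0x28;   /* bits 3 and 5 set */

        printf("lowest prio: %d\n", ffs(map) - 1);  /* -> 3 */
        return 0;
    }
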
245static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
246{
247 if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
248 adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
249}
250
251static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
252{
253 if (qlcnic_82xx_check(adapter))
254 adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
255 else if (qlcnic_83xx_check(adapter))
256 adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
257}
258
259int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
260{
261 struct qlcnic_dcb *dcb;
262
263 dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
264 if (!dcb)
265 return -ENOMEM;
266
267 adapter->dcb = dcb;
268 dcb->adapter = adapter;
269 qlcnic_set_dcb_ops(adapter);
270
271 return 0;
272}
273
274static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
275{
276 struct qlcnic_dcb *dcb = adapter->dcb;
277
278 if (!dcb)
279 return;
280
281 qlcnic_dcb_register_aen(adapter, 0);
282
283 while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
284 usleep_range(10000, 11000);
285
286 cancel_delayed_work_sync(&dcb->aen_work);
287
288 if (dcb->wq) {
289 destroy_workqueue(dcb->wq);
290 dcb->wq = NULL;
291 }
292
293 kfree(dcb->cfg);
294 dcb->cfg = NULL;
295 kfree(dcb->param);
296 dcb->param = NULL;
297 kfree(dcb);
298 adapter->dcb = NULL;
299}
300
301static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
302{
303 qlcnic_dcb_get_hw_capability(adapter);
304 qlcnic_dcb_get_cee_cfg(adapter);
305 qlcnic_dcb_register_aen(adapter, 1);
306}
307
308static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
309{
310 struct qlcnic_dcb *dcb = adapter->dcb;
311 int err = 0;
312
313 INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
314
315 dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
316 if (!dcb->wq) {
317 dev_err(&adapter->pdev->dev,
318 "DCB workqueue allocation failed. DCB will be disabled\n");
319 return -1;
320 }
321
322 dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
323 if (!dcb->cfg) {
324 err = -ENOMEM;
325 goto out_free_wq;
326 }
327
328 dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
329 if (!dcb->param) {
330 err = -ENOMEM;
331 goto out_free_cfg;
332 }
333
334 qlcnic_dcb_get_info(adapter);
335
336 return 0;
337out_free_cfg:
338 kfree(dcb->cfg);
339 dcb->cfg = NULL;
340
341out_free_wq:
342 destroy_workqueue(dcb->wq);
343 dcb->wq = NULL;
344
345 return err;
346}
347
348static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
349 char *buf)
350{
351 struct qlcnic_cmd_args cmd;
352 u32 mbx_out;
353 int err;
354
355 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
356 if (err)
357 return err;
358
359 err = qlcnic_issue_cmd(adapter, &cmd);
360 if (err) {
361 dev_err(&adapter->pdev->dev,
362 "Failed to query DCBX capability, err %d\n", err);
363 } else {
364 mbx_out = cmd.rsp.arg[1];
365 if (buf)
366 memcpy(buf, &mbx_out, sizeof(u32));
367 }
368
369 qlcnic_free_mbx_args(&cmd);
370
371 return err;
372}
373
374static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
375{
376 struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
377 u32 mbx_out;
378 int err;
379
380 memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
381
382 err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
383 if (err)
384 return err;
385
386 mbx_out = *val;
387 if (QLC_DCB_TSA_SUPPORT(mbx_out))
388 cap->tsa_capability = true;
389
390 if (QLC_DCB_ETS_SUPPORT(mbx_out))
391 cap->ets_capability = true;
392
393 cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
394 cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
395 cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
396
397 if (cap->max_num_tc > QLC_DCB_MAX_TC ||
398 cap->max_ets_tc > cap->max_num_tc ||
399 cap->max_pfc_tc > cap->max_num_tc) {
400 dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
401 return -EINVAL;
402 }
403
404 return err;
405}
406
407static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
408{
409 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
410 struct qlcnic_dcb_capability *cap;
411 u32 mbx_out;
412 int err;
413
414 err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
415 if (err)
416 return err;
417
418 cap = &cfg->capability;
419 cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
420
421 if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
422 set_bit(__QLCNIC_DCB_STATE, &adapter->state);
423
424 return err;
425}
426
427static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
428 char *buf, u8 type)
429{
430 u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
431 struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
432 struct device *dev = &adapter->pdev->dev;
433 dma_addr_t cardrsp_phys_addr;
434 struct qlcnic_dcb_param rsp;
435 struct qlcnic_cmd_args cmd;
436 u64 phys_addr;
437 void *addr;
438 int err, i;
439
440 switch (type) {
441 case QLC_DCB_LOCAL_PARAM_FWID:
442 case QLC_DCB_OPER_PARAM_FWID:
443 case QLC_DCB_PEER_PARAM_FWID:
444 break;
445 default:
446 dev_err(dev, "Invalid parameter type %d\n", type);
447 return -EINVAL;
448 }
449
450 addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
451 GFP_KERNEL);
452 if (addr == NULL)
453 return -ENOMEM;
454
455 prsp_le = addr;
456
457 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
458 if (err)
459 goto out_free_rsp;
460
461 phys_addr = cardrsp_phys_addr;
462 cmd.req.arg[1] = size | (type << 16);
463 cmd.req.arg[2] = MSD(phys_addr);
464 cmd.req.arg[3] = LSD(phys_addr);
465
466 err = qlcnic_issue_cmd(adapter, &cmd);
467 if (err) {
468 dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
469 goto out;
470 }
471
472 memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
473 rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
474 rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
475 rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
476 rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
477 rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
478 rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
479 rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
480 rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
481
482 for (i = 0; i < QLC_DCB_MAX_APP; i++)
483 rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
484
485 if (buf)
486 memcpy(buf, &rsp, size);
487out:
488 qlcnic_free_mbx_args(&cmd);
489
490out_free_rsp:
491 dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
492
493 return err;
494}
495
496static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
497{
498 struct qlcnic_dcb_mbx_params *mbx;
499 int err;
500
501 mbx = adapter->dcb->param;
502 if (!mbx)
503 return 0;
504
505 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
506 QLC_DCB_LOCAL_PARAM_FWID);
507 if (err)
508 return err;
509
510 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
511 QLC_DCB_OPER_PARAM_FWID);
512 if (err)
513 return err;
514
515 err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
516 QLC_DCB_PEER_PARAM_FWID);
517 if (err)
518 return err;
519
520 mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
521
522 qlcnic_dcb_data_cee_param_map(adapter);
523
524 return err;
525}
526
527static void qlcnic_dcb_aen_work(struct work_struct *work)
528{
529 struct qlcnic_adapter *adapter;
530 struct qlcnic_dcb *dcb;
531
532 dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
533 adapter = dcb->adapter;
534
535 qlcnic_dcb_get_cee_cfg(adapter);
536 clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
537}
538
539static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
540 void *data)
541{
542 struct qlcnic_dcb *dcb = adapter->dcb;
543
544 if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
545 return;
546
547 queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
548}
549
550static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
551{
552 struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
553 u32 mbx_out;
554 int err;
555
556 err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
557 if (err)
558 return err;
559
560 if (mbx_out & BIT_2)
561 cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
562 if (mbx_out & BIT_3)
563 cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
564 if (cap->dcb_capability)
565 cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
566
567 if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
568 set_bit(__QLCNIC_DCB_STATE, &adapter->state);
569
570 return err;
571}
572
573static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
574 char *buf, u8 idx)
575{
576 struct qlcnic_dcb_mbx_params mbx_out;
577 int err, i, j, k, max_app, size;
578 struct qlcnic_dcb_param *each;
579 struct qlcnic_cmd_args cmd;
580 u32 val;
581 char *p;
582
583 size = 0;
584 memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
585 memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
586
587 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
588 if (err)
589 return err;
590
591 cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
592 err = qlcnic_issue_cmd(adapter, &cmd);
593 if (err) {
594 dev_err(&adapter->pdev->dev,
595 "Failed to query DCBX param, err %d\n", err);
596 goto out;
597 }
598
599 mbx_out.prio_tc_map = cmd.rsp.arg[1];
600 p = memcpy(buf, &mbx_out, sizeof(u32));
601 k = 2;
602 p += sizeof(u32);
603
604 for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
605 each = &mbx_out.type[j];
606
607 each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
608 each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
609 each->prio_pg_map[0] = cmd.rsp.arg[k++];
610 each->prio_pg_map[1] = cmd.rsp.arg[k++];
611 each->pg_bw_map[0] = cmd.rsp.arg[k++];
612 each->pg_bw_map[1] = cmd.rsp.arg[k++];
613 each->pg_tsa_map[0] = cmd.rsp.arg[k++];
614 each->pg_tsa_map[1] = cmd.rsp.arg[k++];
615 val = each->hdr_prio_pfc_map[0];
616
617 max_app = qlcnic_dcb_get_num_app(adapter, val);
618 for (i = 0; i < max_app; i++)
619 each->app[i] = cmd.rsp.arg[i + k];
620
621 size = 16 * sizeof(u32);
622 memcpy(p, &each->hdr_prio_pfc_map[0], size);
623 p += size;
624 if (j == 0)
625 k = 18;
626 else
627 k = 34;
628 }
629out:
630 qlcnic_free_mbx_args(&cmd);
631
632 return err;
633}
634
635static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
636{
637 struct qlcnic_dcb *dcb = adapter->dcb;
638 int err;
639
640 err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
641 if (err)
642 return err;
643
644 qlcnic_dcb_data_cee_param_map(adapter);
645
646 return err;
647}
648
649static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
650 bool flag)
651{
652 u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
653 struct qlcnic_cmd_args cmd;
654 int err;
655
656 err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
657 if (err)
658 return err;
659
660 cmd.req.arg[1] = QLC_DCB_AEN_BIT;
661
662 err = qlcnic_issue_cmd(adapter, &cmd);
663 if (err)
664 dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
665 (flag ? "register" : "unregister"), err);
666
667 qlcnic_free_mbx_args(&cmd);
668
669 return err;
670}
671
672static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
673 void *data)
674{
675 struct qlcnic_dcb *dcb = adapter->dcb;
676 u32 *val = data;
677
678 if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
679 return;
680
681 if (*val & BIT_8)
682 set_bit(__QLCNIC_DCB_STATE, &adapter->state);
683 else
684 clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
685
686 queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
687}
688
689static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
690 struct qlcnic_dcb_param *each,
691 struct qlcnic_dcb_cee *type)
692{
693 struct qlcnic_dcb_tc_cfg *tc_cfg;
694 u8 i, tc, pgid;
695
696 for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
697 tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
698 tc_cfg = &type->tc_cfg[tc];
699 tc_cfg->valid = true;
700 tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
701
702 if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
703 type->pfc_mode_enable) {
704 tc_cfg->prio_cfg[i].valid = true;
705 tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
706 }
707
708 if (i < 4)
709 pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
710 else
711 pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
712
713 tc_cfg->pgid = pgid;
714
715 tc_cfg->prio_type = QLC_PRIO_LINK;
716 type->pg_cfg[tc_cfg->pgid].prio_count++;
717 }
718}
719
720static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
721 struct qlcnic_dcb_cee *type)
722{
723 struct qlcnic_dcb_pg_cfg *pg_cfg;
724 u8 i, tsa, bw_per;
725
726 for (i = 0; i < QLC_DCB_MAX_PG; i++) {
727 pg_cfg = &type->pg_cfg[i];
728 pg_cfg->valid = true;
729
730 if (i < 4) {
731 bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
732 tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
733 } else {
734 bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
735 tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
736 }
737
738 pg_cfg->total_bw_percent = bw_per;
739 pg_cfg->tsa_type = tsa;
740 }
741}
742
743static void
744qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
745 struct qlcnic_dcb_param *each,
746 struct qlcnic_dcb_cee *type)
747{
748 struct qlcnic_dcb_app *app;
749 u8 i, num_app, map, cnt;
750 struct dcb_app new_app;
751
752 num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
753 for (i = 0; i < num_app; i++) {
754 app = &type->app[i];
755 app->valid = true;
756
757 /* Only for CEE (-1) */
758 app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
759 new_app.selector = app->selector;
760 app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
761 new_app.protocol = app->protocol;
762 map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
763 cnt = qlcnic_dcb_prio_count(map);
764
765 if (cnt >= QLC_DCB_MAX_TC)
766 cnt = 0;
767
768 app->priority = cnt;
769 new_app.priority = cnt;
770
771 if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
772 dcb_setapp(adapter->netdev, &new_app);
773 }
774}
775
776static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
777{
778 struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
779 struct qlcnic_dcb_param *each = &mbx->type[idx];
780 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
781 struct qlcnic_dcb_cee *type = &cfg->type[idx];
782
783 type->tc_param_valid = false;
784 type->pfc_mode_enable = false;
785 memset(type->tc_cfg, 0,
786 sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
787 memset(type->pg_cfg, 0,
788 sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
789
790 if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
791 cfg->capability.max_pfc_tc)
792 type->pfc_mode_enable = true;
793
794 if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
795 cfg->capability.max_ets_tc)
796 type->tc_param_valid = true;
797
798 qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
799 qlcnic_dcb_fill_cee_pg_params(each, type);
800 qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
801}
802
803static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
804{
805 int i;
806
807 for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
808 qlcnic_dcb_map_cee_params(adapter, i);
809
810 dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
811}
812
813static u8 qlcnic_dcb_get_state(struct net_device *netdev)
814{
815 struct qlcnic_adapter *adapter = netdev_priv(netdev);
816
817 return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
818}
819
820static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
821{
822 memcpy(addr, netdev->dev_addr, netdev->addr_len);
823}
824
825static void
826qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
827 u8 *pgid, u8 *bw_per, u8 *up_tc_map)
828{
829 struct qlcnic_adapter *adapter = netdev_priv(netdev);
830 struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
831 struct qlcnic_dcb_cee *type;
832 u8 i, cnt, pg;
833
834 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
835 *prio = *pgid = *bw_per = *up_tc_map = 0;
836
837 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
838 !type->tc_param_valid)
839 return;
840
841 if (tc < 0 || (tc > QLC_DCB_MAX_TC))
842 return;
843
844 tc_cfg = &type->tc_cfg[tc];
845 if (!tc_cfg->valid)
846 return;
847
848 *pgid = tc_cfg->pgid;
849 *prio = tc_cfg->prio_type;
850 *up_tc_map = tc_cfg->up_tc_map;
851 pg = *pgid;
852
853 for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
854 temp = &type->tc_cfg[i];
855 if (temp->valid && (pg == temp->pgid))
856 cnt++;
857 }
858
859 tc_cfg->bwg_percent = (100 / cnt);
860 *bw_per = tc_cfg->bwg_percent;
861}
862
863static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
864 u8 *bw_pct)
865{
866 struct qlcnic_adapter *adapter = netdev_priv(netdev);
867 struct qlcnic_dcb_pg_cfg *pgcfg;
868 struct qlcnic_dcb_cee *type;
869
870 *bw_pct = 0;
871 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
872
873 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
874 !type->tc_param_valid)
875 return;
876
877 if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
878 return;
879
880 pgcfg = &type->pg_cfg[pgid];
881 if (!pgcfg->valid)
882 return;
883
884 *bw_pct = pgcfg->total_bw_percent;
885}
886
887static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
888 u8 *setting)
889{
890 struct qlcnic_adapter *adapter = netdev_priv(netdev);
891 struct qlcnic_dcb_tc_cfg *tc_cfg;
892 u8 val = QLC_DCB_GET_MAP(prio);
893 struct qlcnic_dcb_cee *type;
894 u8 i;
895
896 *setting = 0;
897 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
898
899 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
900 !type->pfc_mode_enable)
901 return;
902
903 for (i = 0; i < QLC_DCB_MAX_TC; i++) {
904 tc_cfg = &type->tc_cfg[i];
905 if (!tc_cfg->valid)
906 continue;
907
908 if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
909 *setting = tc_cfg->prio_cfg[prio].pfc_type;
910 }
911}
912
913static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
914 u8 *cap)
915{
916 struct qlcnic_adapter *adapter = netdev_priv(netdev);
917
918 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
919 return 0;
920
921 switch (capid) {
922 case DCB_CAP_ATTR_PG:
923 case DCB_CAP_ATTR_UP2TC:
924 case DCB_CAP_ATTR_PFC:
925 case DCB_CAP_ATTR_GSP:
926 *cap = true;
927 break;
928 case DCB_CAP_ATTR_PG_TCS:
929 case DCB_CAP_ATTR_PFC_TCS:
930 *cap = 0x80; /* 8 priorities for PGs */
931 break;
932 case DCB_CAP_ATTR_DCBX:
933 *cap = adapter->dcb->cfg->capability.dcb_capability;
934 break;
935 default:
936 *cap = false;
937 }
938
939 return 0;
940}
941
942static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
943{
944 struct qlcnic_adapter *adapter = netdev_priv(netdev);
945 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
946
947 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
948 return -EINVAL;
949
950 switch (attr) {
951 case DCB_NUMTCS_ATTR_PG:
952 *num = cfg->capability.max_ets_tc;
953 return 0;
954 case DCB_NUMTCS_ATTR_PFC:
955 *num = cfg->capability.max_pfc_tc;
956 return 0;
957 default:
958 return -EINVAL;
959 }
960}
961
962static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
963{
964 struct qlcnic_adapter *adapter = netdev_priv(netdev);
965 struct dcb_app app = {
966 .selector = idtype,
967 .protocol = id,
968 };
969
970 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
971 return 0;
972
973 return dcb_getapp(netdev, &app);
974}
975
976static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
977{
978 struct qlcnic_adapter *adapter = netdev_priv(netdev);
979 struct qlcnic_dcb *dcb = adapter->dcb;
980
981 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
982 return 0;
983
984 return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
985}
986
987static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
988{
989 struct qlcnic_adapter *adapter = netdev_priv(netdev);
990 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
991
992 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
993 return 0;
994
995 return cfg->capability.dcb_capability;
996}
997
998static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
999{
1000 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1001 struct qlcnic_dcb_cee *type;
1002
1003 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1004 return 1;
1005
1006 type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
1007 *flag = 0;
1008
1009 switch (fid) {
1010 case DCB_FEATCFG_ATTR_PG:
1011 if (type->tc_param_valid)
1012 *flag |= DCB_FEATCFG_ENABLE;
1013 else
1014 *flag |= DCB_FEATCFG_ERROR;
1015 break;
1016 case DCB_FEATCFG_ATTR_PFC:
1017 if (type->pfc_mode_enable) {
1018 if (type->tc_cfg[0].prio_cfg[0].pfc_type)
1019 *flag |= DCB_FEATCFG_ENABLE;
1020 } else {
1021 *flag |= DCB_FEATCFG_ERROR;
1022 }
1023 break;
1024 case DCB_FEATCFG_ATTR_APP:
1025 *flag |= DCB_FEATCFG_ENABLE;
1026 break;
1027 default:
1028 netdev_err(netdev, "Invalid Feature ID %d\n", fid);
1029 return 1;
1030 }
1031
1032 return 0;
1033}
1034
1035static inline void
1036qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
1037 u8 *pgid, u8 *bw_pct, u8 *up_map)
1038{
1039 *prio_type = *pgid = *bw_pct = *up_map = 0;
1040}
1041
1042static inline void
1043qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
1044{
1045 *bw_pct = 0;
1046}
1047
1048static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
1049 struct dcb_peer_app_info *info,
1050 u16 *app_count)
1051{
1052 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1053 struct qlcnic_dcb_cee *peer;
1054 int i;
1055
1056 *app_count = 0;
1057
1058 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1059 return 0;
1060
1061 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
1062
1063 for (i = 0; i < QLC_DCB_MAX_APP; i++) {
1064 if (peer->app[i].valid)
1065 (*app_count)++;
1066 }
1067
1068 return 0;
1069}
1070
1071static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
1072 struct dcb_app *table)
1073{
1074 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1075 struct qlcnic_dcb_cee *peer;
1076 struct qlcnic_dcb_app *app;
1077 int i, j;
1078
1079 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1080 return 0;
1081
1082 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
1083
1084 for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
1085 app = &peer->app[i];
1086 if (!app->valid)
1087 continue;
1088
1089 table[j].selector = app->selector;
1090 table[j].priority = app->priority;
1091 table[j++].protocol = app->protocol;
1092 }
1093
1094 return 0;
1095}
1096
1097static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
1098 struct cee_pg *pg)
1099{
1100 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1101 struct qlcnic_dcb_cee *peer;
1102 u8 i, j, k, map;
1103
1104 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1105 return 0;
1106
1107 peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
1108
1109 for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
1110 if (!peer->pg_cfg[i].valid)
1111 continue;
1112
1113 pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
1114
1115 for (k = 0; k < QLC_DCB_MAX_TC; k++) {
1116 if (peer->tc_cfg[i].valid &&
1117 (peer->tc_cfg[i].pgid == i)) {
1118 map = peer->tc_cfg[i].up_tc_map;
1119 pg->prio_pg[j++] = map;
1120 break;
1121 }
1122 }
1123 }
1124
1125 return 0;
1126}
1127
1128static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
1129 struct cee_pfc *pfc)
1130{
1131 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1132 struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
1133 struct qlcnic_dcb_tc_cfg *tc;
1134 struct qlcnic_dcb_cee *peer;
1135 u8 i, setting, prio;
1136
1137 pfc->pfc_en = 0;
1138
1139 if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
1140 return 0;
1141
1142 peer = &cfg->type[QLC_DCB_PEER_IDX];
1143
1144 for (i = 0; i < QLC_DCB_MAX_TC; i++) {
1145 tc = &peer->tc_cfg[i];
1146 prio = qlcnic_dcb_prio_count(tc->up_tc_map);
1147
1148 setting = 0;
1149 qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
1150 if (setting)
1151 pfc->pfc_en |= QLC_DCB_GET_MAP(i);
1152 }
1153
1154 pfc->tcs_supported = cfg->capability.max_pfc_tc;
1155
1156 return 0;
1157}
1158
1159static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
1160 .getstate = qlcnic_dcb_get_state,
1161 .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr,
1162 .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx,
1163 .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx,
1164 .getpfccfg = qlcnic_dcb_get_pfc_cfg,
1165 .getcap = qlcnic_dcb_get_capability,
1166 .getnumtcs = qlcnic_dcb_get_num_tcs,
1167 .getapp = qlcnic_dcb_get_app,
1168 .getpfcstate = qlcnic_dcb_get_pfc_state,
1169 .getdcbx = qlcnic_dcb_get_dcbx,
1170 .getfeatcfg = qlcnic_dcb_get_feat_cfg,
1171
1172 .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx,
1173 .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx,
1174
1175 .peer_getappinfo = qlcnic_dcb_peer_app_info,
1176 .peer_getapptable = qlcnic_dcb_peer_app_table,
1177 .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg,
1178 .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc,
1179};
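
The CEE fill helpers above read the firmware's priority maps in two halves, switching from prio_pg_map[0]/pg_bw_map[0] to the [1] element at i == 4, which implies four 8-bit fields per 32-bit word. The QLC_DCB_GET_* macro definitions are not part of this hunk, so as a hedged sketch, an equivalent flattened accessor for that assumed packing might look like:

	#include <linux/types.h>

	/* Sketch only: assumes four 8-bit fields per u32, matching the
	 * i < 4 split in qlcnic_dcb_fill_cee_tc_params() above.
	 */
	static inline u8 qlc_dcb_get_field(const u32 *map, int i)
	{
		return (map[i >> 2] >> ((i & 3) * 8)) & 0xff;
	}
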
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644
index 000000000000..b87ce9fb503e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -0,0 +1,41 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#ifndef __QLCNIC_DCBX_H
9#define __QLCNIC_DCBX_H
10
11void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
12
13#ifdef CONFIG_QLCNIC_DCB
14int __qlcnic_register_dcb(struct qlcnic_adapter *);
15#else
16static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
17{ return 0; }
18#endif
19
20struct qlcnic_dcb_ops {
21 void (*init_dcbnl_ops) (struct qlcnic_adapter *);
22 void (*free) (struct qlcnic_adapter *);
23 int (*attach) (struct qlcnic_adapter *);
24 int (*query_hw_capability) (struct qlcnic_adapter *, char *);
25 int (*get_hw_capability) (struct qlcnic_adapter *);
26 void (*get_info) (struct qlcnic_adapter *);
27 int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
28 int (*get_cee_cfg) (struct qlcnic_adapter *);
29 int (*register_aen) (struct qlcnic_adapter *, bool);
30 void (*handle_aen) (struct qlcnic_adapter *, void *);
31};
32
33struct qlcnic_dcb {
34 struct qlcnic_dcb_mbx_params *param;
35 struct qlcnic_adapter *adapter;
36 struct delayed_work aen_work;
37 struct workqueue_struct *wq;
38 struct qlcnic_dcb_ops *ops;
39 struct qlcnic_dcb_cfg *cfg;
40};
41#endif
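
The header keeps the 82xx/83xx differences behind a per-adapter ops table rather than compile-time branches. Call sites elsewhere in this patch (for example the QLCNIC_C2H_OPCODE_GET_DCB_AEN case in qlcnic_io.c below) presumably reach it through a thin guard along these lines; the wrapper here is illustrative, not part of this hunk:

	static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
						 void *msg)
	{
		if (adapter->dcb && adapter->dcb->ops->handle_aen)
			adapter->dcb->ops->handle_aen(adapter, msg);
	}
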
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 7aac23ab31d1..4d7ad0074d1c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -125,6 +125,14 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
125}; 125};
126 126
127#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) 127#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
128
129static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
130 "xmit_on",
131 "xmit_off",
132 "xmit_called",
133 "xmit_finished",
134};
135
128static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { 136static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
129 "ctx_rx_bytes", 137 "ctx_rx_bytes",
130 "ctx_rx_pkts", 138 "ctx_rx_pkts",
@@ -630,15 +638,15 @@ qlcnic_set_ringparam(struct net_device *dev,
630static void qlcnic_get_channels(struct net_device *dev, 638static void qlcnic_get_channels(struct net_device *dev,
631 struct ethtool_channels *channel) 639 struct ethtool_channels *channel)
632{ 640{
633 int min;
634 struct qlcnic_adapter *adapter = netdev_priv(dev); 641 struct qlcnic_adapter *adapter = netdev_priv(dev);
642 int min;
635 643
636 min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus()); 644 min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
637 channel->max_rx = rounddown_pow_of_two(min); 645 channel->max_rx = rounddown_pow_of_two(min);
638 channel->max_tx = adapter->ahw->max_tx_ques; 646 channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
639 647
640 channel->rx_count = adapter->max_sds_rings; 648 channel->rx_count = adapter->max_sds_rings;
641 channel->tx_count = adapter->ahw->max_tx_ques; 649 channel->tx_count = adapter->max_drv_tx_rings;
642} 650}
643 651
644static int qlcnic_set_channels(struct net_device *dev, 652static int qlcnic_set_channels(struct net_device *dev,
@@ -646,18 +654,27 @@ static int qlcnic_set_channels(struct net_device *dev,
646{ 654{
647 struct qlcnic_adapter *adapter = netdev_priv(dev); 655 struct qlcnic_adapter *adapter = netdev_priv(dev);
648 int err; 656 int err;
657 int txq = 0;
649 658
650 if (channel->other_count || channel->combined_count || 659 if (channel->other_count || channel->combined_count)
651 channel->tx_count != channel->max_tx)
652 return -EINVAL; 660 return -EINVAL;
653 661
654 err = qlcnic_validate_max_rss(adapter, channel->rx_count); 662 if (channel->rx_count) {
655 if (err) 663 err = qlcnic_validate_max_rss(adapter, channel->rx_count);
656 return err; 664 if (err)
665 return err;
666 }
667
668 if (channel->tx_count) {
669 err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
670 if (err)
671 return err;
672 txq = channel->tx_count;
673 }
657 674
658 err = qlcnic_set_max_rss(adapter, channel->rx_count, 0); 675 err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
659 netdev_info(dev, "allocated 0x%x sds rings\n", 676 netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
660 adapter->max_sds_rings); 677 adapter->max_sds_rings, adapter->max_drv_tx_rings);
661 return err; 678 return err;
662} 679}
663 680
@@ -893,6 +910,7 @@ free_diag_res:
893clear_diag_irq: 910clear_diag_irq:
894 adapter->max_sds_rings = max_sds_rings; 911 adapter->max_sds_rings = max_sds_rings;
895 clear_bit(__QLCNIC_RESETTING, &adapter->state); 912 clear_bit(__QLCNIC_RESETTING, &adapter->state);
913
896 return ret; 914 return ret;
897} 915}
898 916
@@ -966,6 +984,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
966int qlcnic_loopback_test(struct net_device *netdev, u8 mode) 984int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
967{ 985{
968 struct qlcnic_adapter *adapter = netdev_priv(netdev); 986 struct qlcnic_adapter *adapter = netdev_priv(netdev);
987 int max_drv_tx_rings = adapter->max_drv_tx_rings;
969 int max_sds_rings = adapter->max_sds_rings; 988 int max_sds_rings = adapter->max_sds_rings;
970 struct qlcnic_host_sds_ring *sds_ring; 989 struct qlcnic_host_sds_ring *sds_ring;
971 struct qlcnic_hardware_context *ahw = adapter->ahw; 990 struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1006,9 +1025,9 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
1006 msleep(500); 1025 msleep(500);
1007 qlcnic_process_rcv_ring_diag(sds_ring); 1026 qlcnic_process_rcv_ring_diag(sds_ring);
1008 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { 1027 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
1009 netdev_info(netdev, "firmware didnt respond to loopback" 1028 netdev_info(netdev,
1010 " configure request\n"); 1029 "Firmware didn't sent link up event to loopback request\n");
1011 ret = -QLCNIC_FW_NOT_RESPOND; 1030 ret = -ETIMEDOUT;
1012 goto free_res; 1031 goto free_res;
1013 } else if (adapter->ahw->diag_cnt) { 1032 } else if (adapter->ahw->diag_cnt) {
1014 ret = adapter->ahw->diag_cnt; 1033 ret = adapter->ahw->diag_cnt;
@@ -1025,6 +1044,7 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
1025 1044
1026 clear_it: 1045 clear_it:
1027 adapter->max_sds_rings = max_sds_rings; 1046 adapter->max_sds_rings = max_sds_rings;
1047 adapter->max_drv_tx_rings = max_drv_tx_rings;
1028 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1048 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1029 return ret; 1049 return ret;
1030} 1050}
@@ -1077,11 +1097,21 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1077 QLCNIC_TEST_LEN * ETH_GSTRING_LEN); 1097 QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
1078 break; 1098 break;
1079 case ETH_SS_STATS: 1099 case ETH_SS_STATS:
1100 num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
1101 for (i = 0; i < adapter->max_drv_tx_rings; i++) {
1102 for (index = 0; index < num_stats; index++) {
1103 sprintf(data, "tx_ring_%d %s", i,
1104 qlcnic_tx_ring_stats_strings[index]);
1105 data += ETH_GSTRING_LEN;
1106 }
1107 }
1108
1080 for (index = 0; index < QLCNIC_STATS_LEN; index++) { 1109 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
1081 memcpy(data + index * ETH_GSTRING_LEN, 1110 memcpy(data + index * ETH_GSTRING_LEN,
1082 qlcnic_gstrings_stats[index].stat_string, 1111 qlcnic_gstrings_stats[index].stat_string,
1083 ETH_GSTRING_LEN); 1112 ETH_GSTRING_LEN);
1084 } 1113 }
1114
1085 if (qlcnic_83xx_check(adapter)) { 1115 if (qlcnic_83xx_check(adapter)) {
1086 num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings); 1116 num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
1087 for (i = 0; i < num_stats; i++, index++) 1117 for (i = 0; i < num_stats; i++, index++)
@@ -1173,11 +1203,22 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
1173 struct ethtool_stats *stats, u64 *data) 1203 struct ethtool_stats *stats, u64 *data)
1174{ 1204{
1175 struct qlcnic_adapter *adapter = netdev_priv(dev); 1205 struct qlcnic_adapter *adapter = netdev_priv(dev);
1206 struct qlcnic_host_tx_ring *tx_ring;
1176 struct qlcnic_esw_statistics port_stats; 1207 struct qlcnic_esw_statistics port_stats;
1177 struct qlcnic_mac_statistics mac_stats; 1208 struct qlcnic_mac_statistics mac_stats;
1178 int index, ret, length, size; 1209 int index, ret, length, size, ring;
1179 char *p; 1210 char *p;
1180 1211
1212 memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
1213 for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
1214 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1215 tx_ring = &adapter->tx_ring[ring];
1216 *data++ = tx_ring->xmit_on;
1217 *data++ = tx_ring->xmit_off;
1218 *data++ = tx_ring->xmit_called;
1219 *data++ = tx_ring->xmit_finished;
1220 }
1221 }
1181 memset(data, 0, stats->n_stats * sizeof(u64)); 1222 memset(data, 0, stats->n_stats * sizeof(u64));
1182 length = QLCNIC_STATS_LEN; 1223 length = QLCNIC_STATS_LEN;
1183 for (index = 0; index < length; index++) { 1224 for (index = 0; index < length; index++) {
@@ -1468,6 +1509,68 @@ static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
1468 adapter->ahw->msg_enable = msglvl; 1509 adapter->ahw->msg_enable = msglvl;
1469} 1510}
1470 1511
1512int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
1513{
1514 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1515 u32 val;
1516
1517 if (qlcnic_84xx_check(adapter)) {
1518 if (qlcnic_83xx_lock_driver(adapter))
1519 return -EBUSY;
1520
1521 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1522 val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
1523 QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
1524
1525 qlcnic_83xx_unlock_driver(adapter);
1526 } else {
1527 fw_dump->enable = true;
1528 }
1529
1530 dev_info(&adapter->pdev->dev, "FW dump enabled\n");
1531
1532 return 0;
1533}
1534
1535static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
1536{
1537 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1538 u32 val;
1539
1540 if (qlcnic_84xx_check(adapter)) {
1541 if (qlcnic_83xx_lock_driver(adapter))
1542 return -EBUSY;
1543
1544 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1545 val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
1546 QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
1547
1548 qlcnic_83xx_unlock_driver(adapter);
1549 } else {
1550 fw_dump->enable = false;
1551 }
1552
1553 dev_info(&adapter->pdev->dev, "FW dump disabled\n");
1554
1555 return 0;
1556}
1557
1558bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
1559{
1560 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1561 bool state;
1562 u32 val;
1563
1564 if (qlcnic_84xx_check(adapter)) {
1565 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1566 state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? false : true;
1567 } else {
1568 state = fw_dump->enable;
1569 }
1570
1571 return state;
1572}
1573
1471static int 1574static int
1472qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) 1575qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1473{ 1576{
@@ -1484,7 +1587,7 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1484 else 1587 else
1485 dump->len = 0; 1588 dump->len = 0;
1486 1589
1487 if (!fw_dump->enable) 1590 if (!qlcnic_check_fw_dump_state(adapter))
1488 dump->flag = ETH_FW_DUMP_DISABLE; 1591 dump->flag = ETH_FW_DUMP_DISABLE;
1489 else 1592 else
1490 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; 1593 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
@@ -1532,77 +1635,111 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1532 return 0; 1635 return 0;
1533} 1636}
1534 1637
1638static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
1639{
1640 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1641 struct net_device *netdev = adapter->netdev;
1642
1643 if (!qlcnic_check_fw_dump_state(adapter)) {
1644 netdev_info(netdev,
1645 "Can not change driver mask to 0x%x. FW dump not enabled\n",
1646 mask);
1647 return -EOPNOTSUPP;
1648 }
1649
1650 fw_dump->tmpl_hdr->drv_cap_mask = mask;
1651 netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
1652 return 0;
1653}
1654
1535static int 1655static int
1536qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) 1656qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1537{ 1657{
1538 int i;
1539 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1658 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1540 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1659 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1660 bool valid_mask = false;
1661 int i, ret = 0;
1541 u32 state; 1662 u32 state;
1542 1663
1543 switch (val->flag) { 1664 switch (val->flag) {
1544 case QLCNIC_FORCE_FW_DUMP_KEY: 1665 case QLCNIC_FORCE_FW_DUMP_KEY:
1545 if (!fw_dump->tmpl_hdr) { 1666 if (!fw_dump->tmpl_hdr) {
1546 netdev_err(netdev, "FW dump not supported\n"); 1667 netdev_err(netdev, "FW dump not supported\n");
1547 return -ENOTSUPP; 1668 ret = -EOPNOTSUPP;
1669 break;
1548 } 1670 }
1549 if (!fw_dump->enable) { 1671
1672 if (!qlcnic_check_fw_dump_state(adapter)) {
1550 netdev_info(netdev, "FW dump not enabled\n"); 1673 netdev_info(netdev, "FW dump not enabled\n");
1551 return 0; 1674 ret = -EOPNOTSUPP;
1675 break;
1552 } 1676 }
1677
1553 if (fw_dump->clr) { 1678 if (fw_dump->clr) {
1554 netdev_info(netdev, 1679 netdev_info(netdev,
1555 "Previous dump not cleared, not forcing dump\n"); 1680 "Previous dump not cleared, not forcing dump\n");
1556 return 0; 1681 break;
1557 } 1682 }
1683
1558 netdev_info(netdev, "Forcing a FW dump\n"); 1684 netdev_info(netdev, "Forcing a FW dump\n");
1559 qlcnic_dev_request_reset(adapter, val->flag); 1685 qlcnic_dev_request_reset(adapter, val->flag);
1560 break; 1686 break;
1561 case QLCNIC_DISABLE_FW_DUMP: 1687 case QLCNIC_DISABLE_FW_DUMP:
1562 if (fw_dump->enable && fw_dump->tmpl_hdr) { 1688 if (!fw_dump->tmpl_hdr) {
1563 netdev_info(netdev, "Disabling FW dump\n"); 1689 netdev_err(netdev, "FW dump not supported\n");
1564 fw_dump->enable = 0; 1690 ret = -EOPNOTSUPP;
1691 break;
1565 } 1692 }
1566 return 0; 1693
1694 ret = qlcnic_disable_fw_dump_state(adapter);
1695 break;
1696
1567 case QLCNIC_ENABLE_FW_DUMP: 1697 case QLCNIC_ENABLE_FW_DUMP:
1568 if (!fw_dump->tmpl_hdr) { 1698 if (!fw_dump->tmpl_hdr) {
1569 netdev_err(netdev, "FW dump not supported\n"); 1699 netdev_err(netdev, "FW dump not supported\n");
1570 return -ENOTSUPP; 1700 ret = -EOPNOTSUPP;
1571 } 1701 break;
1572 if (!fw_dump->enable) {
1573 netdev_info(netdev, "Enabling FW dump\n");
1574 fw_dump->enable = 1;
1575 } 1702 }
1576 return 0; 1703
1704 ret = qlcnic_enable_fw_dump_state(adapter);
1705 break;
1706
1577 case QLCNIC_FORCE_FW_RESET: 1707 case QLCNIC_FORCE_FW_RESET:
1578 netdev_info(netdev, "Forcing a FW reset\n"); 1708 netdev_info(netdev, "Forcing a FW reset\n");
1579 qlcnic_dev_request_reset(adapter, val->flag); 1709 qlcnic_dev_request_reset(adapter, val->flag);
1580 adapter->flags &= ~QLCNIC_FW_RESET_OWNER; 1710 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
1581 return 0; 1711 break;
1712
1582 case QLCNIC_SET_QUIESCENT: 1713 case QLCNIC_SET_QUIESCENT:
1583 case QLCNIC_RESET_QUIESCENT: 1714 case QLCNIC_RESET_QUIESCENT:
1584 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); 1715 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1585 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) 1716 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
1586 netdev_info(netdev, "Device in FAILED state\n"); 1717 netdev_info(netdev, "Device in FAILED state\n");
1587 return 0; 1718 break;
1719
1588 default: 1720 default:
1589 if (!fw_dump->tmpl_hdr) { 1721 if (!fw_dump->tmpl_hdr) {
1590 netdev_err(netdev, "FW dump not supported\n"); 1722 netdev_err(netdev, "FW dump not supported\n");
1591 return -ENOTSUPP; 1723 ret = -EOPNOTSUPP;
1724 break;
1592 } 1725 }
1726
1593 for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) { 1727 for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
1594 if (val->flag == qlcnic_fw_dump_level[i]) { 1728 if (val->flag == qlcnic_fw_dump_level[i]) {
1595 fw_dump->tmpl_hdr->drv_cap_mask = 1729 valid_mask = true;
1596 val->flag; 1730 break;
1597 netdev_info(netdev, "Driver mask changed to: 0x%x\n",
1598 fw_dump->tmpl_hdr->drv_cap_mask);
1599 return 0;
1600 } 1731 }
1601 } 1732 }
1602 netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag); 1733
1603 return -EINVAL; 1734 if (valid_mask) {
1735 ret = qlcnic_set_dump_mask(adapter, val->flag);
1736 } else {
1737 netdev_info(netdev, "Invalid dump level: 0x%x\n",
1738 val->flag);
1739 ret = -EINVAL;
1740 }
1604 } 1741 }
1605 return 0; 1742 return ret;
1606} 1743}
1607 1744
1608const struct ethtool_ops qlcnic_ethtool_ops = { 1745const struct ethtool_ops qlcnic_ethtool_ops = {
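
For reference, the reworked dump paths above are driven from userspace through ethtool's dump interface. A session along these lines exercises them (device name and mask value are placeholders, and the mask must match an entry in qlcnic_fw_dump_level[]):

	ethtool -W eth0 0xff		# qlcnic_set_dump(): change the capture mask
	ethtool -w eth0			# qlcnic_get_dump_flag(): report flag and length
	ethtool -w eth0 data fw.dump	# qlcnic_get_dump_data(): fetch the dump blob
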
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4d5f59b2d153..f8adc7b01f1f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -387,7 +387,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
387 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 387 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
388 return -EIO; 388 return -EIO;
389 389
390 tx_ring = adapter->tx_ring; 390 tx_ring = &adapter->tx_ring[0];
391 __netif_tx_lock_bh(tx_ring->txq); 391 __netif_tx_lock_bh(tx_ring->txq);
392 392
393 producer = tx_ring->producer; 393 producer = tx_ring->producer;
@@ -740,6 +740,22 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
740 return 0; 740 return 0;
741} 741}
742 742
743int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
744{
745 u8 mac[ETH_ALEN];
746 int ret;
747
748 ret = qlcnic_get_mac_address(adapter, mac,
749 adapter->ahw->physical_port);
750 if (ret)
751 return ret;
752
753 memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
754 adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
755
756 return 0;
757}
758
743/* 759/*
744 * Send the interrupt coalescing parameter set by ethtool to the card. 760 * Send the interrupt coalescing parameter set by ethtool to the card.
745 */ 761 */
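
qlcnic_82xx_read_phys_port_id() above reuses the physical port's MAC address as the device's physical-port identifier. It surfaces through the .ndo_get_phys_port_id hook added in qlcnic_main.c further down, and userspace reads it via the IFLA_PHYS_PORT_ID netlink attribute; assuming a recent enough iproute2, something like:

	ip -d link show dev eth0	# prints a "portid" field once the driver sets it
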
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4a71b28effcb..272c356cf9b2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -85,8 +85,11 @@ enum qlcnic_regs {
85#define QLCNIC_CMD_GET_TEMP_HDR 0x30 85#define QLCNIC_CMD_GET_TEMP_HDR 0x30
86#define QLCNIC_CMD_BC_EVENT_SETUP 0x31 86#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
87#define QLCNIC_CMD_CONFIG_VPORT 0x32 87#define QLCNIC_CMD_CONFIG_VPORT 0x32
88#define QLCNIC_CMD_DCB_QUERY_CAP 0x34
89#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35
88#define QLCNIC_CMD_GET_MAC_STATS 0x37 90#define QLCNIC_CMD_GET_MAC_STATS 0x37
89#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38 91#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
92#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39
90#define QLCNIC_CMD_GET_LED_STATUS 0x3C 93#define QLCNIC_CMD_GET_LED_STATUS 0x3C
91#define QLCNIC_CMD_CONFIGURE_RSS 0x41 94#define QLCNIC_CMD_CONFIGURE_RSS 0x41
92#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43 95#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
@@ -122,6 +125,7 @@ enum qlcnic_regs {
122#define QLCNIC_MBX_COMP_EVENT 0x8100 125#define QLCNIC_MBX_COMP_EVENT 0x8100
123#define QLCNIC_MBX_REQUEST_EVENT 0x8101 126#define QLCNIC_MBX_REQUEST_EVENT 0x8101
124#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102 127#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
128#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110
125#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130 129#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
126#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131 130#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
127 131
@@ -149,7 +153,6 @@ struct ethtool_stats;
149struct pci_device_id; 153struct pci_device_id;
150struct qlcnic_host_sds_ring; 154struct qlcnic_host_sds_ring;
151struct qlcnic_host_tx_ring; 155struct qlcnic_host_tx_ring;
152struct qlcnic_host_tx_ring;
153struct qlcnic_hardware_context; 156struct qlcnic_hardware_context;
154struct qlcnic_adapter; 157struct qlcnic_adapter;
155 158
@@ -173,10 +176,12 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
173void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); 176void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
174void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); 177void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
175void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32); 178void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
176int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8); 179int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
177irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *); 180irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
178int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, 181int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
179 struct qlcnic_cmd_args *); 182 struct qlcnic_cmd_args *);
183int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
184int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
180int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *); 185int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
181int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *, 186int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
182 struct qlcnic_host_tx_ring *tx_ring, int); 187 struct qlcnic_host_tx_ring *tx_ring, int);
@@ -184,7 +189,7 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
184void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *, 189void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
185 struct qlcnic_host_tx_ring *); 190 struct qlcnic_host_tx_ring *);
186int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8); 191int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
187int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*); 192int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
188int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); 193int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
189int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); 194int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
190int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); 195int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 974d62607e13..66c26cf7a2b8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -127,12 +127,12 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
127 } 127 }
128} 128}
129 129
130void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) 130void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
131 struct qlcnic_host_tx_ring *tx_ring)
131{ 132{
132 struct qlcnic_cmd_buffer *cmd_buf; 133 struct qlcnic_cmd_buffer *cmd_buf;
133 struct qlcnic_skb_frag *buffrag; 134 struct qlcnic_skb_frag *buffrag;
134 int i, j; 135 int i, j;
135 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
136 136
137 cmd_buf = tx_ring->cmd_buf_arr; 137 cmd_buf = tx_ring->cmd_buf_arr;
138 for (i = 0; i < tx_ring->num_desc; i++) { 138 for (i = 0; i < tx_ring->num_desc; i++) {
@@ -241,7 +241,13 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
241 sds_ring->irq = adapter->msix_entries[ring].vector; 241 sds_ring->irq = adapter->msix_entries[ring].vector;
242 sds_ring->adapter = adapter; 242 sds_ring->adapter = adapter;
243 sds_ring->num_desc = adapter->num_rxd; 243 sds_ring->num_desc = adapter->num_rxd;
244 244 if (qlcnic_82xx_check(adapter)) {
245 if (qlcnic_check_multi_tx(adapter) &&
246 !adapter->ahw->diag_test)
247 sds_ring->tx_ring = &adapter->tx_ring[ring];
248 else
249 sds_ring->tx_ring = &adapter->tx_ring[0];
250 }
245 for (i = 0; i < NUM_RCV_DESC_RINGS; i++) 251 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
246 INIT_LIST_HEAD(&sds_ring->free_list[i]); 252 INIT_LIST_HEAD(&sds_ring->free_list[i]);
247 } 253 }
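
The pairing set up above decides which Tx ring each status ring's NAPI poll completes. Restated as a helper (a sketch; this function does not exist in the driver), the 82xx mapping is:

	static struct qlcnic_host_tx_ring *
	qlc_82xx_paired_tx_ring(struct qlcnic_adapter *adapter, int ring)
	{
		if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
			return &adapter->tx_ring[ring];	/* 1:1 in multi-Tx mode */
		return &adapter->tx_ring[0];		/* all rings share ring 0 */
	}
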
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6946d354f44f..b7b245b43b87 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -127,6 +127,23 @@
127struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *, 127struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
128 struct qlcnic_host_rds_ring *, u16, u16); 128 struct qlcnic_host_rds_ring *, u16, u16);
129 129
130inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
131 struct qlcnic_host_tx_ring *tx_ring)
132{
133 if (qlcnic_check_multi_tx(adapter) &&
134 !adapter->ahw->diag_test)
135 writel(0x0, tx_ring->crb_intr_mask);
136}
137
138
139static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
140 struct qlcnic_host_tx_ring *tx_ring)
141{
142 if (qlcnic_check_multi_tx(adapter) &&
143 !adapter->ahw->diag_test)
144 writel(1, tx_ring->crb_intr_mask);
145}
146
130inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter, 147inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
131 struct qlcnic_host_tx_ring *tx_ring) 148 struct qlcnic_host_tx_ring *tx_ring)
132{ 149{
@@ -147,10 +164,7 @@ static inline u8 qlcnic_mac_hash(u64 mac)
147static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, 164static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
148 u16 handle, u8 ring_id) 165 u16 handle, u8 ring_id)
149{ 166{
150 unsigned short device = adapter->pdev->device; 167 if (qlcnic_83xx_check(adapter))
151
152 if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
153 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
154 return handle | (ring_id << 15); 168 return handle | (ring_id << 15);
155 else 169 else
156 return handle; 170 return handle;
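
The simplified 83xx check above keeps the existing handle encoding, which folds the source Tx ring id into the completion reference handle starting at bit 15. The inverse operation, for illustration only (the driver's own decode is not shown in this hunk):

	static inline u32 qlc_ref_handle_ring_id(u32 ref)	/* hypothetical */
	{
		return ref >> 15;
	}
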
@@ -357,14 +371,14 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
357} 371}
358 372
359static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, 373static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
360 struct cmd_desc_type0 *first_desc, struct sk_buff *skb) 374 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
375 struct qlcnic_host_tx_ring *tx_ring)
361{ 376{
362 u8 l4proto, opcode = 0, hdr_len = 0; 377 u8 l4proto, opcode = 0, hdr_len = 0;
363 u16 flags = 0, vlan_tci = 0; 378 u16 flags = 0, vlan_tci = 0;
364 int copied, offset, copy_len, size; 379 int copied, offset, copy_len, size;
365 struct cmd_desc_type0 *hwdesc; 380 struct cmd_desc_type0 *hwdesc;
366 struct vlan_ethhdr *vh; 381 struct vlan_ethhdr *vh;
367 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
368 u16 protocol = ntohs(skb->protocol); 382 u16 protocol = ntohs(skb->protocol);
369 u32 producer = tx_ring->producer; 383 u32 producer = tx_ring->producer;
370 384
@@ -547,7 +561,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc)
547netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 561netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
548{ 562{
549 struct qlcnic_adapter *adapter = netdev_priv(netdev); 563 struct qlcnic_adapter *adapter = netdev_priv(netdev);
550 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; 564 struct qlcnic_host_tx_ring *tx_ring;
551 struct qlcnic_cmd_buffer *pbuf; 565 struct qlcnic_cmd_buffer *pbuf;
552 struct qlcnic_skb_frag *buffrag; 566 struct qlcnic_skb_frag *buffrag;
553 struct cmd_desc_type0 *hwdesc, *first_desc; 567 struct cmd_desc_type0 *hwdesc, *first_desc;
@@ -556,10 +570,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
556 int i, k, frag_count, delta = 0; 570 int i, k, frag_count, delta = 0;
557 u32 producer, num_txd; 571 u32 producer, num_txd;
558 572
559 num_txd = tx_ring->num_desc;
560
561 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 573 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
562 netif_stop_queue(netdev); 574 netif_tx_stop_all_queues(netdev);
563 return NETDEV_TX_BUSY; 575 return NETDEV_TX_BUSY;
564 } 576 }
565 577
@@ -569,7 +581,14 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
569 goto drop_packet; 581 goto drop_packet;
570 } 582 }
571 583
584 if (qlcnic_check_multi_tx(adapter))
585 tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
586 else
587 tx_ring = &adapter->tx_ring[0];
588 num_txd = tx_ring->num_desc;
589
572 frag_count = skb_shinfo(skb)->nr_frags + 1; 590 frag_count = skb_shinfo(skb)->nr_frags + 1;
591
573 /* 14 frags supported for normal packet and 592 /* 14 frags supported for normal packet and
574 * 32 frags supported for TSO packet 593 * 32 frags supported for TSO packet
575 */ 594 */
@@ -584,11 +603,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
584 } 603 }
585 604
586 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { 605 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
587 netif_stop_queue(netdev); 606 netif_tx_stop_queue(tx_ring->txq);
588 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { 607 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
589 netif_start_queue(netdev); 608 netif_tx_start_queue(tx_ring->txq);
590 } else { 609 } else {
591 adapter->stats.xmit_off++; 610 adapter->stats.xmit_off++;
611 tx_ring->xmit_off++;
592 return NETDEV_TX_BUSY; 612 return NETDEV_TX_BUSY;
593 } 613 }
594 } 614 }
@@ -643,7 +663,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
643 tx_ring->producer = get_next_index(producer, num_txd); 663 tx_ring->producer = get_next_index(producer, num_txd);
644 smp_mb(); 664 smp_mb();
645 665
646 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb))) 666 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
647 goto unwind_buff; 667 goto unwind_buff;
648 668
649 if (adapter->drv_mac_learn) 669 if (adapter->drv_mac_learn)
@@ -651,6 +671,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
651 671
652 adapter->stats.txbytes += skb->len; 672 adapter->stats.txbytes += skb->len;
653 adapter->stats.xmitcalled++; 673 adapter->stats.xmitcalled++;
674 tx_ring->xmit_called++;
654 675
655 qlcnic_update_cmd_producer(tx_ring); 676 qlcnic_update_cmd_producer(tx_ring);
656 677
@@ -673,7 +694,7 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
673 adapter->ahw->linkup = 0; 694 adapter->ahw->linkup = 0;
674 if (netif_running(netdev)) { 695 if (netif_running(netdev)) {
675 netif_carrier_off(netdev); 696 netif_carrier_off(netdev);
676 netif_stop_queue(netdev); 697 netif_tx_stop_all_queues(netdev);
677 } 698 }
678 } else if (!adapter->ahw->linkup && linkup) { 699 } else if (!adapter->ahw->linkup && linkup) {
679 netdev_info(netdev, "NIC Link is up\n"); 700 netdev_info(netdev, "NIC Link is up\n");
@@ -768,9 +789,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
768 struct net_device *netdev = adapter->netdev; 789 struct net_device *netdev = adapter->netdev;
769 struct qlcnic_skb_frag *frag; 790 struct qlcnic_skb_frag *frag;
770 791
771 if (!spin_trylock(&adapter->tx_clean_lock))
772 return 1;
773
774 sw_consumer = tx_ring->sw_consumer; 792 sw_consumer = tx_ring->sw_consumer;
775 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 793 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
776 794
@@ -788,6 +806,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
788 frag->dma = 0ULL; 806 frag->dma = 0ULL;
789 } 807 }
790 adapter->stats.xmitfinished++; 808 adapter->stats.xmitfinished++;
809 tx_ring->xmit_finished++;
791 dev_kfree_skb_any(buffer->skb); 810 dev_kfree_skb_any(buffer->skb);
792 buffer->skb = NULL; 811 buffer->skb = NULL;
793 } 812 }
@@ -800,10 +819,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
800 if (count && netif_running(netdev)) { 819 if (count && netif_running(netdev)) {
801 tx_ring->sw_consumer = sw_consumer; 820 tx_ring->sw_consumer = sw_consumer;
802 smp_mb(); 821 smp_mb();
803 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { 822 if (netif_tx_queue_stopped(tx_ring->txq) &&
823 netif_carrier_ok(netdev)) {
804 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { 824 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
805 netif_wake_queue(netdev); 825 netif_tx_wake_queue(tx_ring->txq);
806 adapter->stats.xmit_on++; 826 adapter->stats.xmit_on++;
827 tx_ring->xmit_on++;
807 } 828 }
808 } 829 }
809 adapter->tx_timeo_cnt = 0; 830 adapter->tx_timeo_cnt = 0;
@@ -823,7 +844,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
823 */ 844 */
824 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 845 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
825 done = (sw_consumer == hw_consumer); 846 done = (sw_consumer == hw_consumer);
826 spin_unlock(&adapter->tx_clean_lock);
827 847
828 return done; 848 return done;
829} 849}
@@ -833,16 +853,40 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
833 int tx_complete, work_done; 853 int tx_complete, work_done;
834 struct qlcnic_host_sds_ring *sds_ring; 854 struct qlcnic_host_sds_ring *sds_ring;
835 struct qlcnic_adapter *adapter; 855 struct qlcnic_adapter *adapter;
856 struct qlcnic_host_tx_ring *tx_ring;
836 857
837 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); 858 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
838 adapter = sds_ring->adapter; 859 adapter = sds_ring->adapter;
839 tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring, 860 tx_ring = sds_ring->tx_ring;
861
862 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
840 budget); 863 budget);
841 work_done = qlcnic_process_rcv_ring(sds_ring, budget); 864 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
842 if ((work_done < budget) && tx_complete) { 865 if ((work_done < budget) && tx_complete) {
843 napi_complete(&sds_ring->napi); 866 napi_complete(&sds_ring->napi);
844 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 867 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
845 qlcnic_enable_int(sds_ring); 868 qlcnic_enable_int(sds_ring);
869 qlcnic_enable_tx_intr(adapter, tx_ring);
870 }
871 }
872
873 return work_done;
874}
875
876static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
877{
878 struct qlcnic_host_tx_ring *tx_ring;
879 struct qlcnic_adapter *adapter;
880 int work_done;
881
882 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
883 adapter = tx_ring->adapter;
884
885 work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
886 if (work_done) {
887 napi_complete(&tx_ring->napi);
888 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
889 qlcnic_enable_tx_intr(adapter, tx_ring);
846 } 890 }
847 891
848 return work_done; 892 return work_done;
@@ -952,20 +996,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
952 break; 996 break;
953 case 1: 997 case 1:
954 dev_info(dev, "loopback already in progress\n"); 998 dev_info(dev, "loopback already in progress\n");
955 adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; 999 adapter->ahw->diag_cnt = -EINPROGRESS;
956 break; 1000 break;
957 case 2: 1001 case 2:
958 dev_info(dev, "loopback cable is not connected\n"); 1002 dev_info(dev, "loopback cable is not connected\n");
959 adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; 1003 adapter->ahw->diag_cnt = -ENODEV;
960 break; 1004 break;
961 default: 1005 default:
962 dev_info(dev, 1006 dev_info(dev,
963 "loopback configure request failed, err %x\n", 1007 "loopback configure request failed, err %x\n",
964 ret); 1008 ret);
965 adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR; 1009 adapter->ahw->diag_cnt = -EIO;
966 break; 1010 break;
967 } 1011 }
968 break; 1012 break;
1013 case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
1014 qlcnic_dcb_handle_aen(adapter, (void *)&msg);
1015 break;
969 default: 1016 default:
970 break; 1017 break;
971 } 1018 }
@@ -1411,23 +1458,31 @@ void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1411int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, 1458int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
1412 struct net_device *netdev) 1459 struct net_device *netdev)
1413{ 1460{
1414 int ring, max_sds_rings; 1461 int ring;
1415 struct qlcnic_host_sds_ring *sds_ring; 1462 struct qlcnic_host_sds_ring *sds_ring;
1416 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1463 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1464 struct qlcnic_host_tx_ring *tx_ring;
1417 1465
1418 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) 1466 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1419 return -ENOMEM; 1467 return -ENOMEM;
1420 1468
1421 max_sds_rings = adapter->max_sds_rings;
1422
1423 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1469 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1424 sds_ring = &recv_ctx->sds_rings[ring]; 1470 sds_ring = &recv_ctx->sds_rings[ring];
1425 if (ring == adapter->max_sds_rings - 1) 1471 if (qlcnic_check_multi_tx(adapter) &&
1426 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, 1472 !adapter->ahw->diag_test &&
1427 QLCNIC_NETDEV_WEIGHT / max_sds_rings); 1473 (adapter->max_drv_tx_rings > 1)) {
1428 else
1429 netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, 1474 netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
1430 QLCNIC_NETDEV_WEIGHT*2); 1475 NAPI_POLL_WEIGHT);
1476 } else {
1477 if (ring == (adapter->max_sds_rings - 1))
1478 netif_napi_add(netdev, &sds_ring->napi,
1479 qlcnic_poll,
1480 NAPI_POLL_WEIGHT);
1481 else
1482 netif_napi_add(netdev, &sds_ring->napi,
1483 qlcnic_rx_poll,
1484 NAPI_POLL_WEIGHT);
1485 }
1431 } 1486 }
1432 1487
1433 if (qlcnic_alloc_tx_rings(adapter, netdev)) { 1488 if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1435,6 +1490,14 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
1435 return -ENOMEM; 1490 return -ENOMEM;
1436 } 1491 }
1437 1492
1493 if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
1494 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1495 tx_ring = &adapter->tx_ring[ring];
1496 netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
1497 NAPI_POLL_WEIGHT);
1498 }
1499 }
1500
1438 return 0; 1501 return 0;
1439} 1502}
1440 1503
@@ -1443,6 +1506,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
1443 int ring; 1506 int ring;
1444 struct qlcnic_host_sds_ring *sds_ring; 1507 struct qlcnic_host_sds_ring *sds_ring;
1445 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1508 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1509 struct qlcnic_host_tx_ring *tx_ring;
1446 1510
1447 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1511 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1448 sds_ring = &recv_ctx->sds_rings[ring]; 1512 sds_ring = &recv_ctx->sds_rings[ring];
@@ -1450,6 +1514,14 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
1450 } 1514 }
1451 1515
1452 qlcnic_free_sds_rings(adapter->recv_ctx); 1516 qlcnic_free_sds_rings(adapter->recv_ctx);
1517
1518 if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
1519 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1520 tx_ring = &adapter->tx_ring[ring];
1521 netif_napi_del(&tx_ring->napi);
1522 }
1523 }
1524
1453 qlcnic_free_tx_rings(adapter); 1525 qlcnic_free_tx_rings(adapter);
1454} 1526}
1455 1527
@@ -1457,6 +1529,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
1457{ 1529{
1458 int ring; 1530 int ring;
1459 struct qlcnic_host_sds_ring *sds_ring; 1531 struct qlcnic_host_sds_ring *sds_ring;
1532 struct qlcnic_host_tx_ring *tx_ring;
1460 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1533 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1461 1534
1462 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 1535 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1467,12 +1540,24 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
1467 napi_enable(&sds_ring->napi); 1540 napi_enable(&sds_ring->napi);
1468 qlcnic_enable_int(sds_ring); 1541 qlcnic_enable_int(sds_ring);
1469 } 1542 }
1543
1544 if (qlcnic_check_multi_tx(adapter) &&
1545 (adapter->flags & QLCNIC_MSIX_ENABLED) &&
1546 !adapter->ahw->diag_test &&
1547 (adapter->max_drv_tx_rings > 1)) {
1548 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1549 tx_ring = &adapter->tx_ring[ring];
1550 napi_enable(&tx_ring->napi);
1551 qlcnic_enable_tx_intr(adapter, tx_ring);
1552 }
1553 }
1470} 1554}
1471 1555
1472void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) 1556void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
1473{ 1557{
1474 int ring; 1558 int ring;
1475 struct qlcnic_host_sds_ring *sds_ring; 1559 struct qlcnic_host_sds_ring *sds_ring;
1560 struct qlcnic_host_tx_ring *tx_ring;
1476 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1561 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1477 1562
1478 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 1563 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1484,6 +1569,17 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
1484 napi_synchronize(&sds_ring->napi); 1569 napi_synchronize(&sds_ring->napi);
1485 napi_disable(&sds_ring->napi); 1570 napi_disable(&sds_ring->napi);
1486 } 1571 }
1572
1573 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1574 !adapter->ahw->diag_test &&
1575 qlcnic_check_multi_tx(adapter)) {
1576 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1577 tx_ring = &adapter->tx_ring[ring];
1578 qlcnic_disable_tx_int(adapter, tx_ring);
1579 napi_synchronize(&tx_ring->napi);
1580 napi_disable(&tx_ring->napi);
1581 }
1582 }
1487} 1583}
1488 1584
1489#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36) 1585#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
@@ -1864,7 +1960,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1864int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, 1960int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1865 struct net_device *netdev) 1961 struct net_device *netdev)
1866{ 1962{
1867 int ring, max_sds_rings, temp; 1963 int ring;
1868 struct qlcnic_host_sds_ring *sds_ring; 1964 struct qlcnic_host_sds_ring *sds_ring;
1869 struct qlcnic_host_tx_ring *tx_ring; 1965 struct qlcnic_host_tx_ring *tx_ring;
1870 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1966 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1872,25 +1968,22 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1872 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) 1968 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1873 return -ENOMEM; 1969 return -ENOMEM;
1874 1970
1875 max_sds_rings = adapter->max_sds_rings;
1876 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1971 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1877 sds_ring = &recv_ctx->sds_rings[ring]; 1972 sds_ring = &recv_ctx->sds_rings[ring];
1878 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1973 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1879 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) { 1974 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
1880 netif_napi_add(netdev, &sds_ring->napi, 1975 netif_napi_add(netdev, &sds_ring->napi,
1881 qlcnic_83xx_rx_poll, 1976 qlcnic_83xx_rx_poll,
1882 QLCNIC_NETDEV_WEIGHT * 2); 1977 NAPI_POLL_WEIGHT);
1883 } else { 1978 else
1884 temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
1885 netif_napi_add(netdev, &sds_ring->napi, 1979 netif_napi_add(netdev, &sds_ring->napi,
1886 qlcnic_83xx_msix_sriov_vf_poll, 1980 qlcnic_83xx_msix_sriov_vf_poll,
1887 temp); 1981 NAPI_POLL_WEIGHT);
1888 }
1889 1982
1890 } else { 1983 } else {
1891 netif_napi_add(netdev, &sds_ring->napi, 1984 netif_napi_add(netdev, &sds_ring->napi,
1892 qlcnic_83xx_poll, 1985 qlcnic_83xx_poll,
1893 QLCNIC_NETDEV_WEIGHT / max_sds_rings); 1986 NAPI_POLL_WEIGHT);
1894 } 1987 }
1895 } 1988 }
1896 1989
@@ -1905,7 +1998,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1905 tx_ring = &adapter->tx_ring[ring]; 1998 tx_ring = &adapter->tx_ring[ring];
1906 netif_napi_add(netdev, &tx_ring->napi, 1999 netif_napi_add(netdev, &tx_ring->napi,
1907 qlcnic_83xx_msix_tx_poll, 2000 qlcnic_83xx_msix_tx_poll,
1908 QLCNIC_NETDEV_WEIGHT); 2001 NAPI_POLL_WEIGHT);
1909 } 2002 }
1910 } 2003 }
1911 2004
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index bc05d016c859..c4c5023e1fdf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -100,6 +100,8 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
 	{0,}
 };
 
@@ -146,6 +148,11 @@ static const u32 qlcnic_reg_tbl[] = {
 
 static const struct qlcnic_board_info qlcnic_boards[] = {
 	{ PCI_VENDOR_ID_QLOGIC,
+	  PCI_DEVICE_ID_QLOGIC_QLE844X,
+	  0x0,
+	  0x0,
+	  "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+	{ PCI_VENDOR_ID_QLOGIC,
 	  PCI_DEVICE_ID_QLOGIC_QLE834X,
 	  PCI_VENDOR_ID_QLOGIC,
 	  0x24e,
@@ -254,7 +261,6 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
 };
 
 #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
-#define QLC_MAX_SDS_RINGS	8
 
 static const
 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -278,12 +284,15 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
 
 int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 {
-	u8 mac_addr[ETH_ALEN];
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
+	u8 mac_addr[ETH_ALEN];
+	int ret;
 
-	if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
-		return -EIO;
+	ret = qlcnic_get_mac_address(adapter, mac_addr,
+				     adapter->ahw->pci_func);
+	if (ret)
+		return ret;
 
 	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
 	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -425,6 +434,21 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
 		cancel_delayed_work_sync(&adapter->fw_work);
 }
 
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+				   struct netdev_phys_port_id *ppid)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+	if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+		return -EOPNOTSUPP;
+
+	ppid->id_len = sizeof(ahw->phys_port_id);
+	memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+	return 0;
+}
+
 static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_open	   = qlcnic_open,
 	.ndo_stop	   = qlcnic_close,
@@ -442,6 +466,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_fdb_add		= qlcnic_fdb_add,
 	.ndo_fdb_del		= qlcnic_fdb_del,
 	.ndo_fdb_dump		= qlcnic_fdb_dump,
+	.ndo_get_phys_port_id	= qlcnic_get_phys_port_id,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = qlcnic_poll_controller,
 #endif
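The new .ndo_get_phys_port_id callback above is the driver side of the "physical port ID" abstraction introduced in this merge: report an opaque identifier shared by every function on one physical port. A hedged sketch of the same shape for a hypothetical driver — the priv fields are assumptions, while struct netdev_phys_port_id and MAX_PHYS_PORT_ID_LEN are the real 3.12-era netdev definitions:

#include <linux/bug.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct my_priv {
	bool has_port_id;		/* assumed: firmware exposed an ID */
	u8 port_id[ETH_ALEN];		/* often the burned-in port MAC */
};

static int my_get_phys_port_id(struct net_device *netdev,
			       struct netdev_phys_port_id *ppid)
{
	struct my_priv *priv = netdev_priv(netdev);

	if (!priv->has_port_id)
		return -EOPNOTSUPP;	/* feature absent: say so, don't fake */

	BUILD_BUG_ON(sizeof(priv->port_id) > MAX_PHYS_PORT_ID_LEN);
	ppid->id_len = sizeof(priv->port_id);
	memcpy(ppid->id, priv->port_id, ppid->id_len);
	return 0;
}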
@@ -514,13 +539,36 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
 	.get_board_info			= qlcnic_82xx_get_board_info,
 	.set_mac_filter_count		= qlcnic_82xx_set_mac_filter_count,
 	.free_mac_list			= qlcnic_82xx_free_mac_list,
+	.read_phys_port_id		= qlcnic_82xx_read_phys_port_id,
+	.io_error_detected		= qlcnic_82xx_io_error_detected,
+	.io_slot_reset			= qlcnic_82xx_io_slot_reset,
+	.io_resume			= qlcnic_82xx_io_resume,
 };
 
+static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int num_tx_q;
+
+	if (ahw->msix_supported &&
+	    (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+		num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
+				 num_online_cpus());
+		if (num_tx_q > 1) {
+			test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
+					 &adapter->state);
+			adapter->max_drv_tx_rings = num_tx_q;
+		}
+	} else {
+		adapter->max_drv_tx_rings = 1;
+	}
+}
+
 int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 {
 	struct pci_dev *pdev = adapter->pdev;
+	int max_tx_rings, max_sds_rings, tx_vector;
 	int err = -1, i;
-	int max_tx_rings, tx_vector;
 
 	if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
 		max_tx_rings = 0;
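qlcnic_get_multiq_capability() above gates multi-TX on an MSI-X-capable firmware bit and caps the TX ring count at the number of online CPUs. The heuristic in isolation, as a sketch with illustrative names (DEF_TX_RINGS stands in for QLCNIC_DEF_NUM_TX_RINGS):

#include <linux/cpumask.h>
#include <linux/kernel.h>

#define DEF_TX_RINGS	4	/* assumed default, cf. QLCNIC_DEF_NUM_TX_RINGS */

static int pick_tx_ring_count(bool msix_ok, bool fw_multi_tx)
{
	/* never ask for more TX queues than CPUs that can service them */
	if (msix_ok && fw_multi_tx)
		return min_t(int, DEF_TX_RINGS, num_online_cpus());
	return 1;	/* legacy/MSI or no firmware support: single queue */
}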
@@ -554,7 +602,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 			adapter->max_sds_rings = num_msix -
 						 max_tx_rings - 1;
 		} else {
-			adapter->max_sds_rings = num_msix;
+			adapter->ahw->num_msix = num_msix;
+			if (qlcnic_check_multi_tx(adapter) &&
+			    !adapter->ahw->diag_test &&
+			    (adapter->max_drv_tx_rings > 1))
+				max_sds_rings = num_msix - max_tx_rings;
+			else
+				max_sds_rings = num_msix;
+
+			adapter->max_sds_rings = max_sds_rings;
 		}
 		dev_info(&pdev->dev, "using msi-x interrupts\n");
 		return err;
@@ -570,6 +626,8 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 			num_msix += (max_tx_rings + 1);
 		} else {
 			num_msix = rounddown_pow_of_two(err);
+			if (qlcnic_check_multi_tx(adapter))
+				num_msix += max_tx_rings;
 		}
 
 		if (num_msix) {
@@ -605,6 +663,7 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
 		adapter->msix_entries[0].vector = pdev->irq;
 		return err;
 	}
+
 	if (qlcnic_use_msi || qlcnic_use_msi_x)
 		return -EOPNOTSUPP;
 
@@ -621,28 +680,69 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
 	return err;
 }
 
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int num_msix, err = 0;
 
 	if (!num_intr)
 		num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
 
-	if (adapter->ahw->msix_supported)
+	if (ahw->msix_supported) {
 		num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
 						      num_intr));
-	else
+		if (qlcnic_check_multi_tx(adapter)) {
+			if (txq)
+				adapter->max_drv_tx_rings = txq;
+			num_msix += adapter->max_drv_tx_rings;
+		}
+	} else {
 		num_msix = 1;
+	}
 
 	err = qlcnic_enable_msix(adapter, num_msix);
-	if (err == -ENOMEM || !err)
+	if (err == -ENOMEM)
 		return err;
 
-	err = qlcnic_enable_msi_legacy(adapter);
-	if (!err)
+	if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+		qlcnic_disable_multi_tx(adapter);
+
+		err = qlcnic_enable_msi_legacy(adapter);
+		if (!err)
+			return err;
+	}
+
+	return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int err, i;
+
+	if (qlcnic_check_multi_tx(adapter) &&
+	    !ahw->diag_test &&
+	    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+		ahw->intr_tbl = vzalloc(ahw->num_msix *
+					sizeof(struct qlcnic_intrpt_config));
+		if (!ahw->intr_tbl)
+			return -ENOMEM;
+
+		for (i = 0; i < ahw->num_msix; i++) {
+			ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+			ahw->intr_tbl[i].id = i;
+			ahw->intr_tbl[i].src = 0;
+		}
+
+		err = qlcnic_82xx_config_intrpt(adapter, 1);
+		if (err)
+			dev_err(&adapter->pdev->dev,
+				"Failed to configure Interrupt for %d vector\n",
+				ahw->num_msix);
 		return err;
+	}
 
-	return -EIO;
+	return 0;
 }
 
 void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
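The reworked qlcnic_82xx_setup_intr() above budgets MSI-X vectors as a power-of-two number of status (SDS) rings bounded by the CPU count, plus one vector per TX ring when multi-TX is active. A standalone sketch of that arithmetic, with illustrative names:

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/log2.h>

static u32 budget_msix_vectors(u32 requested_sds, u32 tx_rings,
			       bool multi_tx)
{
	u32 num_msix;

	/* SDS vectors: power of two, never more than online CPUs */
	num_msix = rounddown_pow_of_two(min_t(u32, num_online_cpus(),
					      requested_sds));
	if (multi_tx)
		num_msix += tx_rings;	/* one TX completion vector each */
	return num_msix;
}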
@@ -696,6 +796,23 @@ static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
 	return ret;
 }
 
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+	bool ret = false;
+
+	if (qlcnic_84xx_check(adapter)) {
+		ret = true;
+	} else if (qlcnic_83xx_check(adapter)) {
+		if (adapter->ahw->extra_capability[0] &
+		    QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+			ret = true;
+		else
+			ret = false;
+	}
+
+	return ret;
+}
+
 int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_pci_info *pci_info;
@@ -739,18 +856,30 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 		    (pci_info[i].type != QLCNIC_TYPE_NIC))
 			continue;
 
+		if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+			if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+				adapter->npars[j].eswitch_status = true;
+			else
+				continue;
+		} else {
+			adapter->npars[j].eswitch_status = true;
+		}
+
 		adapter->npars[j].pci_func = pfn;
 		adapter->npars[j].active = (u8)pci_info[i].active;
 		adapter->npars[j].type = (u8)pci_info[i].type;
 		adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
 		adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
 		adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
 		j++;
 	}
 
-	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
-		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
-		if (qlcnic_83xx_check(adapter))
-			qlcnic_enable_eswitch(adapter, i, 1);
+	if (qlcnic_82xx_check(adapter)) {
+		for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+			adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+	} else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+		for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+			qlcnic_enable_eswitch(adapter, i, 1);
 	}
 
@@ -829,7 +958,9 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
 		*bar = QLCNIC_82XX_BAR0_LENGTH;
 		break;
 	case PCI_DEVICE_ID_QLOGIC_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_QLE844X:
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
 		*bar = QLCNIC_83XX_BAR0_LENGTH;
 		break;
 	default:
@@ -870,8 +1001,8 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
 	return 0;
 }
 
-static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
-						int index)
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+					 int index)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned short subsystem_vendor;
@@ -1173,6 +1304,9 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
 		return 0;
 
 	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+		if (!adapter->npars[i].eswitch_status)
+			continue;
+
 		memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
 		esw_cfg.pci_func = adapter->npars[i].pci_func;
 		esw_cfg.mac_override = BIT_0;
@@ -1235,6 +1369,9 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
 	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
 		npar = &adapter->npars[i];
 		pci_func = npar->pci_func;
+		if (!adapter->npars[i].eswitch_status)
+			continue;
+
 		memset(&nic_info, 0, sizeof(struct qlcnic_info));
 		err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
 		if (err)
@@ -1413,6 +1550,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
 		for (ring = 0; ring < num_sds_rings; ring++) {
 			sds_ring = &recv_ctx->sds_rings[ring];
 			if (qlcnic_82xx_check(adapter) &&
+			    !qlcnic_check_multi_tx(adapter) &&
 			    (ring == (num_sds_rings - 1))) {
 				if (!(adapter->flags &
 				      QLCNIC_MSIX_ENABLED))
@@ -1436,9 +1574,11 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
 					return err;
 			}
 		}
-		if (qlcnic_83xx_check(adapter) &&
-		    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
-		    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+		if ((qlcnic_82xx_check(adapter) &&
+		     qlcnic_check_multi_tx(adapter)) ||
+		    (qlcnic_83xx_check(adapter) &&
+		     (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+		     !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
 			handler = qlcnic_msix_tx_intr;
 			for (ring = 0; ring < adapter->max_drv_tx_rings;
 			     ring++) {
@@ -1473,8 +1613,10 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
 				free_irq(sds_ring->irq, sds_ring);
 			}
 		}
-		if (qlcnic_83xx_check(adapter) &&
-		    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+		if ((qlcnic_83xx_check(adapter) &&
+		     !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+		    (qlcnic_82xx_check(adapter) &&
+		     qlcnic_check_multi_tx(adapter))) {
 			for (ring = 0; ring < adapter->max_drv_tx_rings;
 			     ring++) {
 				tx_ring = &adapter->tx_ring[ring];
@@ -1510,8 +1652,10 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
 	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		return 0;
+
 	if (qlcnic_set_eswitch_port_config(adapter))
 		return -EIO;
+
 	qlcnic_get_lro_mss_capability(adapter);
 
 	if (qlcnic_fw_create_ctx(adapter))
@@ -1558,6 +1702,8 @@ int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
 void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 {
+	int ring;
+
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
 		return;
 
@@ -1567,7 +1713,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	if (qlcnic_sriov_vf_check(adapter))
 		qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
 	smp_mb();
-	spin_lock(&adapter->tx_clean_lock);
 	netif_carrier_off(netdev);
 	adapter->ahw->linkup = 0;
 	netif_tx_disable(netdev);
@@ -1585,8 +1730,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
 
 	qlcnic_reset_rx_buffers_list(adapter);
-	qlcnic_release_tx_buffers(adapter);
-	spin_unlock(&adapter->tx_clean_lock);
+
+	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+		qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
 }
 
 /* Usage: During suspend and firmware recovery module */
@@ -1666,6 +1812,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_host_sds_ring *sds_ring;
+	int max_tx_rings = adapter->max_drv_tx_rings;
 	int ring;
 
 	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
@@ -1682,6 +1829,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
 
 	adapter->ahw->diag_test = 0;
 	adapter->max_sds_rings = max_sds_rings;
+	adapter->max_drv_tx_rings = max_tx_rings;
 
 	if (qlcnic_attach(adapter))
 		goto out;
@@ -1750,6 +1898,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
 	adapter->max_sds_rings = 1;
 	adapter->ahw->diag_test = test;
 	adapter->ahw->linkup = 0;
+	adapter->max_drv_tx_rings = 1;
 
 	ret = qlcnic_attach(adapter);
 	if (ret) {
@@ -1907,12 +2056,18 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->irq = adapter->msix_entries[0].vector;
 
+	err = qlcnic_set_real_num_queues(adapter, netdev);
+	if (err)
+		return err;
+
 	err = register_netdev(netdev);
 	if (err) {
 		dev_err(&pdev->dev, "failed to register net device\n");
 		return err;
 	}
 
+	qlcnic_dcb_init_dcbnl_ops(adapter);
+
 	return 0;
 }
 
@@ -1975,7 +2130,8 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
 		tx_ring->cmd_buf_arr = cmd_buf_arr;
 	}
 
-	if (qlcnic_83xx_check(adapter)) {
+	if (qlcnic_83xx_check(adapter) ||
+	    (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
 		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			tx_ring->adapter = adapter;
@@ -1986,6 +2142,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
 			}
 		}
 	}
+
 	return 0;
 }
 
@@ -2004,6 +2161,17 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
 		qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }
 
+static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+	return __qlcnic_register_dcb(adapter);
+}
+
+void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
+{
+	kfree(adapter->dcb);
+	adapter->dcb = NULL;
+}
+
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2048,9 +2216,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
 		break;
 	case PCI_DEVICE_ID_QLOGIC_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_QLE844X:
 		qlcnic_83xx_register_map(ahw);
 		break;
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
 		qlcnic_sriov_vf_register_map(ahw);
 		break;
 	default:
@@ -2061,7 +2231,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_out_free_hw_res;
 
-	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+	netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+				   QLCNIC_MAX_TX_RINGS);
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_out_iounmap;
@@ -2091,14 +2262,14 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		adapter->fdb_mac_learn = true;
 	else if (qlcnic_mac_learn == DRV_MAC_LEARN)
 		adapter->drv_mac_learn = true;
-	adapter->max_drv_tx_rings = 1;
 
 	rwlock_init(&adapter->ahw->crb_lock);
 	mutex_init(&adapter->ahw->mem_lock);
 
-	spin_lock_init(&adapter->tx_clean_lock);
 	INIT_LIST_HEAD(&adapter->mac_list);
 
+	qlcnic_register_dcb(adapter);
+
 	if (qlcnic_82xx_check(adapter)) {
 		qlcnic_check_vf(adapter, ent);
 		adapter->portnum = adapter->ahw->pci_func;
@@ -2108,12 +2279,31 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_free_hw;
 		}
 
+		qlcnic_get_multiq_capability(adapter);
+
+		if ((adapter->ahw->act_pci_func > 2) &&
+		    qlcnic_check_multi_tx(adapter)) {
+			adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
+			dev_info(&adapter->pdev->dev,
+				 "vNIC mode enabled, Set max TX rings = %d\n",
+				 adapter->max_drv_tx_rings);
+		}
+
+		if (!qlcnic_check_multi_tx(adapter)) {
+			clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+			adapter->max_drv_tx_rings = 1;
+		}
 		err = qlcnic_setup_idc_param(adapter);
 		if (err)
 			goto err_out_free_hw;
 
 		adapter->flags |= QLCNIC_NEED_FLR;
+
+		if (adapter->dcb && qlcnic_dcb_attach(adapter))
+			qlcnic_clear_dcb_ops(adapter);
+
 	} else if (qlcnic_83xx_check(adapter)) {
+		adapter->max_drv_tx_rings = 1;
 		qlcnic_83xx_check_vf(adapter, ent);
 		adapter->portnum = adapter->ahw->pci_func;
 		err = qlcnic_83xx_init(adapter, pci_using_dac);
@@ -2132,6 +2322,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (qlcnic_read_mac_addr(adapter))
 		dev_warn(&pdev->dev, "failed to read mac addr\n");
 
+	qlcnic_read_phys_port_id(adapter);
+
 	if (adapter->portnum == 0) {
 		qlcnic_get_board_name(adapter, board_name);
 
@@ -2145,16 +2337,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev_warn(&pdev->dev,
 			 "Device does not support MSI interrupts\n");
 
-	err = qlcnic_setup_intr(adapter, 0);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to setup interrupt\n");
-		goto err_out_disable_msi;
-	}
-
-	if (qlcnic_83xx_check(adapter)) {
-		err = qlcnic_83xx_setup_mbx_intr(adapter);
-		if (err)
+	if (qlcnic_82xx_check(adapter)) {
+		err = qlcnic_setup_intr(adapter, 0, 0);
+		if (err) {
+			dev_err(&pdev->dev, "Failed to setup interrupt\n");
 			goto err_out_disable_msi;
+		}
 	}
 
 	err = qlcnic_get_act_pci_func(adapter);
@@ -2238,13 +2426,18 @@ static void qlcnic_remove(struct pci_dev *pdev)
 	qlcnic_cancel_idc_work(adapter);
 	ahw = adapter->ahw;
 
+	qlcnic_dcb_free(adapter);
+
 	unregister_netdev(netdev);
 	qlcnic_sriov_cleanup(adapter);
 
 	if (qlcnic_83xx_check(adapter)) {
-		qlcnic_83xx_free_mbx_intr(adapter);
 		qlcnic_83xx_register_nic_idc_func(adapter, 0);
 		cancel_delayed_work_sync(&adapter->idc_aen_work);
+		qlcnic_83xx_free_mbx_intr(adapter);
+		qlcnic_83xx_detach_mailbox_work(adapter);
+		qlcnic_83xx_free_mailbox(ahw->mailbox);
+		kfree(ahw->fw_info);
 	}
 
 	qlcnic_detach(adapter);
@@ -2278,6 +2471,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
 		destroy_workqueue(adapter->qlcnic_wq);
 		adapter->qlcnic_wq = NULL;
 	}
+
 	qlcnic_free_adapter_resources(adapter);
 	kfree(ahw);
 	free_netdev(netdev);
@@ -2336,7 +2530,7 @@ static int qlcnic_open(struct net_device *netdev)
 	if (err)
 		goto err_out;
 
-	netif_start_queue(netdev);
+	netif_tx_start_all_queues(netdev);
 
 	return 0;
 
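qlcnic_open() now calls netif_tx_start_all_queues() because the netdev is allocated multi-queue (alloc_etherdev_mq() earlier in this patch) and netif_start_queue() would only wake queue 0. A hedged sketch of the allocate/trim/start lifecycle for a hypothetical driver — MAX_TXQ, my_priv and 'negotiated' are assumptions; the netdev calls are the real API:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define MAX_TXQ	8			/* allocation-time ceiling */

struct my_priv { int placeholder; };

static struct net_device *my_alloc(unsigned int negotiated)
{
	struct net_device *netdev;

	netdev = alloc_etherdev_mq(sizeof(struct my_priv), MAX_TXQ);
	if (!netdev)
		return NULL;

	/* tell the stack how many TX queues are actually usable */
	if (netif_set_real_num_tx_queues(netdev, negotiated)) {
		free_netdev(netdev);
		return NULL;
	}
	return netdev;
}

static int my_open(struct net_device *netdev)
{
	netif_tx_start_all_queues(netdev);	/* wake every TX queue */
	return 0;
}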
@@ -2468,6 +2662,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
 static void qlcnic_tx_timeout(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_host_tx_ring *tx_ring;
+	int ring;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return;
@@ -2481,6 +2677,25 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
 					     QLCNIC_FORCE_FW_DUMP_KEY);
 	} else {
 		netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+		if (qlcnic_82xx_check(adapter)) {
+			for (ring = 0; ring < adapter->max_drv_tx_rings;
+			     ring++) {
+				tx_ring = &adapter->tx_ring[ring];
+				dev_info(&netdev->dev, "ring=%d\n", ring);
+				dev_info(&netdev->dev, "crb_intr_mask=%d\n",
+					 readl(tx_ring->crb_intr_mask));
+				dev_info(&netdev->dev, "producer=%d\n",
+					 readl(tx_ring->crb_cmd_producer));
+				dev_info(&netdev->dev, "sw_consumer = %d\n",
+					 tx_ring->sw_consumer);
+				dev_info(&netdev->dev, "hw_consumer = %d\n",
+					 le32_to_cpu(*(tx_ring->hw_consumer)));
+				dev_info(&netdev->dev, "xmit-on=%llu\n",
+					 tx_ring->xmit_on);
+				dev_info(&netdev->dev, "xmit-off=%llu\n",
+					 tx_ring->xmit_off);
+			}
+		}
 		adapter->ahw->reset_context = 1;
 	}
 }
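The expanded timeout handler above prints per-ring producer/consumer state so a wedged ring can be spotted from the log. A rough sketch of the underlying check — the ring layout here is illustrative, not the driver's real struct:

#include <linux/netdevice.h>

struct my_tx_ring {
	u32 sw_consumer;	/* last descriptor the driver reclaimed */
	__le32 *hw_consumer;	/* written back by the NIC via DMA */
};

static void dump_tx_ring(struct net_device *netdev, int idx,
			 struct my_tx_ring *ring)
{
	u32 hw = le32_to_cpu(*ring->hw_consumer);

	netdev_info(netdev, "ring %d: sw_consumer=%u hw_consumer=%u\n",
		    idx, ring->sw_consumer, hw);
	/* a persistent gap with the queue stopped suggests a stalled ring */
	if (ring->sw_consumer != hw)
		netdev_info(netdev, "ring %d: completions outstanding\n", idx);
}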
@@ -2869,7 +3084,7 @@ skip_ack_check:
 	qlcnic_api_unlock(adapter);
 
 	rtnl_lock();
-	if (adapter->ahw->fw_dump.enable &&
+	if (qlcnic_check_fw_dump_state(adapter) &&
 	    (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
 		QLCDB(adapter, DRV, "Take FW dump\n");
 		qlcnic_dump_fw(adapter);
@@ -3074,6 +3289,8 @@ qlcnic_attach_work(struct work_struct *work)
 		return;
 	}
 attach:
+	qlcnic_dcb_get_info(adapter);
+
 	if (netif_running(netdev)) {
 		if (qlcnic_up(adapter, netdev))
 			goto done;
@@ -3245,7 +3462,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
 	qlcnic_clr_drv_state(adapter);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
-	err = qlcnic_setup_intr(adapter, 0);
+	err = qlcnic_setup_intr(adapter, 0, 0);
 
 	if (err) {
 		kfree(adapter->msix_entries);
@@ -3253,19 +3470,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
 		return err;
 	}
 
-	if (qlcnic_83xx_check(adapter)) {
-		/* register for NIC IDC AEN Events */
-		qlcnic_83xx_register_nic_idc_func(adapter, 1);
-		err = qlcnic_83xx_setup_mbx_intr(adapter);
-		if (err) {
-			dev_err(&adapter->pdev->dev,
-				"failed to setup mbx interrupt\n");
-			qlcnic_clr_all_drv_state(adapter, 1);
-			clear_bit(__QLCNIC_AER, &adapter->state);
-			goto done;
-		}
-	}
-
 	if (netif_running(netdev)) {
 		err = qlcnic_attach(adapter);
 		if (err) {
@@ -3286,8 +3490,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
 	return err;
 }
 
-static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
-						 pci_channel_state_t state)
+pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
 {
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;
@@ -3306,12 +3510,6 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
 	if (netif_running(netdev))
 		qlcnic_down(adapter, netdev);
 
-	if (qlcnic_83xx_check(adapter)) {
-		qlcnic_83xx_free_mbx_intr(adapter);
-		qlcnic_83xx_register_nic_idc_func(adapter, 0);
-		cancel_delayed_work_sync(&adapter->idc_aen_work);
-	}
-
 	qlcnic_detach(adapter);
 	qlcnic_teardown_intr(adapter);
 
@@ -3323,13 +3521,13 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
-static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
 {
 	return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
 				PCI_ERS_RESULT_RECOVERED;
 }
 
-static void qlcnic_io_resume(struct pci_dev *pdev)
+void qlcnic_82xx_io_resume(struct pci_dev *pdev)
 {
 	u32 state;
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3339,9 +3537,48 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
 	if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
 							    &adapter->state))
 		qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
 				     FW_POLL_DELAY);
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+						 pci_channel_state_t state)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+	if (hw_ops->io_error_detected) {
+		return hw_ops->io_error_detected(pdev, state);
+	} else {
+		dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+	if (hw_ops->io_slot_reset) {
+		return hw_ops->io_slot_reset(pdev);
+	} else {
+		dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+	if (hw_ops->io_resume)
+		hw_ops->io_resume(pdev);
+	else
+		dev_err(&pdev->dev, "AER resume handler not registered.\n");
 }
 
+
 static int
 qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
 {
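The three new static wrappers above route the generic PCI AER callbacks through the per-ASIC hardware ops table, so 82xx and 83xx parts can plug in different recovery paths. The idiom in miniature — the ops struct is a stand-in for the driver's qlcnic_hardware_ops, not its real layout:

#include <linux/pci.h>

struct my_err_ops {
	pci_ers_result_t (*error_detected)(struct pci_dev *pdev,
					   pci_channel_state_t state);
};

static pci_ers_result_t my_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	/* assumes probe stored the ops table via pci_set_drvdata() */
	struct my_err_ops *ops = pci_get_drvdata(pdev);

	if (!ops || !ops->error_detected) {
		dev_err(&pdev->dev, "no error_detected handler registered\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return ops->error_detected(pdev, state);
}

The same shape repeats for slot_reset and resume; failing closed with PCI_ERS_RESULT_DISCONNECT when no handler is registered is the conservative choice.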
@@ -3370,16 +3607,65 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
 	return err;
 }
 
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+{
+	struct net_device *netdev = adapter->netdev;
+	u8 max_hw = QLCNIC_MAX_TX_RINGS;
+	u32 max_allowed;
+
+	if (!qlcnic_82xx_check(adapter)) {
+		netdev_err(netdev, "No Multi TX-Q support\n");
+		return -EINVAL;
+	}
+
+	if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+		netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+		return -EINVAL;
+	}
+
+	if (!qlcnic_check_multi_tx(adapter)) {
+		netdev_err(netdev, "No Multi TX-Q support\n");
+		return -EINVAL;
+	}
+
+	if (txq > QLCNIC_MAX_TX_RINGS) {
+		netdev_err(netdev, "Invalid ring count\n");
+		return -EINVAL;
+	}
+
+	max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+						 num_online_cpus()));
+	if ((txq > max_allowed) || !is_power_of_2(txq)) {
+		if (!is_power_of_2(txq))
+			netdev_err(netdev,
+				   "TX queue should be a power of 2\n");
+		if (txq > num_online_cpus())
+			netdev_err(netdev,
+				   "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
+				   num_online_cpus());
+		netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
 			    __u32 val)
 {
 	struct net_device *netdev = adapter->netdev;
 	u8 max_hw = adapter->ahw->max_rx_ques;
 	u32 max_allowed;
 
-	if (val > QLC_MAX_SDS_RINGS) {
+	if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+	    !qlcnic_use_msi) {
+		netdev_err(netdev, "No RSS support in INT-x mode\n");
+		return -EINVAL;
+	}
+
+	if (val > QLCNIC_MAX_SDS_RINGS) {
 		netdev_err(netdev, "RSS value should not be higher than %u\n",
-			   QLC_MAX_SDS_RINGS);
+			   QLCNIC_MAX_SDS_RINGS);
 		return -EINVAL;
 	}
 
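qlcnic_validate_max_tx_rings() above accepts only power-of-two ring counts no larger than min(hardware max, online CPUs) rounded down to a power of two. The core check as a sketch (the constant is illustrative):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#define HW_MAX_TX_RINGS	8	/* assumed hardware ceiling */

static int validate_tx_rings(u32 txq)
{
	u32 max_allowed = rounddown_pow_of_two(min_t(u32, HW_MAX_TX_RINGS,
						     num_online_cpus()));

	/* must be a power of two and within the CPU-bounded maximum */
	if (!is_power_of_2(txq) || txq > max_allowed)
		return -EINVAL;
	return 0;
}

For example, on a 6-CPU machine max_allowed is rounddown_pow_of_two(min(8, 6)) = 4, so requests for 1, 2 or 4 rings pass while 3, 6 or 8 are rejected.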
@@ -3409,27 +3695,48 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
 	return 0;
 }
 
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
 {
 	int err;
 	struct net_device *netdev = adapter->netdev;
+	int num_msix;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EBUSY;
 
+	if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+	    !qlcnic_use_msi) {
+		netdev_err(netdev, "No RSS support in INT-x mode\n");
+		return -EINVAL;
+	}
+
 	netif_device_detach(netdev);
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);
 
 	qlcnic_detach(adapter);
 
+	if (qlcnic_82xx_check(adapter)) {
+		if (txq != 0)
+			adapter->max_drv_tx_rings = txq;
+
+		if (qlcnic_check_multi_tx(adapter) &&
+		    (txq > adapter->max_drv_tx_rings))
+			num_msix = adapter->max_drv_tx_rings;
+		else
+			num_msix = data;
+	}
+
 	if (qlcnic_83xx_check(adapter)) {
 		qlcnic_83xx_free_mbx_intr(adapter);
 		qlcnic_83xx_enable_mbx_poll(adapter);
 	}
 
+	netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
+
 	qlcnic_teardown_intr(adapter);
-	err = qlcnic_setup_intr(adapter, data);
+
+	err = qlcnic_setup_intr(adapter, data, txq);
 	if (err) {
 		kfree(adapter->msix_entries);
 		netdev_err(netdev, "failed to setup interrupt\n");
@@ -3457,8 +3764,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
 			goto done;
 		qlcnic_restore_indev_addr(netdev, NETDEV_UP);
 	}
-	err = len;
- done:
+done:
 	netif_device_attach(netdev);
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 79e54efe07b9..15513608d480 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1082,14 +1082,17 @@ flash_temp:
 	}
 
 	tmpl_hdr = ahw->fw_dump.tmpl_hdr;
-	tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+	tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+	dev_info(&adapter->pdev->dev,
+		 "Default minidump capture mask 0x%x\n",
+		 tmpl_hdr->cap_mask);
 
 	if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
 		ahw->fw_dump.use_pex_dma = true;
 	else
 		ahw->fw_dump.use_pex_dma = false;
 
-	ahw->fw_dump.enable = 1;
+	qlcnic_enable_fw_dump_state(adapter);
 
 	return 0;
 }
@@ -1112,7 +1115,11 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 
 	ahw = adapter->ahw;
 
-	if (!fw_dump->enable) {
+	/* Return if we don't have firmware dump template header */
+	if (!tmpl_hdr)
+		return -EIO;
+
+	if (!qlcnic_check_fw_dump_state(adapter)) {
 		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
 		return -EIO;
 	}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 5d40045b3cea..652cc13c5023 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -33,7 +33,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
-				  struct qlcnic_cmd_args *);
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
+				  struct qlcnic_cmd_args *);
 static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
 
@@ -45,7 +45,7 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
 	.get_mac_address		= qlcnic_83xx_get_mac_address,
 	.setup_intr			= qlcnic_83xx_setup_intr,
 	.alloc_mbx_args			= qlcnic_83xx_alloc_mbx_args,
-	.mbx_cmd			= qlcnic_sriov_vf_mbx_op,
+	.mbx_cmd			= qlcnic_sriov_issue_cmd,
 	.get_func_no			= qlcnic_83xx_get_func_no,
 	.api_lock			= qlcnic_83xx_cam_lock,
 	.api_unlock			= qlcnic_83xx_cam_unlock,
@@ -286,96 +286,38 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
 				    u32 *pay, u8 pci_func, u8 size)
 {
-	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
-	unsigned long flags;
-	u16 opcode;
-	u8 mbx_err_code;
-	int i, j;
-
-	opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
-
-	if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
-		dev_info(&adapter->pdev->dev,
-			 "Mailbox cmd attempted, 0x%x\n", opcode);
-		dev_info(&adapter->pdev->dev, "Mailbox detached\n");
-		return 0;
-	}
-
-	spin_lock_irqsave(&ahw->mbx_lock, flags);
-
-	mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-	if (mbx_val) {
-		QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
-		spin_unlock_irqrestore(&ahw->mbx_lock, flags);
-		return QLCNIC_RCODE_TIMEOUT;
-	}
-	/* Fill in mailbox registers */
-	val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
-	mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
-
-	writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
-	mbx_cmd = 0x1 | (1 << 4);
+	struct qlcnic_mailbox *mbx = ahw->mailbox;
+	struct qlcnic_cmd_args cmd;
+	unsigned long timeout;
+	int err;
 
-	if (qlcnic_sriov_pf_check(adapter))
-		mbx_cmd |= (pci_func << 5);
+	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+	cmd.hdr = hdr;
+	cmd.pay = pay;
+	cmd.pay_size = size;
+	cmd.func_num = pci_func;
+	cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+	cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
 
-	writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
-	for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
-	     i++, j++) {
-		writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
+	err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+			ahw->op_mode);
+		return err;
 	}
-	for (j = 0; j < size; j++, i++)
-		writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
 
-	/* Signal FW about the impending command */
-	QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-
-	/* Waiting for the mailbox cmd to complete and while waiting here
-	 * some AEN might arrive. If more than 5 seconds expire we can
-	 * assume something is wrong.
-	 */
-poll:
-	rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
-	if (rsp != QLCNIC_RCODE_TIMEOUT) {
-		/* Get the FW response data */
-		fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
-		if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
-			__qlcnic_83xx_process_aen(adapter);
-			goto poll;
-		}
-		mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
-		rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
-		opcode = QLCNIC_MBX_RSP(fw_data);
-
-		switch (mbx_err_code) {
-		case QLCNIC_MBX_RSP_OK:
-		case QLCNIC_MBX_PORT_RSP_OK:
-			rsp = QLCNIC_RCODE_SUCCESS;
-			break;
-		default:
-			if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
-				rsp = qlcnic_83xx_mac_rcode(adapter);
-				if (!rsp)
-					goto out;
-			}
-			dev_err(&adapter->pdev->dev,
-				"MBX command 0x%x failed with err:0x%x\n",
-				opcode, mbx_err_code);
-			rsp = mbx_err_code;
-			break;
-		}
-		goto out;
+	if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+		dev_err(&adapter->pdev->dev,
+			"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+			ahw->op_mode);
+		flush_workqueue(mbx->work_q);
 	}
 
-	dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
-		QLCNIC_MBX_RSP(mbx_cmd));
-	rsp = QLCNIC_RCODE_TIMEOUT;
-out:
-	/* clear fw mbx control register */
-	QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
-	spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-	return rsp;
+	return cmd.rsp_opcode;
 }
 
 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
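The rewritten qlcnic_sriov_post_bc_msg() above stops banging mailbox registers under a spinlock and instead enqueues the command for the mailbox worker, then sleeps on a completion with a bounded wait. A hedged sketch of that enqueue-and-wait shape — the cmd struct, the queueing step and the 5-second budget are stand-ins for the driver's internals:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct my_mbx_cmd {
	struct completion done;	/* fired by the mailbox worker */
	int rsp;		/* response code the worker filled in */
};

static int my_post_cmd(struct my_mbx_cmd *cmd)
{
	unsigned long timeout = msecs_to_jiffies(5000);	/* assumed budget */

	init_completion(&cmd->done);
	/* ... queue_work() so the mailbox worker issues the command ... */

	if (!wait_for_completion_timeout(&cmd->done, timeout))
		return -ETIMEDOUT;	/* worker never signaled us */
	return cmd->rsp;
}

Compared with the deleted polling loop, the caller can sleep, AEN handling moves entirely into the worker, and a timeout leaves the hardware state to the worker rather than to whichever caller raced in first.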
@@ -458,7 +400,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
458static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter, 400static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
459 struct qlcnic_cmd_args *cmd) 401 struct qlcnic_cmd_args *cmd)
460{ 402{
461 adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff; 403 adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
462 adapter->flags &= ~QLCNIC_TAGGING_ENABLED; 404 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
463 return 0; 405 return 0;
464} 406}
@@ -490,11 +432,12 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
490 return 0; 432 return 0;
491} 433}
492 434
493static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) 435static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
436 struct qlcnic_info *info)
494{ 437{
495 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 438 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
496 struct qlcnic_cmd_args cmd; 439 struct qlcnic_cmd_args cmd;
497 int ret; 440 int ret = 0;
498 441
499 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); 442 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
500 if (ret) 443 if (ret)
@@ -522,8 +465,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
522 465
523static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) 466static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
524{ 467{
525 struct qlcnic_info nic_info;
526 struct qlcnic_hardware_context *ahw = adapter->ahw; 468 struct qlcnic_hardware_context *ahw = adapter->ahw;
469 struct qlcnic_info nic_info;
527 int err; 470 int err;
528 471
529 err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0); 472 err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
@@ -534,7 +477,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
534 if (err) 477 if (err)
535 return -EIO; 478 return -EIO;
536 479
537 err = qlcnic_sriov_get_vf_acl(adapter); 480 err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
538 if (err) 481 if (err)
539 return err; 482 return err;
540 483
@@ -564,7 +507,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
564 dev_warn(&adapter->pdev->dev, 507 dev_warn(&adapter->pdev->dev,
565 "Device does not support MSI interrupts\n"); 508 "Device does not support MSI interrupts\n");
566 509
567 err = qlcnic_setup_intr(adapter, 1); 510 err = qlcnic_setup_intr(adapter, 1, 0);
568 if (err) { 511 if (err) {
569 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); 512 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
570 goto err_out_disable_msi; 513 goto err_out_disable_msi;
@@ -590,6 +533,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
590 if (err) 533 if (err)
591 goto err_out_send_channel_term; 534 goto err_out_send_channel_term;
592 535
536 if (adapter->dcb && qlcnic_dcb_attach(adapter))
537 qlcnic_clear_dcb_ops(adapter);
538
593 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); 539 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
594 if (err) 540 if (err)
595 goto err_out_send_channel_term; 541 goto err_out_send_channel_term;
@@ -597,6 +543,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
597 pci_set_drvdata(adapter->pdev, adapter); 543 pci_set_drvdata(adapter->pdev, adapter);
598 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", 544 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
599 adapter->netdev->name); 545 adapter->netdev->name);
546
600 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, 547 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
601 adapter->ahw->idc.delay); 548 adapter->ahw->idc.delay);
602 return 0; 549 return 0;
@@ -637,8 +584,6 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
637 struct qlcnic_hardware_context *ahw = adapter->ahw; 584 struct qlcnic_hardware_context *ahw = adapter->ahw;
638 int err; 585 int err;
639 586
640 spin_lock_init(&ahw->mbx_lock);
641 set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
642 set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status); 587 set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
643 ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; 588 ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
644 ahw->reset_context = 0; 589 ahw->reset_context = 0;
@@ -1085,6 +1030,7 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
1085 if (test_bit(QLC_BC_VF_FLR, &vf->state)) 1030 if (test_bit(QLC_BC_VF_FLR, &vf->state))
1086 return; 1031 return;
1087 1032
1033 memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1088 trans = list_first_entry(&vf->rcv_act.wait_list, 1034 trans = list_first_entry(&vf->rcv_act.wait_list,
1089 struct qlcnic_bc_trans, list); 1035 struct qlcnic_bc_trans, list);
1090 adapter = vf->adapter; 1036 adapter = vf->adapter;
@@ -1234,6 +1180,7 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
1234 return; 1180 return;
1235 } 1181 }
1236 1182
1183 memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1237 cmd_op = hdr->cmd_op; 1184 cmd_op = hdr->cmd_op;
1238 if (qlcnic_sriov_alloc_bc_trans(&trans)) 1185 if (qlcnic_sriov_alloc_bc_trans(&trans))
1239 return; 1186 return;
@@ -1359,7 +1306,7 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1359 if (enable) 1306 if (enable)
1360 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); 1307 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1361 1308
1362 err = qlcnic_83xx_mbx_op(adapter, &cmd); 1309 err = qlcnic_83xx_issue_cmd(adapter, &cmd);
1363 1310
1364 if (err != QLCNIC_RCODE_SUCCESS) { 1311 if (err != QLCNIC_RCODE_SUCCESS) {
1365 dev_err(&adapter->pdev->dev, 1312 dev_err(&adapter->pdev->dev,
@@ -1391,10 +1338,11 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1391 return -EIO; 1338 return -EIO;
1392} 1339}
1393 1340
1394static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter, 1341static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1395 struct qlcnic_cmd_args *cmd) 1342 struct qlcnic_cmd_args *cmd)
1396{ 1343{
1397 struct qlcnic_hardware_context *ahw = adapter->ahw; 1344 struct qlcnic_hardware_context *ahw = adapter->ahw;
1345 struct qlcnic_mailbox *mbx = ahw->mailbox;
1398 struct device *dev = &adapter->pdev->dev; 1346 struct device *dev = &adapter->pdev->dev;
1399 struct qlcnic_bc_trans *trans; 1347 struct qlcnic_bc_trans *trans;
1400 int err; 1348 int err;
@@ -1411,7 +1359,7 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
1411 goto cleanup_transaction; 1359 goto cleanup_transaction;
1412 1360
1413retry: 1361retry:
1414 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { 1362 if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
1415 rsp = -EIO; 1363 rsp = -EIO;
1416 QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n", 1364 QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1417 QLCNIC_MBX_RSP(cmd->req.arg[0]), func); 1365 QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
@@ -1454,7 +1402,7 @@ err_out:
1454 if (rsp == QLCNIC_RCODE_TIMEOUT) { 1402 if (rsp == QLCNIC_RCODE_TIMEOUT) {
1455 ahw->reset_context = 1; 1403 ahw->reset_context = 1;
1456 adapter->need_fw_reset = 1; 1404 adapter->need_fw_reset = 1;
1457 clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); 1405 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1458 } 1406 }
1459 1407
1460cleanup_transaction: 1408cleanup_transaction:
@@ -1613,8 +1561,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1613{ 1561{
1614 int err; 1562 int err;
1615 1563
1616 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); 1564 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
1617 qlcnic_83xx_enable_mbx_intrpt(adapter); 1565 qlcnic_83xx_enable_mbx_interrupt(adapter);
1618 1566
1619 err = qlcnic_sriov_cfg_bc_intr(adapter, 1); 1567 err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1620 if (err) 1568 if (err)
@@ -1628,6 +1576,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1628 if (err) 1576 if (err)
1629 goto err_out_term_channel; 1577 goto err_out_term_channel;
1630 1578
1579 qlcnic_dcb_get_info(adapter);
1580
1631 return 0; 1581 return 0;
1632 1582
1633err_out_term_channel: 1583err_out_term_channel:
@@ -1657,8 +1607,10 @@ static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1657 struct net_device *netdev = adapter->netdev; 1607 struct net_device *netdev = adapter->netdev;
1658 u8 i, max_ints = ahw->num_msix - 1; 1608 u8 i, max_ints = ahw->num_msix - 1;
1659 1609
1660 qlcnic_83xx_disable_mbx_intr(adapter);
1661 netif_device_detach(netdev); 1610 netif_device_detach(netdev);
1611 qlcnic_83xx_detach_mailbox_work(adapter);
1612 qlcnic_83xx_disable_mbx_intr(adapter);
1613
1662 if (netif_running(netdev)) 1614 if (netif_running(netdev))
1663 qlcnic_down(adapter, netdev); 1615 qlcnic_down(adapter, netdev);
1664 1616
@@ -1702,6 +1654,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1702static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) 1654static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1703{ 1655{
1704 struct qlcnic_hardware_context *ahw = adapter->ahw; 1656 struct qlcnic_hardware_context *ahw = adapter->ahw;
1657 struct qlcnic_mailbox *mbx = ahw->mailbox;
1705 struct device *dev = &adapter->pdev->dev; 1658 struct device *dev = &adapter->pdev->dev;
1706 struct qlc_83xx_idc *idc = &ahw->idc; 1659 struct qlc_83xx_idc *idc = &ahw->idc;
1707 u8 func = ahw->pci_func; 1660 u8 func = ahw->pci_func;
@@ -1712,7 +1665,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
 	/* Skip the context reset and check if FW is hung */
 	if (adapter->reset_ctx_cnt < 3) {
 		adapter->need_fw_reset = 1;
-		clear_bit(QLC_83XX_MBX_READY, &idc->status);
+		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 		dev_info(dev,
 			 "Resetting context, wait here to check if FW is in failed state\n");
 		return 0;
@@ -1737,7 +1690,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
 		 __func__, adapter->reset_ctx_cnt, func);
 	set_bit(__QLCNIC_RESETTING, &adapter->state);
 	adapter->need_fw_reset = 1;
-	clear_bit(QLC_83XX_MBX_READY, &idc->status);
+	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 	qlcnic_sriov_vf_detach(adapter);
 	adapter->need_fw_reset = 0;
 
@@ -1787,6 +1740,7 @@ static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
 static int
 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
 
 	dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
@@ -1794,7 +1748,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
 		set_bit(__QLCNIC_RESETTING, &adapter->state);
 		adapter->tx_timeo_cnt = 0;
 		adapter->reset_ctx_cnt = 0;
-		clear_bit(QLC_83XX_MBX_READY, &idc->status);
+		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 		qlcnic_sriov_vf_detach(adapter);
 	}
 
@@ -1803,6 +1757,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
 
 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
 	u8 func = adapter->ahw->pci_func;
 
@@ -1812,7 +1767,7 @@ static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
 		set_bit(__QLCNIC_RESETTING, &adapter->state);
 		adapter->tx_timeo_cnt = 0;
 		adapter->reset_ctx_cnt = 0;
-		clear_bit(QLC_83XX_MBX_READY, &idc->status);
+		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 		qlcnic_sriov_vf_detach(adapter);
 	}
 	return 0;
@@ -1990,7 +1945,7 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
 	int err;
 
 	set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
-	qlcnic_83xx_enable_mbx_intrpt(adapter);
+	qlcnic_83xx_enable_mbx_interrupt(adapter);
 	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index eb49cd65378c..330d9a8774ad 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1183,10 +1183,19 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
 	struct qlcnic_vf_info *vf = trans->vf;
 	struct qlcnic_vport *vp = vf->vp;
 	u8 cmd_op, mode = vp->vlan_mode;
+	struct qlcnic_adapter *adapter;
+
+	adapter = vf->adapter;
 
 	cmd_op = trans->req_hdr->cmd_op;
 	cmd->rsp.arg[0] |= 1 << 25;
 
+	/* For 84xx adapter in case of PVID , PFD should send vlan mode as
+	 * QLC_NO_VLAN_MODE to VFD which is zero in mailbox response
+	 */
+	if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
+		return 0;
+
 	switch (mode) {
 	case QLC_GUEST_VLAN_MODE:
 		cmd->rsp.arg[1] = mode | 1 << 8;
@@ -1284,6 +1293,10 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
 	QLCNIC_CMD_GET_STATISTICS,
 	QLCNIC_CMD_GET_PORT_CONFIG,
 	QLCNIC_CMD_GET_LINK_STATUS,
+	QLCNIC_CMD_DCB_QUERY_CAP,
+	QLCNIC_CMD_DCB_QUERY_PARAM,
+	QLCNIC_CMD_INIT_NIC_FUNC,
+	QLCNIC_CMD_STOP_NIC_FUNC,
 };
 
 static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
@@ -1639,14 +1652,14 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 	if (!is_valid_ether_addr(mac) || vf >= num_vfs)
 		return -EINVAL;
 
-	if (!compare_ether_addr(adapter->mac_addr, mac)) {
+	if (ether_addr_equal(adapter->mac_addr, mac)) {
 		netdev_err(netdev, "MAC address is already in use by the PF\n");
 		return -EINVAL;
 	}
 
 	for (i = 0; i < num_vfs; i++) {
 		vf_info = &sriov->vf_info[i];
-		if (!compare_ether_addr(vf_info->vp->mac, mac)) {
+		if (ether_addr_equal(vf_info->vp->mac, mac)) {
 			netdev_err(netdev,
 				   "MAC address is already in use by VF %d\n",
 				   i);
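
The hunk above swaps !compare_ether_addr() for ether_addr_equal(). The two helpers have opposite senses: compare_ether_addr() was memcmp-like and returned 0 on a match, while ether_addr_equal() returns true on a match, so "!compare_ether_addr(a, b)" and "ether_addr_equal(a, b)" are the same test. A minimal self-contained userspace sketch of that equivalence (the two helpers below are stand-ins, not the kernel implementations):

/* build: cc -o addrcmp addrcmp.c && ./addrcmp */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static int compare_ether_addr(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) != 0;	/* 0 when equal, old-style */
}

static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;	/* true when equal, new-style */
}

int main(void)
{
	unsigned char pf[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };
	unsigned char vf[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };

	/* Both expressions print 1 for matching addresses. */
	printf("old style: %d, new style: %d\n",
	       !compare_ether_addr(pf, vf), ether_addr_equal(pf, vf));
	return 0;
}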
@@ -1768,8 +1781,8 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
 	return 0;
 }
 
-static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
-					     struct qlcnic_vport *vp, int vf)
+static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+				      struct qlcnic_vport *vp, int vf)
 {
 	__u32 vlan = 0;
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 660c3f5b2237..c6165d05cc13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -465,8 +465,14 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
 	memset(&pm_cfg, 0,
 	       sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
 
-	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
 		pci_func = adapter->npars[i].pci_func;
+		if (!adapter->npars[i].active)
+			continue;
+
+		if (!adapter->npars[i].eswitch_status)
+			continue;
+
 		pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
 		pm_cfg[pci_func].dest_npar = 0;
 		pm_cfg[pci_func].pci_func = i;
@@ -632,8 +638,14 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
 	memset(&esw_cfg, 0,
 	       sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
 
-	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
 		pci_func = adapter->npars[i].pci_func;
+		if (!adapter->npars[i].active)
+			continue;
+
+		if (!adapter->npars[i].eswitch_status)
+			continue;
+
 		esw_cfg[pci_func].pci_func = pci_func;
 		if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
 			return QL_STATUS_INVALID_PARAM;
@@ -732,6 +744,9 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 		if (ret)
 			return ret;
 
+		if (!adapter->npars[i].eswitch_status)
+			continue;
+
 		np_cfg[i].pci_func = i;
 		np_cfg[i].op_mode = (u8)nic_info.op_mode;
 		np_cfg[i].port_num = nic_info.phys_port;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 7e8d68263963..899433778466 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2149,7 +2149,7 @@ struct ql_adapter {
 	struct timer_list timer;
 	atomic_t lb_count;
 	/* Keep local copy of current mac address. */
-	char current_mac_addr[6];
+	char current_mac_addr[ETH_ALEN];
 };
 
 /*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 85e5c97191dd..6f87f2cde647 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1897,12 +1897,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			     void *p)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-
-	if (regs->len > R8169_REGS_SIZE)
-		regs->len = R8169_REGS_SIZE;
+	u32 __iomem *data = tp->mmio_addr;
+	u32 *dw = p;
+	int i;
 
 	rtl_lock_work(tp);
-	memcpy_fromio(p, tp->mmio_addr, regs->len);
+	for (i = 0; i < R8169_REGS_SIZE; i += 4)
+		memcpy_fromio(dw++, data++, 4);
 	rtl_unlock_work(tp);
 }
 
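The r8169 hunk above stops letting memcpy_fromio() pick arbitrary access widths and instead dumps the register window in aligned 32-bit chunks, which is the access size this MMIO window reliably decodes. A self-contained userspace sketch of the same fixed-width access pattern over a fake register file (REGS_SIZE and the buffers below are illustrative, not the driver's):

/* build: cc -o regdump regdump.c && ./regdump */
#include <stdint.h>
#include <stdio.h>

#define REGS_SIZE 16	/* illustrative; the driver uses R8169_REGS_SIZE */

/* Fake MMIO window; a real one would come from ioremap(). */
static volatile uint32_t fake_mmio[REGS_SIZE / 4] = { 0x11, 0x22, 0x33, 0x44 };

static void copy_regs_dwordwise(uint32_t *dw, const volatile uint32_t *mmio)
{
	int i;

	/* One aligned 32-bit read per step, mirroring the driver's
	 * memcpy_fromio(dw++, data++, 4) loop above.
	 */
	for (i = 0; i < REGS_SIZE; i += 4)
		*dw++ = *mmio++;
}

int main(void)
{
	uint32_t dump[REGS_SIZE / 4];
	int i;

	copy_regs_dwordwise(dump, fake_mmio);
	for (i = 0; i < REGS_SIZE / 4; i++)
		printf("reg %#x: %#x\n", i * 4, dump[i]);
	return 0;
}
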
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 19a8a045e077..a30c4395b232 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
 	  Renesas SuperH Ethernet device driver.
 	  This driver supporting CPUs are:
 		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
-		  R8A7740 and R8A7779.
+		  R8A7740, R8A777x and R8A7790.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a753928bab9c..5cd831ebfa83 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -189,6 +189,7 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[RMCR]		= 0x0258,
 	[TFUCR]		= 0x0264,
 	[RFOCR]		= 0x0268,
+	[RMIIMODE]	= 0x026c,
 	[FCFTR]		= 0x0270,
 	[TRIMD]		= 0x027c,
 };
@@ -377,6 +378,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_r8a777x,
 
+	.register_type	= SH_ETH_REG_FAST_RCAR,
+
 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 	.eesipr_value	= 0x01ff009f,
@@ -392,6 +395,30 @@ static struct sh_eth_cpu_data r8a777x_data = {
 	.hw_swap	= 1,
 };
 
+/* R8A7790 */
+static struct sh_eth_cpu_data r8a7790_data = {
+	.set_duplex	= sh_eth_set_duplex,
+	.set_rate	= sh_eth_set_rate_r8a777x,
+
+	.register_type	= SH_ETH_REG_FAST_RCAR,
+
+	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+	.eesipr_value	= 0x01ff009f,
+
+	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+			  EESR_ECI,
+
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.hw_swap	= 1,
+	.rmiimode	= 1,
+	.shift_rd0	= 1,
+};
+
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -413,6 +440,8 @@ static struct sh_eth_cpu_data sh7724_data = {
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_sh7724,
 
+	.register_type	= SH_ETH_REG_FAST_SH4,
+
 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 	.eesipr_value	= 0x01ff009f,
@@ -451,6 +480,8 @@ static struct sh_eth_cpu_data sh7757_data = {
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_sh7757,
 
+	.register_type	= SH_ETH_REG_FAST_SH4,
+
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 	.rmcr_value	= 0x00000001,
 
@@ -519,6 +550,8 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_giga,
 
+	.register_type	= SH_ETH_REG_GIGABIT,
+
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -577,6 +610,8 @@ static struct sh_eth_cpu_data sh7734_data = {
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_gether,
 
+	.register_type	= SH_ETH_REG_GIGABIT,
+
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -604,6 +639,8 @@ static struct sh_eth_cpu_data sh7763_data = {
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_gether,
 
+	.register_type	= SH_ETH_REG_GIGABIT,
+
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -641,6 +678,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_gether,
 
+	.register_type	= SH_ETH_REG_GIGABIT,
+
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -663,6 +702,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
 };
 
 static struct sh_eth_cpu_data sh7619_data = {
+	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
+
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
 	.apr		= 1,
@@ -672,6 +713,8 @@ static struct sh_eth_cpu_data sh7619_data = {
 };
 
 static struct sh_eth_cpu_data sh771x_data = {
+	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
+
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 	.tsu		= 1,
 };
@@ -1124,6 +1167,9 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 	if (ret)
 		goto out;
 
+	if (mdp->cd->rmiimode)
+		sh_eth_write(ndev, 0x1, RMIIMODE);
+
 	/* Descriptor format */
 	sh_eth_ring_format(ndev);
 	if (mdp->cd->rpadir)
@@ -1297,9 +1343,12 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
+			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
+						mdp->rx_buf_sz,
+						DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_rx(skb);
+			netif_receive_skb(skb);
 			ndev->stats.rx_packets++;
 			ndev->stats.rx_bytes += pkt_len;
 		}
@@ -1857,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)
 
 	pm_runtime_get_sync(&mdp->pdev->dev);
 
+	napi_enable(&mdp->napi);
+
 	ret = request_irq(ndev->irq, sh_eth_interrupt,
 			  mdp->cd->irq_flags, ndev->name, ndev);
 	if (ret) {
 		dev_err(&ndev->dev, "Can not assign IRQ number\n");
-		return ret;
+		goto out_napi_off;
 	}
 
@@ -1879,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
 	if (ret)
 		goto out_free_irq;
 
-	napi_enable(&mdp->napi);
-
 	return ret;
 
 out_free_irq:
 	free_irq(ndev->irq, ndev);
+out_napi_off:
+	napi_disable(&mdp->napi);
 	pm_runtime_put_sync(&mdp->pdev->dev);
 	return ret;
 }
@@ -1976,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 
-	napi_disable(&mdp->napi);
-
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts by clearing the interrupt mask. */
@@ -1995,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)
 
 	free_irq(ndev->irq, ndev);
 
+	napi_disable(&mdp->napi);
+
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
 
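The two sh_eth hunks above reorder NAPI against the IRQ: the poller is enabled before the interrupt can fire and disabled only after the interrupt is freed, so the handler can never schedule an already-disabled NAPI context. A compilable userspace sketch of that ordering invariant (every name below is a stand-in for the kernel primitive of the same name, not the driver's code):

/* build: cc -o napiorder napiorder.c && ./napiorder */
#include <stdbool.h>
#include <stdio.h>

static bool napi_enabled, irq_live;

static void napi_enable(void)  { napi_enabled = true; }
static void napi_disable(void) { napi_enabled = false; }
static int  request_irq(void)  { irq_live = true; return 0; }
static void free_irq(void)     { irq_live = false; }

static void isr(void)
{
	/* May run whenever irq_live is set; it must always find the
	 * poller enabled, which is what the ordering below guarantees.
	 */
	if (irq_live && !napi_enabled)
		puts("BUG: IRQ scheduled a disabled poller");
}

static int open_path(void)
{
	napi_enable();			/* 1: poller ready to be scheduled */
	if (request_irq()) {		/* 2: interrupts may now fire */
		napi_disable();		/* unwind on failure */
		return -1;
	}
	return 0;
}

static void close_path(void)
{
	free_irq();			/* 1: no further interrupts */
	napi_disable();			/* 2: now safe to stop polling */
}

int main(void)
{
	open_path();
	isr();
	close_path();
	return 0;
}
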
@@ -2561,7 +2612,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct net_device *ndev = NULL;
 	struct sh_eth_private *mdp = NULL;
-	struct sh_eth_plat_data *pd = pdev->dev.platform_data;
+	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
 	const struct platform_device_id *id = platform_get_device_id(pdev);
 
 	/* get base addr */
@@ -2594,9 +2645,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	/* Fill in the fields of the device structure with ethernet values. */
-	ether_setup(ndev);
-
 	mdp = netdev_priv(ndev);
 	mdp->num_tx_ring = TX_RING_SIZE;
 	mdp->num_rx_ring = RX_RING_SIZE;
@@ -2618,10 +2666,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	mdp->edmac_endian = pd->edmac_endian;
 	mdp->no_ether_link = pd->no_ether_link;
 	mdp->ether_link_active_low = pd->ether_link_active_low;
-	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
 
 	/* set cpu data */
 	mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
 	sh_eth_set_default_cpu_data(mdp->cd);
 
 	/* set function */
@@ -2749,6 +2797,7 @@ static struct platform_device_id sh_eth_id_table[] = {
2749 { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, 2797 { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
2750 { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, 2798 { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
2751 { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, 2799 { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
2800 { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
2752 { } 2801 { }
2753}; 2802};
2754MODULE_DEVICE_TABLE(platform, sh_eth_id_table); 2803MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 99995bf38c40..a0db02c63b11 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -60,6 +60,7 @@ enum {
 	EDOCR,
 	TFUCR,
 	RFOCR,
+	RMIIMODE,
 	FCFTR,
 	RPADIR,
 	TRIMD,
@@ -156,6 +157,13 @@ enum {
 	SH_ETH_MAX_REGISTER_OFFSET,
 };
 
+enum {
+	SH_ETH_REG_GIGABIT,
+	SH_ETH_REG_FAST_RCAR,
+	SH_ETH_REG_FAST_SH4,
+	SH_ETH_REG_FAST_SH3_SH2
+};
+
 /* Driver's parameters */
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 #define SH4_SKB_RX_ALIGN	32
@@ -453,6 +461,7 @@ struct sh_eth_cpu_data {
 	void (*set_rate)(struct net_device *ndev);
 
 	/* mandatory initialize value */
+	int register_type;
 	unsigned long eesipr_value;
 
 	/* optional initialize value */
@@ -482,6 +491,7 @@ struct sh_eth_cpu_data {
 	unsigned hw_crc:1;	/* E-DMAC have CSMR */
 	unsigned select_mii:1;	/* EtherC have RMII_MII (MII select register) */
 	unsigned shift_rd0:1;	/* shift Rx descriptor word 0 right by 16 */
+	unsigned rmiimode:1;	/* EtherC has RMIIMODE register */
 };
 
 struct sh_eth_private {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 856e523ac936..c76571886011 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -721,7 +721,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
 
 static int sgiseeq_probe(struct platform_device *pdev)
 {
-	struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
+	struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
 	struct hpc3_regs *hpcregs = pd->hpc;
 	struct sgiseeq_init_block *sr;
 	unsigned int irq = pd->irq;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 4136ccc4a954..8b7152565c5e 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,5 +1,5 @@
 config SFC
-	tristate "Solarflare SFC4000/SFC9000-family support"
+	tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
 	depends on PCI
 	select MDIO
 	select CRC32
@@ -8,12 +8,13 @@ config SFC
 	select PTP_1588_CLOCK
 	---help---
 	  This driver supports 10-gigabit Ethernet cards based on
-	  the Solarflare SFC4000 and SFC9000-family controllers.
+	  the Solarflare SFC4000, SFC9000-family and SFC9100-family
+	  controllers.
 
 	  To compile this driver as a module, choose M here.  The module
 	  will be called sfc.
 config SFC_MTD
-	bool "Solarflare SFC4000/SFC9000-family MTD support"
+	bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
 	depends on SFC && MTD && !(SFC=y && MTD=m)
 	default y
 	---help---
@@ -21,7 +22,7 @@ config SFC_MTD
 	  (e.g. /dev/mtd1).  This is required to update the firmware or
 	  the boot configuration under Linux.
 config SFC_MCDI_MON
-	bool "Solarflare SFC9000-family hwmon support"
+	bool "Solarflare SFC9000/SFC9100-family hwmon support"
 	depends on SFC && HWMON && !(SFC=y && HWMON=m)
 	default y
 	---help---
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 945bf06e69ef..3a83c0dca8e6 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,8 +1,7 @@
-sfc-y			+= efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
-			   falcon_xmac.o mcdi_mac.o \
-			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+sfc-y			+= efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
+			   rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
 			   tenxpress.o txc43128_phy.o falcon_boards.o \
-			   mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
+			   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
 sfc-$(CONFIG_SFC_SRIOV)	+= siena_sriov.o
 
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 5400a33f254f..17d83f37fbf2 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -1,7 +1,7 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,10 @@
 /* Lowest bit numbers and widths */
 #define EFX_DUMMY_FIELD_LBN 0
 #define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
 #define EFX_DWORD_0_LBN 0
 #define EFX_DWORD_0_WIDTH 32
 #define EFX_DWORD_1_LBN 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 000000000000..5f42313b4965
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,3043 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef10_regs.h"
+#include "io.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "workarounds.h"
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Hardware control for EF10 architecture including 'Huntington'. */
+
+#define EFX_EF10_DRVGEN_EV	7
+enum {
+	EFX_EF10_TEST = 1,
+	EFX_EF10_REFILL,
+};
+
+/* The reserved RSS context value */
+#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
+
+/* The filter table(s) are managed by firmware and we have write-only
+ * access.  When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter.  Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define HUNT_FILTER_TBL_ROWS 8192
+
+struct efx_ef10_filter_table {
+/* The RX match field masks supported by this fw & hw, in order of priority */
+	enum efx_filter_match_flags rx_match_flags[
+		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+	unsigned int rx_match_count;
+
+	struct {
+		unsigned long spec;	/* pointer to spec plus flag bits */
+/* BUSY flag indicates that an update is in progress.  STACK_OLD is
+ * used to mark and sweep stack-owned MAC filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY	1UL
+#define EFX_EF10_FILTER_FLAG_STACK_OLD	2UL
+#define EFX_EF10_FILTER_FLAGS		3UL
+		u64 handle;		/* firmware handle */
+	} *entry;
+	wait_queue_head_t waitq;
+/* Shadow of net_device address lists, guarded by mac_lock */
+#define EFX_EF10_FILTER_STACK_UC_MAX	32
+#define EFX_EF10_FILTER_STACK_MC_MAX	256
+	struct {
+		u8 addr[ETH_ALEN];
+		u16 id;
+	} stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
+	  stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
+	int stack_uc_count;		/* negative for PROMISC */
+	int stack_mc_count;		/* negative for PROMISC/ALLMULTI */
+};
+
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
+static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+
+static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
+{
+	efx_dword_t reg;
+
+	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
+	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
+		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
+}
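
The warm-boot check above packs two 16-bit fields into one 32-bit status register: the upper word must read the 0xb007 magic before the lower word is trusted as the boot count. A self-contained userspace sketch of that decode, not part of ef10.c (plain shifts stand in for EFX_DWORD_FIELD()/EFX_WORD_*):

/* build: cc -o bootcount bootcount.c && ./bootcount */
#include <stdint.h>
#include <stdio.h>

static int get_warm_boot_count(uint32_t reg)
{
	uint16_t word1 = reg >> 16;	/* EFX_WORD_1: magic */
	uint16_t word0 = reg & 0xffff;	/* EFX_WORD_0: count */

	return word1 == 0xb007 ? word0 : -5 /* -EIO */;
}

int main(void)
{
	printf("%d\n", get_warm_boot_count(0xb0070003));	/* 3 */
	printf("%d\n", get_warm_boot_count(0xdead0003));	/* -5 */
	return 0;
}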
+
+static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
+{
+	return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+}
+
+static int efx_ef10_init_capabilities(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (outlen >= sizeof(outbuf)) {
+		nic_data->datapath_caps =
+			MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+		if (!(nic_data->datapath_caps &
+		      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+			netif_err(efx, drv, efx->net_dev,
+				  "Capabilities don't indicate TSO support.\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc)
+		return rc;
+	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+	return rc > 0 ? rc : -ERANGE;
+}
+
+static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+		return -EIO;
+
+	memcpy(mac_address,
+	       MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+	return 0;
+}
+
+static int efx_ef10_probe(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data;
+	int i, rc;
+
+	/* We can have one VI for each 8K region.  However we need
+	 * multiple TX queues per channel.
+	 */
+	efx->max_channels =
+		min_t(unsigned int,
+		      EFX_MAX_CHANNELS,
+		      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+	BUG_ON(efx->max_channels == 0);
+
+	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+	if (!nic_data)
+		return -ENOMEM;
+	efx->nic_data = nic_data;
+
+	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
+	if (rc)
+		goto fail1;
+
+	/* Get the MC's warm boot count.  In case it's rebooting right
+	 * now, be prepared to retry.
+	 */
+	i = 0;
+	for (;;) {
+		rc = efx_ef10_get_warm_boot_count(efx);
+		if (rc >= 0)
+			break;
+		if (++i == 5)
+			goto fail2;
+		ssleep(1);
+	}
+	nic_data->warm_boot_count = rc;
+
+	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+	/* In case we're recovering from a crash (kexec), we want to
+	 * cancel any outstanding request by the previous user of this
+	 * function.  We send a special message using the least
+	 * significant bits of the 'high' (doorbell) register.
+	 */
+	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
+
+	rc = efx_mcdi_init(efx);
+	if (rc)
+		goto fail2;
+
+	/* Reset (most) configuration for this function */
+	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+	if (rc)
+		goto fail3;
+
+	/* Enable event logging */
+	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+	if (rc)
+		goto fail3;
+
+	rc = efx_ef10_init_capabilities(efx);
+	if (rc < 0)
+		goto fail3;
+
+	efx->rx_packet_len_offset =
+		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
+
+	if (!(nic_data->datapath_caps &
+	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+		netif_err(efx, probe, efx->net_dev,
+			  "current firmware does not support an RX prefix\n");
+		rc = -ENODEV;
+		goto fail3;
+	}
+
+	rc = efx_mcdi_port_get_number(efx);
+	if (rc < 0)
+		goto fail3;
+	efx->port_num = rc;
+
+	rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+	if (rc)
+		goto fail3;
+
+	rc = efx_ef10_get_sysclk_freq(efx);
+	if (rc < 0)
+		goto fail3;
+	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
+
+	/* Check whether firmware supports bug 35388 workaround */
+	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+	if (rc == 0)
+		nic_data->workaround_35388 = true;
+	else if (rc != -ENOSYS && rc != -ENOENT)
+		goto fail3;
+	netif_dbg(efx, probe, efx->net_dev,
+		  "workaround for bug 35388 is %sabled\n",
+		  nic_data->workaround_35388 ? "en" : "dis");
+
+	rc = efx_mcdi_mon_probe(efx);
+	if (rc)
+		goto fail3;
+
+	efx_ptp_probe(efx);
+
+	return 0;
+
+fail3:
+	efx_mcdi_fini(efx);
+fail2:
+	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+fail1:
+	kfree(nic_data);
+	efx->nic_data = NULL;
+	return rc;
+}
+
+static int efx_ef10_free_vis(struct efx_nic *efx)
+{
+	int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+
+	/* -EALREADY means nothing to free, so ignore */
+	if (rc == -EALREADY)
+		rc = 0;
+	return rc;
+}
+
+static void efx_ef10_remove(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	efx_mcdi_mon_remove(efx);
+
+	/* This needs to be after efx_ptp_remove_channel() with no filters */
+	efx_ef10_rx_free_indir_table(efx);
+
+	rc = efx_ef10_free_vis(efx);
+	WARN_ON(rc != 0);
+
+	efx_mcdi_fini(efx);
+	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+	kfree(nic_data);
+}
+
+static int efx_ef10_alloc_vis(struct efx_nic *efx,
+			      unsigned int min_vis, unsigned int max_vis)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc != 0)
+		return rc;
+
+	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+		return -EIO;
+
+	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
+		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+	return 0;
+}
+
+static int efx_ef10_dimension_resources(struct efx_nic *efx)
+{
+	unsigned int n_vis =
+		max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+	return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+}
+
+static int efx_ef10_init_nic(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	if (nic_data->must_realloc_vis) {
+		/* We cannot let the number of VIs change now */
+		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
+					nic_data->n_allocated_vis);
+		if (rc)
+			return rc;
+		nic_data->must_realloc_vis = false;
+	}
+
+	efx_ef10_rx_push_indir_table(efx);
+	return 0;
+}
+
+static int efx_ef10_map_reset_flags(u32 *flags)
+{
+	enum {
+		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
+				   ETH_RESET_SHARED_SHIFT),
+		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
+				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
+				 ETH_RESET_SHARED_SHIFT)
+	};
+
+	/* We assume for now that our PCI function is permitted to
+	 * reset everything.
+	 */
+
+	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
+		*flags &= ~EF10_RESET_MC;
+		return RESET_TYPE_WORLD;
+	}
+
+	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
+		*flags &= ~EF10_RESET_PORT;
+		return RESET_TYPE_ALL;
+	}
+
+	/* no invisible reset implemented */
+
+	return -EINVAL;
+}
+
+#define EF10_DMA_STAT(ext_name, mcdi_name)			\
+	[EF10_STAT_ ## ext_name] =				\
+	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
+	[EF10_STAT_ ## int_name] =				\
+	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_OTHER_STAT(ext_name)				\
+	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
+	EF10_DMA_STAT(tx_bytes, TX_BYTES),
+	EF10_DMA_STAT(tx_packets, TX_PKTS),
+	EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+	EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+	EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+	EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+	EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+	EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+	EF10_DMA_STAT(tx_64, TX_64_PKTS),
+	EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+	EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+	EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+	EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+	EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+	EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+	EF10_DMA_STAT(rx_bytes, RX_BYTES),
+	EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+	EF10_OTHER_STAT(rx_good_bytes),
+	EF10_OTHER_STAT(rx_bad_bytes),
+	EF10_DMA_STAT(rx_packets, RX_PKTS),
+	EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
+	EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+	EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+	EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+	EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+	EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+	EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+	EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+	EF10_DMA_STAT(rx_64, RX_64_PKTS),
+	EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+	EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+	EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+	EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+	EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+	EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+	EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+	EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+	EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+};
+
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
+			       (1ULL << EF10_STAT_tx_packets) |		\
+			       (1ULL << EF10_STAT_tx_pause) |		\
+			       (1ULL << EF10_STAT_tx_unicast) |		\
+			       (1ULL << EF10_STAT_tx_multicast) |	\
+			       (1ULL << EF10_STAT_tx_broadcast) |	\
+			       (1ULL << EF10_STAT_rx_bytes) |		\
+			       (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
+			       (1ULL << EF10_STAT_rx_good_bytes) |	\
+			       (1ULL << EF10_STAT_rx_bad_bytes) |	\
+			       (1ULL << EF10_STAT_rx_packets) |		\
+			       (1ULL << EF10_STAT_rx_good) |		\
+			       (1ULL << EF10_STAT_rx_bad) |		\
+			       (1ULL << EF10_STAT_rx_pause) |		\
+			       (1ULL << EF10_STAT_rx_control) |		\
+			       (1ULL << EF10_STAT_rx_unicast) |		\
+			       (1ULL << EF10_STAT_rx_multicast) |	\
+			       (1ULL << EF10_STAT_rx_broadcast) |	\
+			       (1ULL << EF10_STAT_rx_lt64) |		\
+			       (1ULL << EF10_STAT_rx_64) |		\
+			       (1ULL << EF10_STAT_rx_65_to_127) |	\
+			       (1ULL << EF10_STAT_rx_128_to_255) |	\
+			       (1ULL << EF10_STAT_rx_256_to_511) |	\
+			       (1ULL << EF10_STAT_rx_512_to_1023) |	\
+			       (1ULL << EF10_STAT_rx_1024_to_15xx) |	\
+			       (1ULL << EF10_STAT_rx_15xx_to_jumbo) |	\
+			       (1ULL << EF10_STAT_rx_gtjumbo) |		\
+			       (1ULL << EF10_STAT_rx_bad_gtjumbo) |	\
+			       (1ULL << EF10_STAT_rx_overflow) |	\
+			       (1ULL << EF10_STAT_rx_nodesc_drops))
+
+/* These statistics are only provided by the 10G MAC.  For a 10G/40G
+ * switchable port we do not expose these because they might not
+ * include all the packets they should.
+ */
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |	\
+				 (1ULL << EF10_STAT_tx_lt64) |		\
+				 (1ULL << EF10_STAT_tx_64) |		\
+				 (1ULL << EF10_STAT_tx_65_to_127) |	\
+				 (1ULL << EF10_STAT_tx_128_to_255) |	\
+				 (1ULL << EF10_STAT_tx_256_to_511) |	\
+				 (1ULL << EF10_STAT_tx_512_to_1023) |	\
+				 (1ULL << EF10_STAT_tx_1024_to_15xx) |	\
+				 (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+
+/* These statistics are only provided by the 40G MAC.  For a 10G/40G
+ * switchable port we do expose these because the errors will otherwise
+ * be silent.
+ */
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
+				  (1ULL << EF10_STAT_rx_length_error))
+
+#if BITS_PER_LONG == 64
+#define STAT_MASK_BITMAP(bits) (bits)
+#else
+#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
+#endif
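
STAT_MASK_BITMAP() above lets the same 64-bit mask constants initialize an unsigned long bitmap on both 32- and 64-bit builds: one 64-bit word, or two 32-bit words. A self-contained userspace sketch of the trick, not part of ef10.c (ULONG_MAX stands in for the kernel's BITS_PER_LONG test; the mask value is illustrative):

/* build: cc -o statmask statmask.c && ./statmask */
#include <limits.h>
#include <stdio.h>

#if ULONG_MAX > 0xffffffffUL
#define STAT_MASK_BITMAP(bits) (bits)
#else
#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
#endif

static const unsigned long example_mask[] = {
	STAT_MASK_BITMAP(0x0000000300000001ULL)	/* bits 0, 32 and 33 */
};

int main(void)
{
	unsigned int i, lbits = sizeof(long) * 8;

	for (i = 0; i < 64; i++)
		if (example_mask[i / lbits] & (1UL << (i % lbits)))
			printf("bit %u set\n", i);
	return 0;
}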
+
+static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
+{
+	static const unsigned long hunt_40g_stat_mask[] = {
+		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+				 HUNT_40G_EXTRA_STAT_MASK)
+	};
+	static const unsigned long hunt_10g_only_stat_mask[] = {
+		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+				 HUNT_10G_ONLY_STAT_MASK)
+	};
+	u32 port_caps = efx_mcdi_phy_get_caps(efx);
+
+	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+		return hunt_40g_stat_mask;
+	else
+		return hunt_10g_only_stat_mask;
+}
+
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+{
+	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+				      efx_ef10_stat_mask(efx), names);
+}
+
+static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+	__le64 generation_start, generation_end;
+	u64 *stats = nic_data->stats;
+	__le64 *dma_stats;
+
+	dma_stats = efx->stats_buffer.addr;
+	nic_data = efx->nic_data;
+
+	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+		return 0;
+	rmb();
+	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+			     stats, efx->stats_buffer.addr, false);
+	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+	if (generation_end != generation_start)
+		return -EAGAIN;
+
+	/* Update derived statistics */
+	stats[EF10_STAT_rx_good_bytes] =
+		stats[EF10_STAT_rx_bytes] -
+		stats[EF10_STAT_rx_bytes_minus_good_bytes];
+	efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
+			     stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+
+	return 0;
+}
+
+
+static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
+				    struct rtnl_link_stats64 *core_stats)
+{
+	const unsigned long *mask = efx_ef10_stat_mask(efx);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	u64 *stats = nic_data->stats;
+	size_t stats_count = 0, index;
+	int retry;
+
+	/* If we're unlucky enough to read statistics during the DMA, wait
+	 * up to 10ms for it to finish (typically takes <500us)
+	 */
+	for (retry = 0; retry < 100; ++retry) {
+		if (efx_ef10_try_update_nic_stats(efx) == 0)
+			break;
+		udelay(100);
+	}
+
+	if (full_stats) {
+		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+			if (efx_ef10_stat_desc[index].name) {
+				*full_stats++ = stats[index];
+				++stats_count;
+			}
+		}
+	}
+
+	if (core_stats) {
+		core_stats->rx_packets = stats[EF10_STAT_rx_packets];
+		core_stats->tx_packets = stats[EF10_STAT_tx_packets];
+		core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
+		core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
+		core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
+		core_stats->multicast = stats[EF10_STAT_rx_multicast];
+		core_stats->rx_length_errors =
+			stats[EF10_STAT_rx_gtjumbo] +
+			stats[EF10_STAT_rx_length_error];
+		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+		core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
+		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+		core_stats->rx_errors = (core_stats->rx_length_errors +
+					 core_stats->rx_crc_errors +
+					 core_stats->rx_frame_errors);
+	}
+
+	return stats_count;
+}
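
efx_ef10_try_update_nic_stats() above treats the generation words that firmware DMAs around the statistics block as a consistency check, and update_stats() retries (up to 100 times, 100us apart) while the DMA is in flight. A self-contained userspace sketch of that snapshot pattern, not part of ef10.c (the struct layout is illustrative only, and the kernel's rmb() barriers are elided):

/* build: cc -o genstats genstats.c && ./genstats */
#include <stdint.h>

#define GEN_INVALID UINT64_MAX

struct dma_stats {
	volatile uint64_t generation_end;
	volatile uint64_t counters[4];
	volatile uint64_t generation_start;
};

/* 0 on a consistent snapshot, -1 if the stats DMA raced with us
 * (the caller would then retry, as efx_ef10_update_stats() does).
 */
static int try_snapshot(const struct dma_stats *dma, uint64_t out[4])
{
	uint64_t gen_end = dma->generation_end;
	int i;

	if (gen_end == GEN_INVALID)
		return 0;		/* no stats DMA'd yet; nothing to do */
	for (i = 0; i < 4; i++)
		out[i] = dma->counters[i];
	return dma->generation_start == gen_end ? 0 : -1;
}

int main(void)
{
	struct dma_stats dma = { .generation_end = 7,
				 .counters = { 1, 2, 3, 4 },
				 .generation_start = 7 };
	uint64_t snap[4];

	return try_snapshot(&dma, snap);	/* 0: consistent snapshot */
}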
+
+static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	unsigned int mode, value;
+	efx_dword_t timer_cmd;
+
+	if (channel->irq_moderation) {
+		mode = 3;
+		value = channel->irq_moderation - 1;
+	} else {
+		mode = 0;
+		value = 0;
+	}
+
+	if (EFX_EF10_WORKAROUND_35388(efx)) {
+		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
+				     EFE_DD_EVQ_IND_TIMER_FLAGS,
+				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
+				     ERF_DD_EVQ_IND_TIMER_VAL, value);
+		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
+				channel->channel);
+	} else {
+		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+				     ERF_DZ_TC_TIMER_VAL, value);
+		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
+				channel->channel);
+	}
+}
+
+static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
+{
+	if (type != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static void efx_ef10_mcdi_request(struct efx_nic *efx,
+				  const efx_dword_t *hdr, size_t hdr_len,
+				  const efx_dword_t *sdu, size_t sdu_len)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	u8 *pdu = nic_data->mcdi_buf.addr;
+
+	memcpy(pdu, hdr, hdr_len);
+	memcpy(pdu + hdr_len, sdu, sdu_len);
+	wmb();
+
+	/* The hardware provides 'low' and 'high' (doorbell) registers
+	 * for passing the 64-bit address of an MCDI request to
+	 * firmware.  However the dwords are swapped by firmware.  The
+	 * least significant bits of the doorbell are then 0 for all
+	 * MCDI requests due to alignment.
+	 */
+	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
+		    ER_DZ_MC_DB_LWRD);
+	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
+		    ER_DZ_MC_DB_HWRD);
+}
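
The doorbell write above posts a 64-bit DMA address as two 32-bit writes; because firmware swaps the dwords, the high half goes to the "low" register and the low half to the "high" (doorbell) register, whose write triggers the request. A self-contained userspace sketch of that split, not part of ef10.c (the two-element array stands in for the ER_DZ_MC_DB_LWRD/HWRD register pair):

/* build: cc -o doorbell doorbell.c && ./doorbell */
#include <stdint.h>
#include <stdio.h>

static uint32_t db_regs[2];		/* [0] = LWRD, [1] = HWRD */

static void post_mcdi_request(uint64_t dma_addr)
{
	db_regs[0] = (uint32_t)(dma_addr >> 32);	/* LWRD <- high dword */
	db_regs[1] = (uint32_t)dma_addr;		/* HWRD <- low dword, rings the doorbell */
}

int main(void)
{
	post_mcdi_request(0x0000001234567000ULL);	/* aligned, so low bits are 0 */
	printf("LWRD=%#x HWRD=%#x\n", db_regs[0], db_regs[1]);
	return 0;
}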
+
+static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
+
+	rmb();
+	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void
+efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+			    size_t offset, size_t outlen)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	const u8 *pdu = nic_data->mcdi_buf.addr;
+
+	memcpy(outbuf, pdu + offset, outlen);
+}
+
+static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	rc = efx_ef10_get_warm_boot_count(efx);
+	if (rc < 0) {
+		/* The firmware is presumably in the process of
+		 * rebooting.  However, we are supposed to report each
+		 * reboot just once, so we must only do that once we
+		 * can read and store the updated warm boot count.
+		 */
+		return 0;
+	}
+
+	if (rc == nic_data->warm_boot_count)
+		return 0;
+
+	nic_data->warm_boot_count = rc;
+
+	/* All our allocations have been reset */
+	nic_data->must_realloc_vis = true;
+	nic_data->must_restore_filters = true;
+	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+	return -EIO;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt.  This routine schedules event
+ * queue processing.  No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
+{
+	struct efx_msi_context *context = dev_id;
+	struct efx_nic *efx = context->efx;
+
+	netif_vdbg(efx, intr, efx->net_dev,
+		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
+
+	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+		/* Note test interrupts */
+		if (context->index == efx->irq_level)
+			efx->last_irq_cpu = raw_smp_processor_id();
+
+		/* Schedule processing of the channel */
+		efx_schedule_channel_irq(efx->channel[context->index]);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
+{
+	struct efx_nic *efx = dev_id;
+	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+	struct efx_channel *channel;
+	efx_dword_t reg;
+	u32 queues;
+
+	/* Read the ISR which also ACKs the interrupts */
+	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
+	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
+
+	if (queues == 0)
+		return IRQ_NONE;
+
+	if (likely(soft_enabled)) {
+		/* Note test interrupts */
+		if (queues & (1U << efx->irq_level))
+			efx->last_irq_cpu = raw_smp_processor_id();
+
+		efx_for_each_channel(channel, efx) {
+			if (queues & 1)
+				efx_schedule_channel_irq(channel);
+			queues >>= 1;
+		}
+	}
+
+	netif_vdbg(efx, intr, efx->net_dev,
+		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+	return IRQ_HANDLED;
+}
+
+static void efx_ef10_irq_test_generate(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+			    inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
787
788static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
789{
790 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
791 (tx_queue->ptr_mask + 1) *
792 sizeof(efx_qword_t),
793 GFP_KERNEL);
794}
795
796/* This writes to the TX_DESC_WPTR and also pushes data */
797static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
798 const efx_qword_t *txd)
799{
800 unsigned int write_ptr;
801 efx_oword_t reg;
802
803 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
804 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
805 reg.qword[0] = *txd;
806 efx_writeo_page(tx_queue->efx, &reg,
807 ER_DZ_TX_DESC_UPD, tx_queue->queue);
808}
809
810static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
811{
812 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
813 EFX_BUF_SIZE));
814 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
815 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
816 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
817 struct efx_channel *channel = tx_queue->channel;
818 struct efx_nic *efx = tx_queue->efx;
819 size_t inlen, outlen;
820 dma_addr_t dma_addr;
821 efx_qword_t *txd;
822 int rc;
823 int i;
824
825 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
826 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
827 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
828 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
829 MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
830 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
831 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
832 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
833 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
834
835 dma_addr = tx_queue->txd.buf.dma_addr;
836
837 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
838 tx_queue->queue, entries, (u64)dma_addr);
839
840 for (i = 0; i < entries; ++i) {
841 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
842 dma_addr += EFX_BUF_SIZE;
843 }
844
845 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
846
847 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
848 outbuf, sizeof(outbuf), &outlen);
849 if (rc)
850 goto fail;
851
852	/* A previous user of this TX queue might have written a
853	 * descriptor to the TX push collector but never rung the
854	 * doorbell.  (Each collector belongs to a port, not a queue or
855	 * function, so it cannot easily be reset.)  We must attempt to
856	 * push a no-op descriptor in its place.
857	 */
858 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
859 tx_queue->insert_count = 1;
860 txd = efx_tx_desc(tx_queue, 0);
861 EFX_POPULATE_QWORD_4(*txd,
862 ESF_DZ_TX_DESC_IS_OPT, true,
863 ESF_DZ_TX_OPTION_TYPE,
864 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
865 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
866 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
867 tx_queue->write_count = 1;
868 wmb();
869 efx_ef10_push_tx_desc(tx_queue, txd);
870
871 return;
872
873fail:
874 WARN_ON(true);
875 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
876}
877
878static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
879{
880 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
881 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
882 struct efx_nic *efx = tx_queue->efx;
883 size_t outlen;
884 int rc;
885
886 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
887 tx_queue->queue);
888
889 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
890 outbuf, sizeof(outbuf), &outlen);
891
892 if (rc && rc != -EALREADY)
893 goto fail;
894
895 return;
896
897fail:
898 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
899}
900
901static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
902{
903 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
904}
905
906/* Update TX_DESC_WPTR, the write pointer for the TX descriptor ring */
907static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
908{
909 unsigned int write_ptr;
910 efx_dword_t reg;
911
912 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
913 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
914 efx_writed_page(tx_queue->efx, &reg,
915 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
916}
917
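/* Write out all descriptors queued since the last call, then either
 * push the first new descriptor straight to the hardware or simply
 * update the write pointer.
 */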
918static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
919{
920 unsigned int old_write_count = tx_queue->write_count;
921 struct efx_tx_buffer *buffer;
922 unsigned int write_ptr;
923 efx_qword_t *txd;
924
925 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
926
927 do {
928 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
929 buffer = &tx_queue->buffer[write_ptr];
930 txd = efx_tx_desc(tx_queue, write_ptr);
931 ++tx_queue->write_count;
932
933 /* Create TX descriptor ring entry */
934 if (buffer->flags & EFX_TX_BUF_OPTION) {
935 *txd = buffer->option;
936 } else {
937 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
938 EFX_POPULATE_QWORD_3(
939 *txd,
940 ESF_DZ_TX_KER_CONT,
941 buffer->flags & EFX_TX_BUF_CONT,
942 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
943 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
944 }
945 } while (tx_queue->write_count != tx_queue->insert_count);
946
947 wmb(); /* Ensure descriptors are written before they are fetched */
948
949 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
950 txd = efx_tx_desc(tx_queue,
951 old_write_count & tx_queue->ptr_mask);
952 efx_ef10_push_tx_desc(tx_queue, txd);
953 ++tx_queue->pushes;
954 } else {
955 efx_ef10_notify_tx_desc(tx_queue);
956 }
957}
958
959static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
960{
961 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
962 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
963 size_t outlen;
964 int rc;
965
966 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
967 EVB_PORT_ID_ASSIGNED);
968 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
969 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
970 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
971 EFX_MAX_CHANNELS);
972
973 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
974 outbuf, sizeof(outbuf), &outlen);
975 if (rc != 0)
976 return rc;
977
978 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
979 return -EIO;
980
981 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
982
983 return 0;
984}
985
986static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
987{
988 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
989 int rc;
990
991 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
992 context);
993
994 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
995 NULL, 0, NULL);
996 WARN_ON(rc != 0);
997}
998
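/* Write the RX indirection table and the Toeplitz hash key into the
 * given RSS context.
 */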
999static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
1000{
1001 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1002 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1003 int i, rc;
1004
1005 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
1006 context);
1007 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1008 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
1009
1010 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
1011 MCDI_PTR(tablebuf,
1012 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
1013 (u8) efx->rx_indir_table[i];
1014
1015 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
1016 sizeof(tablebuf), NULL, 0, NULL);
1017 if (rc != 0)
1018 return rc;
1019
1020 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
1021 context);
1022 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
1023 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
1024 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
1025 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
1026 efx->rx_hash_key[i];
1027
1028 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
1029 sizeof(keybuf), NULL, 0, NULL);
1030}
1031
1032static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
1033{
1034 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1035
1036 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
1037 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
1038 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1039}
1040
1041static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
1042{
1043 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1044 int rc;
1045
1046 netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
1047
1048 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
1049 rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
1050 if (rc != 0)
1051 goto fail;
1052 }
1053
1054 rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
1055 if (rc != 0)
1056 goto fail;
1057
1058 return;
1059
1060fail:
1061 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1062}
1063
1064static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
1065{
1066 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
1067 (rx_queue->ptr_mask + 1) *
1068 sizeof(efx_qword_t),
1069 GFP_KERNEL);
1070}
1071
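/* Set up an RX queue through MCDI INIT_RXQ with the RX packet prefix
 * enabled.  As for TX, the descriptor ring is described to the
 * firmware as a list of EFX_BUF_SIZE buffers.
 */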
1072static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
1073{
1074 MCDI_DECLARE_BUF(inbuf,
1075 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1076 EFX_BUF_SIZE));
1077 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
1078 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1079 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
1080 struct efx_nic *efx = rx_queue->efx;
1081 size_t inlen, outlen;
1082 dma_addr_t dma_addr;
1083 int rc;
1084 int i;
1085
1086 rx_queue->scatter_n = 0;
1087 rx_queue->scatter_len = 0;
1088
1089 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
1090 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
1091 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
1092 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
1093 efx_rx_queue_index(rx_queue));
1094 MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
1095 INIT_RXQ_IN_FLAG_PREFIX, 1);
1096 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
1097 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1098
1099 dma_addr = rx_queue->rxd.buf.dma_addr;
1100
1101 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
1102 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
1103
1104 for (i = 0; i < entries; ++i) {
1105 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
1106 dma_addr += EFX_BUF_SIZE;
1107 }
1108
1109 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
1110
1111 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
1112 outbuf, sizeof(outbuf), &outlen);
1113 if (rc)
1114 goto fail;
1115
1116 return;
1117
1118fail:
1119 WARN_ON(true);
1120 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1121}
1122
1123static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
1124{
1125 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
1126 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
1127 struct efx_nic *efx = rx_queue->efx;
1128 size_t outlen;
1129 int rc;
1130
1131 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
1132 efx_rx_queue_index(rx_queue));
1133
1134 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
1135 outbuf, sizeof(outbuf), &outlen);
1136
1137 if (rc && rc != -EALREADY)
1138 goto fail;
1139
1140 return;
1141
1142fail:
1143 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1144}
1145
1146static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
1147{
1148 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
1149}
1150
1151/* This creates an entry in the RX descriptor queue */
1152static inline void
1153efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
1154{
1155 struct efx_rx_buffer *rx_buf;
1156 efx_qword_t *rxd;
1157
1158 rxd = efx_rx_desc(rx_queue, index);
1159 rx_buf = efx_rx_buffer(rx_queue, index);
1160 EFX_POPULATE_QWORD_2(*rxd,
1161 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
1162 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
1163}
1164
1165static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
1166{
1167 struct efx_nic *efx = rx_queue->efx;
1168 unsigned int write_count;
1169 efx_dword_t reg;
1170
1171 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
1172 write_count = rx_queue->added_count & ~7;
1173 if (rx_queue->notified_count == write_count)
1174 return;
1175
1176	do {
1177		efx_ef10_build_rx_desc(
1178			rx_queue,
1179			rx_queue->notified_count & rx_queue->ptr_mask);
1180	} while (++rx_queue->notified_count != write_count);
1181
1182 wmb();
1183 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
1184 write_count & rx_queue->ptr_mask);
1185 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
1186 efx_rx_queue_index(rx_queue));
1187}
1188
1189static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
1190
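/* Ask the MC to deliver a driver-generated event to this channel's
 * event queue; handling that event will then refill the RX queue
 * from event-processing context.
 */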
1191static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
1192{
1193 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1194 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
1195 efx_qword_t event;
1196
1197 EFX_POPULATE_QWORD_2(event,
1198 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
1199 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
1200
1201 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
1202
1203 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
1204 * already swapped the data to little-endian order.
1205 */
1206 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
1207 sizeof(efx_qword_t));
1208
1209 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
1210 inbuf, sizeof(inbuf), 0,
1211 efx_ef10_rx_defer_refill_complete, 0);
1212}
1213
1214static void
1215efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
1216 int rc, efx_dword_t *outbuf,
1217 size_t outlen_actual)
1218{
1219 /* nothing to do */
1220}
1221
1222static int efx_ef10_ev_probe(struct efx_channel *channel)
1223{
1224 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
1225 (channel->eventq_mask + 1) *
1226 sizeof(efx_qword_t),
1227 GFP_KERNEL);
1228}
1229
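/* Create an event queue through MCDI INIT_EVQ.  Timer and event
 * counting are disabled, and cut-through is enabled only when the
 * datapath does not support RX event merging.
 */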
1230static int efx_ef10_ev_init(struct efx_channel *channel)
1231{
1232 MCDI_DECLARE_BUF(inbuf,
1233 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
1234 EFX_BUF_SIZE));
1235 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
1236 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
1237 struct efx_nic *efx = channel->efx;
1238 struct efx_ef10_nic_data *nic_data;
1239 bool supports_rx_merge;
1240 size_t inlen, outlen;
1241 dma_addr_t dma_addr;
1242 int rc;
1243 int i;
1244
1245 nic_data = efx->nic_data;
1246 supports_rx_merge =
1247 !!(nic_data->datapath_caps &
1248 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
1249
1250 /* Fill event queue with all ones (i.e. empty events) */
1251 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1252
1253 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
1254 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
1255 /* INIT_EVQ expects index in vector table, not absolute */
1256 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
1257 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
1258 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
1259 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
1260 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
1261 INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
1262 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
1263 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
1264 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
1265 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
1266 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
1267 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
1268 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
1269
1270 dma_addr = channel->eventq.buf.dma_addr;
1271 for (i = 0; i < entries; ++i) {
1272 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
1273 dma_addr += EFX_BUF_SIZE;
1274 }
1275
1276 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
1277
1278 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
1279 outbuf, sizeof(outbuf), &outlen);
1280 if (rc)
1281 goto fail;
1282
1283 /* IRQ return is ignored */
1284
1285 return 0;
1286
1287fail:
1288 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1289 return rc;
1290}
1291
1292static void efx_ef10_ev_fini(struct efx_channel *channel)
1293{
1294 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
1295 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
1296 struct efx_nic *efx = channel->efx;
1297 size_t outlen;
1298 int rc;
1299
1300 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
1301
1302 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
1303 outbuf, sizeof(outbuf), &outlen);
1304
1305 if (rc && rc != -EALREADY)
1306 goto fail;
1307
1308 return;
1309
1310fail:
1311 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1312}
1313
1314static void efx_ef10_ev_remove(struct efx_channel *channel)
1315{
1316 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
1317}
1318
1319static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
1320 unsigned int rx_queue_label)
1321{
1322 struct efx_nic *efx = rx_queue->efx;
1323
1324 netif_info(efx, hw, efx->net_dev,
1325 "rx event arrived on queue %d labeled as queue %u\n",
1326 efx_rx_queue_index(rx_queue), rx_queue_label);
1327
1328 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1329}
1330
1331static void
1332efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
1333 unsigned int actual, unsigned int expected)
1334{
1335 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
1336 struct efx_nic *efx = rx_queue->efx;
1337
1338 netif_info(efx, hw, efx->net_dev,
1339 "dropped %d events (index=%d expected=%d)\n",
1340 dropped, actual, expected);
1341
1342 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1343}
1344
1345/* A partially received RX packet was aborted; clean up. */
1346static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
1347{
1348 unsigned int rx_desc_ptr;
1349
1350 WARN_ON(rx_queue->scatter_n == 0);
1351
1352 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
1353 "scattered RX aborted (dropping %u buffers)\n",
1354 rx_queue->scatter_n);
1355
1356 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
1357
1358 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
1359 0, EFX_RX_PKT_DISCARD);
1360
1361 rx_queue->removed_count += rx_queue->scatter_n;
1362 rx_queue->scatter_n = 0;
1363 rx_queue->scatter_len = 0;
1364 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
1365}
1366
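/* Process an RX completion event.  A single event may describe one
 * fragment of a scattered packet, or a merged completion covering
 * several whole packets.  Returns the number of packets completed.
 */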
1367static int efx_ef10_handle_rx_event(struct efx_channel *channel,
1368 const efx_qword_t *event)
1369{
1370 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
1371 unsigned int n_descs, n_packets, i;
1372 struct efx_nic *efx = channel->efx;
1373 struct efx_rx_queue *rx_queue;
1374 bool rx_cont;
1375 u16 flags = 0;
1376
1377 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1378 return 0;
1379
1380 /* Basic packet information */
1381 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
1382 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
1383 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
1384 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
1385 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
1386
1387 WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
1388
1389 rx_queue = efx_channel_get_rx_queue(channel);
1390
1391 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
1392 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
1393
1394 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
1395 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1396
1397 if (n_descs != rx_queue->scatter_n + 1) {
1398 /* detect rx abort */
1399 if (unlikely(n_descs == rx_queue->scatter_n)) {
1400 WARN_ON(rx_bytes != 0);
1401 efx_ef10_handle_rx_abort(rx_queue);
1402 return 0;
1403 }
1404
1405 if (unlikely(rx_queue->scatter_n != 0)) {
1406 /* Scattered packet completions cannot be
1407 * merged, so something has gone wrong.
1408 */
1409 efx_ef10_handle_rx_bad_lbits(
1410 rx_queue, next_ptr_lbits,
1411 (rx_queue->removed_count +
1412 rx_queue->scatter_n + 1) &
1413 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1414 return 0;
1415 }
1416
1417 /* Merged completion for multiple non-scattered packets */
1418 rx_queue->scatter_n = 1;
1419 rx_queue->scatter_len = 0;
1420 n_packets = n_descs;
1421 ++channel->n_rx_merge_events;
1422 channel->n_rx_merge_packets += n_packets;
1423 flags |= EFX_RX_PKT_PREFIX_LEN;
1424 } else {
1425 ++rx_queue->scatter_n;
1426 rx_queue->scatter_len += rx_bytes;
1427 if (rx_cont)
1428 return 0;
1429 n_packets = 1;
1430 }
1431
1432 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
1433 flags |= EFX_RX_PKT_DISCARD;
1434
1435 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
1436 channel->n_rx_ip_hdr_chksum_err += n_packets;
1437 } else if (unlikely(EFX_QWORD_FIELD(*event,
1438 ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
1439 channel->n_rx_tcp_udp_chksum_err += n_packets;
1440 } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
1441 rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
1442 flags |= EFX_RX_PKT_CSUMMED;
1443 }
1444
1445 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
1446 flags |= EFX_RX_PKT_TCP;
1447
1448 channel->irq_mod_score += 2 * n_packets;
1449
1450 /* Handle received packet(s) */
1451 for (i = 0; i < n_packets; i++) {
1452 efx_rx_packet(rx_queue,
1453 rx_queue->removed_count & rx_queue->ptr_mask,
1454 rx_queue->scatter_n, rx_queue->scatter_len,
1455 flags);
1456 rx_queue->removed_count += rx_queue->scatter_n;
1457 }
1458
1459 rx_queue->scatter_n = 0;
1460 rx_queue->scatter_len = 0;
1461
1462 return n_packets;
1463}
1464
1465static int
1466efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
1467{
1468 struct efx_nic *efx = channel->efx;
1469 struct efx_tx_queue *tx_queue;
1470 unsigned int tx_ev_desc_ptr;
1471 unsigned int tx_ev_q_label;
1472 int tx_descs = 0;
1473
1474 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1475 return 0;
1476
1477 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
1478 return 0;
1479
1480 /* Transmit completion */
1481 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
1482 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
1483 tx_queue = efx_channel_get_tx_queue(channel,
1484 tx_ev_q_label % EFX_TXQ_TYPES);
1485 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
1486 tx_queue->ptr_mask);
1487 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
1488
1489 return tx_descs;
1490}
1491
1492static void
1493efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1494{
1495 struct efx_nic *efx = channel->efx;
1496 int subcode;
1497
1498 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
1499
1500 switch (subcode) {
1501 case ESE_DZ_DRV_TIMER_EV:
1502 case ESE_DZ_DRV_WAKE_UP_EV:
1503 break;
1504 case ESE_DZ_DRV_START_UP_EV:
1505		/* Event queue initialisation complete; nothing more to do. */
1506 break;
1507 default:
1508 netif_err(efx, hw, efx->net_dev,
1509 "channel %d unknown driver event type %d"
1510 " (data " EFX_QWORD_FMT ")\n",
1511 channel->channel, subcode,
1512 EFX_QWORD_VAL(*event));
1513
1514 }
1515}
1516
1517static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
1518 efx_qword_t *event)
1519{
1520 struct efx_nic *efx = channel->efx;
1521 u32 subcode;
1522
1523 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
1524
1525 switch (subcode) {
1526 case EFX_EF10_TEST:
1527 channel->event_test_cpu = raw_smp_processor_id();
1528 break;
1529 case EFX_EF10_REFILL:
1530		/* The queue must be empty, so we won't receive any RX
1531		 * events and efx_process_channel() won't refill the
1532		 * queue.  Refill it here.
1533		 */
1534 efx_fast_push_rx_descriptors(&channel->rx_queue);
1535 break;
1536 default:
1537 netif_err(efx, hw, efx->net_dev,
1538 "channel %d unknown driver event type %u"
1539 " (data " EFX_QWORD_FMT ")\n",
1540 channel->channel, (unsigned) subcode,
1541 EFX_QWORD_VAL(*event));
1542 }
1543}
1544
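/* Event processing loop, called from NAPI context: consume events
 * until the queue is empty or the quota is reached, overwriting each
 * event with all-ones (the empty pattern) as it is read.
 */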
1545static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
1546{
1547 struct efx_nic *efx = channel->efx;
1548 efx_qword_t event, *p_event;
1549 unsigned int read_ptr;
1550 int ev_code;
1551 int tx_descs = 0;
1552 int spent = 0;
1553
1554 read_ptr = channel->eventq_read_ptr;
1555
1556 for (;;) {
1557 p_event = efx_event(channel, read_ptr);
1558 event = *p_event;
1559
1560 if (!efx_event_present(&event))
1561 break;
1562
1563 EFX_SET_QWORD(*p_event);
1564
1565 ++read_ptr;
1566
1567 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
1568
1569 netif_vdbg(efx, drv, efx->net_dev,
1570 "processing event on %d " EFX_QWORD_FMT "\n",
1571 channel->channel, EFX_QWORD_VAL(event));
1572
1573 switch (ev_code) {
1574 case ESE_DZ_EV_CODE_MCDI_EV:
1575 efx_mcdi_process_event(channel, &event);
1576 break;
1577 case ESE_DZ_EV_CODE_RX_EV:
1578 spent += efx_ef10_handle_rx_event(channel, &event);
1579 if (spent >= quota) {
1580 /* XXX can we split a merged event to
1581 * avoid going over-quota?
1582 */
1583 spent = quota;
1584 goto out;
1585 }
1586 break;
1587 case ESE_DZ_EV_CODE_TX_EV:
1588 tx_descs += efx_ef10_handle_tx_event(channel, &event);
1589 if (tx_descs > efx->txq_entries) {
1590 spent = quota;
1591 goto out;
1592 } else if (++spent == quota) {
1593 goto out;
1594 }
1595 break;
1596 case ESE_DZ_EV_CODE_DRIVER_EV:
1597 efx_ef10_handle_driver_event(channel, &event);
1598 if (++spent == quota)
1599 goto out;
1600 break;
1601 case EFX_EF10_DRVGEN_EV:
1602 efx_ef10_handle_driver_generated_event(channel, &event);
1603 break;
1604 default:
1605 netif_err(efx, hw, efx->net_dev,
1606 "channel %d unknown event type %d"
1607 " (data " EFX_QWORD_FMT ")\n",
1608 channel->channel, ev_code,
1609 EFX_QWORD_VAL(event));
1610 }
1611 }
1612
1613out:
1614 channel->eventq_read_ptr = read_ptr;
1615 return spent;
1616}
1617
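/* Update the event queue read pointer.  With workaround 35388 the
 * pointer must be written in two halves (high bits first) through
 * the indirect EVQ register.
 */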
1618static void efx_ef10_ev_read_ack(struct efx_channel *channel)
1619{
1620 struct efx_nic *efx = channel->efx;
1621 efx_dword_t rptr;
1622
1623 if (EFX_EF10_WORKAROUND_35388(efx)) {
1624 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
1625 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
1626 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
1627 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
1628
1629 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
1630 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
1631 ERF_DD_EVQ_IND_RPTR,
1632 (channel->eventq_read_ptr &
1633 channel->eventq_mask) >>
1634 ERF_DD_EVQ_IND_RPTR_WIDTH);
1635 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
1636 channel->channel);
1637 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
1638 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
1639 ERF_DD_EVQ_IND_RPTR,
1640 channel->eventq_read_ptr &
1641 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
1642 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
1643 channel->channel);
1644 } else {
1645 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
1646 channel->eventq_read_ptr &
1647 channel->eventq_mask);
1648 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
1649 }
1650}
1651
1652static void efx_ef10_ev_test_generate(struct efx_channel *channel)
1653{
1654 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
1655 struct efx_nic *efx = channel->efx;
1656 efx_qword_t event;
1657 int rc;
1658
1659 EFX_POPULATE_QWORD_2(event,
1660 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
1661 ESF_DZ_EV_DATA, EFX_EF10_TEST);
1662
1663 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
1664
1665 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
1666 * already swapped the data to little-endian order.
1667 */
1668 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
1669 sizeof(efx_qword_t));
1670
1671 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
1672 NULL, 0, NULL);
1673 if (rc != 0)
1674 goto fail;
1675
1676 return;
1677
1678fail:
1679 WARN_ON(true);
1680 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1681}
1682
1683void efx_ef10_handle_drain_event(struct efx_nic *efx)
1684{
1685 if (atomic_dec_and_test(&efx->active_queues))
1686 wake_up(&efx->flush_wq);
1687
1688 WARN_ON(atomic_read(&efx->active_queues) < 0);
1689}
1690
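/* Tear down all TX and RX queues and wait for the resulting drain
 * events, unless the MC has rebooted (the queues are already gone)
 * or we are in EEH recovery (writing to the NIC is unsafe).
 */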
1691static int efx_ef10_fini_dmaq(struct efx_nic *efx)
1692{
1693 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1694 struct efx_channel *channel;
1695 struct efx_tx_queue *tx_queue;
1696 struct efx_rx_queue *rx_queue;
1697 int pending;
1698
1699 /* If the MC has just rebooted, the TX/RX queues will have already been
1700 * torn down, but efx->active_queues needs to be set to zero.
1701 */
1702 if (nic_data->must_realloc_vis) {
1703 atomic_set(&efx->active_queues, 0);
1704 return 0;
1705 }
1706
1707 /* Do not attempt to write to the NIC during EEH recovery */
1708 if (efx->state != STATE_RECOVERY) {
1709 efx_for_each_channel(channel, efx) {
1710 efx_for_each_channel_rx_queue(rx_queue, channel)
1711 efx_ef10_rx_fini(rx_queue);
1712 efx_for_each_channel_tx_queue(tx_queue, channel)
1713 efx_ef10_tx_fini(tx_queue);
1714 }
1715
1716 wait_event_timeout(efx->flush_wq,
1717 atomic_read(&efx->active_queues) == 0,
1718 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
1719 pending = atomic_read(&efx->active_queues);
1720 if (pending) {
1721 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
1722 pending);
1723 return -ETIMEDOUT;
1724 }
1725 }
1726
1727 return 0;
1728}
1729
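/* Compare two filter specs for equality.  The match flags and the
 * RX/TX direction must match exactly; the remaining fields, which
 * are laid out contiguously from outer_vid onwards, are compared
 * with memcmp().
 */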
1730static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
1731 const struct efx_filter_spec *right)
1732{
1733 if ((left->match_flags ^ right->match_flags) |
1734 ((left->flags ^ right->flags) &
1735 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
1736 return false;
1737
1738 return memcmp(&left->outer_vid, &right->outer_vid,
1739 sizeof(struct efx_filter_spec) -
1740 offsetof(struct efx_filter_spec, outer_vid)) == 0;
1741}
1742
1743static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
1744{
1745 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
1746 return jhash2((const u32 *)&spec->outer_vid,
1747 (sizeof(struct efx_filter_spec) -
1748 offsetof(struct efx_filter_spec, outer_vid)) / 4,
1749 0);
1750 /* XXX should we randomise the initval? */
1751}
1752
1753/* Decide whether a filter should be exclusive or else should allow
1754 * delivery to additional recipients. Currently we decide that
1755 * filters for specific local unicast MAC and IP addresses are
1756 * exclusive.
1757 */
1758static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
1759{
1760 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
1761 !is_multicast_ether_addr(spec->loc_mac))
1762 return true;
1763
1764 if ((spec->match_flags &
1765 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
1766 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
1767 if (spec->ether_type == htons(ETH_P_IP) &&
1768 !ipv4_is_multicast(spec->loc_host[0]))
1769 return true;
1770 if (spec->ether_type == htons(ETH_P_IPV6) &&
1771 ((const u8 *)spec->loc_host)[0] != 0xff)
1772 return true;
1773 }
1774
1775 return false;
1776}
1777
1778static struct efx_filter_spec *
1779efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
1780 unsigned int filter_idx)
1781{
1782 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
1783 ~EFX_EF10_FILTER_FLAGS);
1784}
1785
1786static unsigned int
1787efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
1788 unsigned int filter_idx)
1789{
1790 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
1791}
1792
1793static void
1794efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
1795 unsigned int filter_idx,
1796 const struct efx_filter_spec *spec,
1797 unsigned int flags)
1798{
1799 table->entry[filter_idx].spec = (unsigned long)spec | flags;
1800}
1801
1802static void efx_ef10_filter_push_prep(struct efx_nic *efx,
1803 const struct efx_filter_spec *spec,
1804 efx_dword_t *inbuf, u64 handle,
1805 bool replacing)
1806{
1807 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1808
1809 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
1810
1811 if (replacing) {
1812 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1813 MC_CMD_FILTER_OP_IN_OP_REPLACE);
1814 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
1815 } else {
1816 u32 match_fields = 0;
1817
1818 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1819 efx_ef10_filter_is_exclusive(spec) ?
1820 MC_CMD_FILTER_OP_IN_OP_INSERT :
1821 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
1822
1823 /* Convert match flags and values. Unlike almost
1824 * everything else in MCDI, these fields are in
1825 * network byte order.
1826 */
1827 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
1828 match_fields |=
1829 is_multicast_ether_addr(spec->loc_mac) ?
1830 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
1831 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
1832#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
1833 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
1834 match_fields |= \
1835 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
1836 mcdi_field ## _LBN; \
1837 BUILD_BUG_ON( \
1838 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
1839 sizeof(spec->gen_field)); \
1840 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
1841 &spec->gen_field, sizeof(spec->gen_field)); \
1842 }
1843 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
1844 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
1845 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
1846 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
1847 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
1848 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
1849 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
1850 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
1851 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
1852 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
1853#undef COPY_FIELD
1854 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
1855 match_fields);
1856 }
1857
1858 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1859 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
1860 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
1861 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
1862 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
1863 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
1864 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
1865 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
1866 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
1867 (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
1868 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
1869 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
1870 if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
1871 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
1872 spec->rss_context !=
1873 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
1874 spec->rss_context : nic_data->rx_rss_context);
1875}
1876
1877static int efx_ef10_filter_push(struct efx_nic *efx,
1878 const struct efx_filter_spec *spec,
1879 u64 *handle, bool replacing)
1880{
1881 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
1882 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
1883 int rc;
1884
1885 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
1886 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
1887 outbuf, sizeof(outbuf), NULL);
1888 if (rc == 0)
1889 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
1890 return rc;
1891}
1892
1893static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
1894 enum efx_filter_match_flags match_flags)
1895{
1896 unsigned int match_pri;
1897
1898 for (match_pri = 0;
1899 match_pri < table->rx_match_count;
1900 match_pri++)
1901 if (table->rx_match_flags[match_pri] == match_flags)
1902 return match_pri;
1903
1904 return -EPROTONOSUPPORT;
1905}
1906
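/* Insert a filter into the software table and push it to the
 * firmware.  Entries are open-hashed with a bounded probe depth; a
 * slot marked busy has a firmware operation in flight, so we sleep
 * on the table's wait queue and retry.  Multiple filters with the
 * same match tuple may coexist for multicast recipients; inserting
 * a higher-priority one removes the lower-priority entries it
 * supersedes.
 */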
1907static s32 efx_ef10_filter_insert(struct efx_nic *efx,
1908 struct efx_filter_spec *spec,
1909 bool replace_equal)
1910{
1911 struct efx_ef10_filter_table *table = efx->filter_state;
1912 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
1913 struct efx_filter_spec *saved_spec;
1914 unsigned int match_pri, hash;
1915 unsigned int priv_flags;
1916 bool replacing = false;
1917 int ins_index = -1;
1918 DEFINE_WAIT(wait);
1919 bool is_mc_recip;
1920 s32 rc;
1921
1922 /* For now, only support RX filters */
1923 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
1924 EFX_FILTER_FLAG_RX)
1925 return -EINVAL;
1926
1927 rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
1928 if (rc < 0)
1929 return rc;
1930 match_pri = rc;
1931
1932 hash = efx_ef10_filter_hash(spec);
1933 is_mc_recip = efx_filter_is_mc_recipient(spec);
1934 if (is_mc_recip)
1935 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
1936
1937 /* Find any existing filters with the same match tuple or
1938 * else a free slot to insert at. If any of them are busy,
1939 * we have to wait and retry.
1940 */
1941 for (;;) {
1942 unsigned int depth = 1;
1943 unsigned int i;
1944
1945 spin_lock_bh(&efx->filter_lock);
1946
1947 for (;;) {
1948 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
1949 saved_spec = efx_ef10_filter_entry_spec(table, i);
1950
1951 if (!saved_spec) {
1952 if (ins_index < 0)
1953 ins_index = i;
1954 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
1955 if (table->entry[i].spec &
1956 EFX_EF10_FILTER_FLAG_BUSY)
1957 break;
1958 if (spec->priority < saved_spec->priority &&
1959 !(saved_spec->priority ==
1960 EFX_FILTER_PRI_REQUIRED &&
1961 saved_spec->flags &
1962 EFX_FILTER_FLAG_RX_STACK)) {
1963 rc = -EPERM;
1964 goto out_unlock;
1965 }
1966 if (!is_mc_recip) {
1967 /* This is the only one */
1968 if (spec->priority ==
1969 saved_spec->priority &&
1970 !replace_equal) {
1971 rc = -EEXIST;
1972 goto out_unlock;
1973 }
1974 ins_index = i;
1975 goto found;
1976 } else if (spec->priority >
1977 saved_spec->priority ||
1978 (spec->priority ==
1979 saved_spec->priority &&
1980 replace_equal)) {
1981 if (ins_index < 0)
1982 ins_index = i;
1983 else
1984 __set_bit(depth, mc_rem_map);
1985 }
1986 }
1987
1988 /* Once we reach the maximum search depth, use
1989 * the first suitable slot or return -EBUSY if
1990 * there was none
1991 */
1992 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
1993 if (ins_index < 0) {
1994 rc = -EBUSY;
1995 goto out_unlock;
1996 }
1997 goto found;
1998 }
1999
2000 ++depth;
2001 }
2002
2003 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2004 spin_unlock_bh(&efx->filter_lock);
2005 schedule();
2006 }
2007
2008found:
2009 /* Create a software table entry if necessary, and mark it
2010 * busy. We might yet fail to insert, but any attempt to
2011 * insert a conflicting filter while we're waiting for the
2012 * firmware must find the busy entry.
2013 */
2014 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2015 if (saved_spec) {
2016 if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
2017 /* Just make sure it won't be removed */
2018 saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
2019 table->entry[ins_index].spec &=
2020 ~EFX_EF10_FILTER_FLAG_STACK_OLD;
2021 rc = ins_index;
2022 goto out_unlock;
2023 }
2024 replacing = true;
2025 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
2026 } else {
2027 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2028 if (!saved_spec) {
2029 rc = -ENOMEM;
2030 goto out_unlock;
2031 }
2032 *saved_spec = *spec;
2033 priv_flags = 0;
2034 }
2035 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2036 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
2037
2038 /* Mark lower-priority multicast recipients busy prior to removal */
2039 if (is_mc_recip) {
2040 unsigned int depth, i;
2041
2042 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2043 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2044 if (test_bit(depth, mc_rem_map))
2045 table->entry[i].spec |=
2046 EFX_EF10_FILTER_FLAG_BUSY;
2047 }
2048 }
2049
2050 spin_unlock_bh(&efx->filter_lock);
2051
2052 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
2053 replacing);
2054
2055 /* Finalise the software table entry */
2056 spin_lock_bh(&efx->filter_lock);
2057 if (rc == 0) {
2058 if (replacing) {
2059 /* Update the fields that may differ */
2060 saved_spec->priority = spec->priority;
2061 saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
2062 saved_spec->flags |= spec->flags;
2063 saved_spec->rss_context = spec->rss_context;
2064 saved_spec->dmaq_id = spec->dmaq_id;
2065 }
2066 } else if (!replacing) {
2067 kfree(saved_spec);
2068 saved_spec = NULL;
2069 }
2070 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
2071
2072 /* Remove and finalise entries for lower-priority multicast
2073 * recipients
2074 */
2075 if (is_mc_recip) {
2076 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2077 unsigned int depth, i;
2078
2079 memset(inbuf, 0, sizeof(inbuf));
2080
2081 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2082 if (!test_bit(depth, mc_rem_map))
2083 continue;
2084
2085 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2086 saved_spec = efx_ef10_filter_entry_spec(table, i);
2087 priv_flags = efx_ef10_filter_entry_flags(table, i);
2088
2089 if (rc == 0) {
2090 spin_unlock_bh(&efx->filter_lock);
2091 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2092 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2093 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2094 table->entry[i].handle);
2095 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2096 inbuf, sizeof(inbuf),
2097 NULL, 0, NULL);
2098 spin_lock_bh(&efx->filter_lock);
2099 }
2100
2101 if (rc == 0) {
2102 kfree(saved_spec);
2103 saved_spec = NULL;
2104 priv_flags = 0;
2105 } else {
2106 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
2107 }
2108 efx_ef10_filter_set_entry(table, i, saved_spec,
2109 priv_flags);
2110 }
2111 }
2112
2113 /* If successful, return the inserted filter ID */
2114 if (rc == 0)
2115 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
2116
2117 wake_up_all(&table->waitq);
2118out_unlock:
2119 spin_unlock_bh(&efx->filter_lock);
2120 finish_wait(&table->waitq, &wait);
2121 return rc;
2122}
2123
2124void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
2125{
2126 /* no need to do anything here on EF10 */
2127}
2128
2129/* Remove a filter.
2130 * If !stack_requested, remove by ID
2131 * If stack_requested, remove by index
2132 * Filter ID may come from userland and must be range-checked.
2133 */
2134static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
2135 enum efx_filter_priority priority,
2136 u32 filter_id, bool stack_requested)
2137{
2138 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2139 struct efx_ef10_filter_table *table = efx->filter_state;
2140 MCDI_DECLARE_BUF(inbuf,
2141 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2142 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2143 struct efx_filter_spec *spec;
2144 DEFINE_WAIT(wait);
2145 int rc;
2146
2147 /* Find the software table entry and mark it busy. Don't
2148 * remove it yet; any attempt to update while we're waiting
2149 * for the firmware must find the busy entry.
2150 */
2151 for (;;) {
2152 spin_lock_bh(&efx->filter_lock);
2153 if (!(table->entry[filter_idx].spec &
2154 EFX_EF10_FILTER_FLAG_BUSY))
2155 break;
2156 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2157 spin_unlock_bh(&efx->filter_lock);
2158 schedule();
2159 }
2160 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2161 if (!spec || spec->priority > priority ||
2162 (!stack_requested &&
2163 efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
2164 filter_id / HUNT_FILTER_TBL_ROWS)) {
2165 rc = -ENOENT;
2166 goto out_unlock;
2167 }
2168 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2169 spin_unlock_bh(&efx->filter_lock);
2170
2171 if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
2172 /* Reset steering of a stack-owned filter */
2173
2174 struct efx_filter_spec new_spec = *spec;
2175
2176 new_spec.priority = EFX_FILTER_PRI_REQUIRED;
2177 new_spec.flags = (EFX_FILTER_FLAG_RX |
2178 EFX_FILTER_FLAG_RX_RSS |
2179 EFX_FILTER_FLAG_RX_STACK);
2180 new_spec.dmaq_id = 0;
2181 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
2182 rc = efx_ef10_filter_push(efx, &new_spec,
2183 &table->entry[filter_idx].handle,
2184 true);
2185
2186 spin_lock_bh(&efx->filter_lock);
2187 if (rc == 0)
2188 *spec = new_spec;
2189 } else {
2190 /* Really remove the filter */
2191
2192 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2193 efx_ef10_filter_is_exclusive(spec) ?
2194 MC_CMD_FILTER_OP_IN_OP_REMOVE :
2195 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2196 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2197 table->entry[filter_idx].handle);
2198 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2199 inbuf, sizeof(inbuf), NULL, 0, NULL);
2200
2201 spin_lock_bh(&efx->filter_lock);
2202 if (rc == 0) {
2203 kfree(spec);
2204 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2205 }
2206 }
2207 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2208 wake_up_all(&table->waitq);
2209out_unlock:
2210 spin_unlock_bh(&efx->filter_lock);
2211 finish_wait(&table->waitq, &wait);
2212 return rc;
2213}
2214
2215static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
2216 enum efx_filter_priority priority,
2217 u32 filter_id)
2218{
2219 return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
2220}
2221
2222static int efx_ef10_filter_get_safe(struct efx_nic *efx,
2223 enum efx_filter_priority priority,
2224 u32 filter_id, struct efx_filter_spec *spec)
2225{
2226 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2227 struct efx_ef10_filter_table *table = efx->filter_state;
2228 const struct efx_filter_spec *saved_spec;
2229 int rc;
2230
2231 spin_lock_bh(&efx->filter_lock);
2232 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
2233 if (saved_spec && saved_spec->priority == priority &&
2234 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
2235 filter_id / HUNT_FILTER_TBL_ROWS) {
2236 *spec = *saved_spec;
2237 rc = 0;
2238 } else {
2239 rc = -ENOENT;
2240 }
2241 spin_unlock_bh(&efx->filter_lock);
2242 return rc;
2243}
2244
2245static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
2246 enum efx_filter_priority priority)
2247{
2248 /* TODO */
2249}
2250
2251static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
2252 enum efx_filter_priority priority)
2253{
2254 struct efx_ef10_filter_table *table = efx->filter_state;
2255 unsigned int filter_idx;
2256 s32 count = 0;
2257
2258 spin_lock_bh(&efx->filter_lock);
2259 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2260 if (table->entry[filter_idx].spec &&
2261 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
2262 priority)
2263 ++count;
2264 }
2265 spin_unlock_bh(&efx->filter_lock);
2266 return count;
2267}
2268
2269static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
2270{
2271 struct efx_ef10_filter_table *table = efx->filter_state;
2272
2273 return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
2274}
2275
2276static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
2277 enum efx_filter_priority priority,
2278 u32 *buf, u32 size)
2279{
2280 struct efx_ef10_filter_table *table = efx->filter_state;
2281 struct efx_filter_spec *spec;
2282 unsigned int filter_idx;
2283 s32 count = 0;
2284
2285 spin_lock_bh(&efx->filter_lock);
2286 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2287 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2288 if (spec && spec->priority == priority) {
2289 if (count == size) {
2290 count = -EMSGSIZE;
2291 break;
2292 }
2293 buf[count++] = (efx_ef10_filter_rx_match_pri(
2294 table, spec->match_flags) *
2295 HUNT_FILTER_TBL_ROWS +
2296 filter_idx);
2297 }
2298 }
2299 spin_unlock_bh(&efx->filter_lock);
2300 return count;
2301}
2302
2303#ifdef CONFIG_RFS_ACCEL
2304
2305static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
2306
2307static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
2308 struct efx_filter_spec *spec)
2309{
2310 struct efx_ef10_filter_table *table = efx->filter_state;
2311 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2312 struct efx_filter_spec *saved_spec;
2313 unsigned int hash, i, depth = 1;
2314 bool replacing = false;
2315 int ins_index = -1;
2316 u64 cookie;
2317 s32 rc;
2318
2319 /* Must be an RX filter without RSS and not for a multicast
2320 * destination address (RFS only works for connected sockets).
2321 * These restrictions allow us to pass only a tiny amount of
2322 * data through to the completion function.
2323 */
2324 EFX_WARN_ON_PARANOID(spec->flags !=
2325 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
2326 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
2327 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
2328
2329 hash = efx_ef10_filter_hash(spec);
2330
2331 spin_lock_bh(&efx->filter_lock);
2332
2333 /* Find any existing filter with the same match tuple or else
2334 * a free slot to insert at. If an existing filter is busy,
2335 * we have to give up.
2336 */
2337 for (;;) {
2338 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2339 saved_spec = efx_ef10_filter_entry_spec(table, i);
2340
2341 if (!saved_spec) {
2342 if (ins_index < 0)
2343 ins_index = i;
2344 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
2345 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
2346 rc = -EBUSY;
2347 goto fail_unlock;
2348 }
2349 EFX_WARN_ON_PARANOID(saved_spec->flags &
2350 EFX_FILTER_FLAG_RX_STACK);
2351 if (spec->priority < saved_spec->priority) {
2352 rc = -EPERM;
2353 goto fail_unlock;
2354 }
2355 ins_index = i;
2356 break;
2357 }
2358
2359 /* Once we reach the maximum search depth, use the
2360 * first suitable slot or return -EBUSY if there was
2361 * none
2362 */
2363 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2364 if (ins_index < 0) {
2365 rc = -EBUSY;
2366 goto fail_unlock;
2367 }
2368 break;
2369 }
2370
2371 ++depth;
2372 }
2373
2374 /* Create a software table entry if necessary, and mark it
2375 * busy. We might yet fail to insert, but any attempt to
2376 * insert a conflicting filter while we're waiting for the
2377 * firmware must find the busy entry.
2378 */
2379 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2380 if (saved_spec) {
2381 replacing = true;
2382 } else {
2383 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2384 if (!saved_spec) {
2385 rc = -ENOMEM;
2386 goto fail_unlock;
2387 }
2388 *saved_spec = *spec;
2389 }
2390 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2391 EFX_EF10_FILTER_FLAG_BUSY);
2392
2393 spin_unlock_bh(&efx->filter_lock);
2394
2395 /* Pack up the variables needed on completion */
2396 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
2397
2398 efx_ef10_filter_push_prep(efx, spec, inbuf,
2399 table->entry[ins_index].handle, replacing);
2400 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2401 MC_CMD_FILTER_OP_OUT_LEN,
2402 efx_ef10_filter_rfs_insert_complete, cookie);
2403
2404 return ins_index;
2405
2406fail_unlock:
2407 spin_unlock_bh(&efx->filter_lock);
2408 return rc;
2409}
2410
2411static void
2412efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
2413 int rc, efx_dword_t *outbuf,
2414 size_t outlen_actual)
2415{
2416 struct efx_ef10_filter_table *table = efx->filter_state;
2417 unsigned int ins_index, dmaq_id;
2418 struct efx_filter_spec *spec;
2419 bool replacing;
2420
2421 /* Unpack the cookie */
2422 replacing = cookie >> 31;
2423 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
2424 dmaq_id = cookie & 0xffff;
2425
2426 spin_lock_bh(&efx->filter_lock);
2427 spec = efx_ef10_filter_entry_spec(table, ins_index);
2428 if (rc == 0) {
2429 table->entry[ins_index].handle =
2430 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2431 if (replacing)
2432 spec->dmaq_id = dmaq_id;
2433 } else if (!replacing) {
2434 kfree(spec);
2435 spec = NULL;
2436 }
2437 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
2438 spin_unlock_bh(&efx->filter_lock);
2439
2440 wake_up_all(&table->waitq);
2441}
2442
2443static void
2444efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2445 unsigned long filter_idx,
2446 int rc, efx_dword_t *outbuf,
2447 size_t outlen_actual);
2448
2449static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2450 unsigned int filter_idx)
2451{
2452 struct efx_ef10_filter_table *table = efx->filter_state;
2453 struct efx_filter_spec *spec =
2454 efx_ef10_filter_entry_spec(table, filter_idx);
2455 MCDI_DECLARE_BUF(inbuf,
2456 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2457 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2458
2459 if (!spec ||
2460 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
2461 spec->priority != EFX_FILTER_PRI_HINT ||
2462 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
2463 flow_id, filter_idx))
2464 return false;
2465
2466 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2467 MC_CMD_FILTER_OP_IN_OP_REMOVE);
2468 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2469 table->entry[filter_idx].handle);
2470 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
2471 efx_ef10_filter_rfs_expire_complete, filter_idx))
2472 return false;
2473
2474 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2475 return true;
2476}
2477
2478static void
2479efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2480 unsigned long filter_idx,
2481 int rc, efx_dword_t *outbuf,
2482 size_t outlen_actual)
2483{
2484 struct efx_ef10_filter_table *table = efx->filter_state;
2485 struct efx_filter_spec *spec =
2486 efx_ef10_filter_entry_spec(table, filter_idx);
2487
2488 spin_lock_bh(&efx->filter_lock);
2489 if (rc == 0) {
2490 kfree(spec);
2491 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2492 }
2493 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2494 wake_up_all(&table->waitq);
2495 spin_unlock_bh(&efx->filter_lock);
2496}
2497
2498#endif /* CONFIG_RFS_ACCEL */
2499
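/* Convert a bitmask of MCDI match fields into the generic filter
 * match flags, returning -EINVAL if any bits are left over.
 */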
2500static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
2501{
2502 int match_flags = 0;
2503
2504#define MAP_FLAG(gen_flag, mcdi_field) { \
2505 u32 old_mcdi_flags = mcdi_flags; \
2506 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
2507 mcdi_field ## _LBN); \
2508 if (mcdi_flags != old_mcdi_flags) \
2509 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
2510 }
2511 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
2512 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
2513 MAP_FLAG(REM_HOST, SRC_IP);
2514 MAP_FLAG(LOC_HOST, DST_IP);
2515 MAP_FLAG(REM_MAC, SRC_MAC);
2516 MAP_FLAG(REM_PORT, SRC_PORT);
2517 MAP_FLAG(LOC_MAC, DST_MAC);
2518 MAP_FLAG(LOC_PORT, DST_PORT);
2519 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
2520 MAP_FLAG(INNER_VID, INNER_VLAN);
2521 MAP_FLAG(OUTER_VID, OUTER_VLAN);
2522 MAP_FLAG(IP_PROTO, IP_PROTO);
2523#undef MAP_FLAG
2524
2525 /* Did we map them all? */
2526 if (mcdi_flags)
2527 return -EINVAL;
2528
2529 return match_flags;
2530}
2531
2532static int efx_ef10_filter_table_probe(struct efx_nic *efx)
2533{
2534 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
2535 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
2536 unsigned int pd_match_pri, pd_match_count;
2537 struct efx_ef10_filter_table *table;
2538 size_t outlen;
2539 int rc;
2540
2541 table = kzalloc(sizeof(*table), GFP_KERNEL);
2542 if (!table)
2543 return -ENOMEM;
2544
2545 /* Find out which RX filter types are supported, and their priorities */
2546 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
2547 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
2548 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
2549 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
2550 &outlen);
2551 if (rc)
2552 goto fail;
2553 pd_match_count = MCDI_VAR_ARRAY_LEN(
2554 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
2555 table->rx_match_count = 0;
2556
2557 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
2558 u32 mcdi_flags =
2559 MCDI_ARRAY_DWORD(
2560 outbuf,
2561 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
2562 pd_match_pri);
2563 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
2564 if (rc < 0) {
2565 netif_dbg(efx, probe, efx->net_dev,
2566 "%s: fw flags %#x pri %u not supported in driver\n",
2567 __func__, mcdi_flags, pd_match_pri);
2568 } else {
2569 netif_dbg(efx, probe, efx->net_dev,
2570 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
2571 __func__, mcdi_flags, pd_match_pri,
2572 rc, table->rx_match_count);
2573 table->rx_match_flags[table->rx_match_count++] = rc;
2574 }
2575 }
2576
2577 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
2578 if (!table->entry) {
2579 rc = -ENOMEM;
2580 goto fail;
2581 }
2582
2583 efx->filter_state = table;
2584 init_waitqueue_head(&table->waitq);
2585 return 0;
2586
2587fail:
2588 kfree(table);
2589 return rc;
2590}
2591
2592static void efx_ef10_filter_table_restore(struct efx_nic *efx)
2593{
2594 struct efx_ef10_filter_table *table = efx->filter_state;
2595 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2596 struct efx_filter_spec *spec;
2597 unsigned int filter_idx;
2598 bool failed = false;
2599 int rc;
2600
2601 if (!nic_data->must_restore_filters)
2602 return;
2603
2604 spin_lock_bh(&efx->filter_lock);
2605
2606 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2607 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2608 if (!spec)
2609 continue;
2610
2611 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2612 spin_unlock_bh(&efx->filter_lock);
2613
2614 rc = efx_ef10_filter_push(efx, spec,
2615 &table->entry[filter_idx].handle,
2616 false);
2617 if (rc)
2618 failed = true;
2619
2620 spin_lock_bh(&efx->filter_lock);
2621 if (rc) {
2622 kfree(spec);
2623 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2624 } else {
2625 table->entry[filter_idx].spec &=
2626 ~EFX_EF10_FILTER_FLAG_BUSY;
2627 }
2628 }
2629
2630 spin_unlock_bh(&efx->filter_lock);
2631
2632 if (failed)
2633 netif_err(efx, hw, efx->net_dev,
2634 "unable to restore all filters\n");
2635 else
2636 nic_data->must_restore_filters = false;
2637}
2638
2639static void efx_ef10_filter_table_remove(struct efx_nic *efx)
2640{
2641 struct efx_ef10_filter_table *table = efx->filter_state;
2642 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2643 struct efx_filter_spec *spec;
2644 unsigned int filter_idx;
2645 int rc;
2646
2647 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2648 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2649 if (!spec)
2650 continue;
2651
2652 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2653 efx_ef10_filter_is_exclusive(spec) ?
2654 MC_CMD_FILTER_OP_IN_OP_REMOVE :
2655 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2656 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2657 table->entry[filter_idx].handle);
2658 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2659 NULL, 0, NULL);
2660
2661 WARN_ON(rc != 0);
2662 kfree(spec);
2663 }
2664
2665 vfree(table->entry);
2666 kfree(table);
2667}
2668
2669static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
2670{
2671 struct efx_ef10_filter_table *table = efx->filter_state;
2672 struct net_device *net_dev = efx->net_dev;
2673 struct efx_filter_spec spec;
2674 bool remove_failed = false;
2675 struct netdev_hw_addr *uc;
2676 struct netdev_hw_addr *mc;
2677 unsigned int filter_idx;
2678 int i, n, rc;
2679
2680 if (!efx_dev_registered(efx))
2681 return;
2682
2683 /* Mark old filters that may need to be removed */
2684 spin_lock_bh(&efx->filter_lock);
2685 n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
2686 for (i = 0; i < n; i++) {
2687 filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
2688 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2689 }
2690 n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
2691 for (i = 0; i < n; i++) {
2692 filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
2693 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2694 }
2695 spin_unlock_bh(&efx->filter_lock);
2696
2697 /* Copy/convert the address lists; add the primary station
2698 * address and broadcast address
2699 */
2700 netif_addr_lock_bh(net_dev);
2701 if (net_dev->flags & IFF_PROMISC ||
2702 netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
2703 table->stack_uc_count = -1;
2704 } else {
2705 table->stack_uc_count = 1 + netdev_uc_count(net_dev);
2706 memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
2707 ETH_ALEN);
2708 i = 1;
2709 netdev_for_each_uc_addr(uc, net_dev) {
2710 memcpy(table->stack_uc_list[i].addr,
2711 uc->addr, ETH_ALEN);
2712 i++;
2713 }
2714 }
2715 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
2716 netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
2717 table->stack_mc_count = -1;
2718 } else {
2719 table->stack_mc_count = 1 + netdev_mc_count(net_dev);
2720 eth_broadcast_addr(table->stack_mc_list[0].addr);
2721 i = 1;
2722 netdev_for_each_mc_addr(mc, net_dev) {
2723 memcpy(table->stack_mc_list[i].addr,
2724 mc->addr, ETH_ALEN);
2725 i++;
2726 }
2727 }
2728 netif_addr_unlock_bh(net_dev);
2729
2730 /* Insert/renew unicast filters */
2731 if (table->stack_uc_count >= 0) {
2732 for (i = 0; i < table->stack_uc_count; i++) {
2733 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2734 EFX_FILTER_FLAG_RX_RSS |
2735 EFX_FILTER_FLAG_RX_STACK,
2736 0);
2737 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2738 table->stack_uc_list[i].addr);
2739 rc = efx_ef10_filter_insert(efx, &spec, true);
2740 if (rc < 0) {
2741 /* Fall back to unicast-promisc */
2742 while (i--)
2743 efx_ef10_filter_remove_safe(
2744 efx, EFX_FILTER_PRI_REQUIRED,
2745 table->stack_uc_list[i].id);
2746 table->stack_uc_count = -1;
2747 break;
2748 }
2749 table->stack_uc_list[i].id = rc;
2750 }
2751 }
2752 if (table->stack_uc_count < 0) {
2753 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2754 EFX_FILTER_FLAG_RX_RSS |
2755 EFX_FILTER_FLAG_RX_STACK,
2756 0);
2757 efx_filter_set_uc_def(&spec);
2758 rc = efx_ef10_filter_insert(efx, &spec, true);
2759 if (rc < 0) {
2760 WARN_ON(1);
2761 table->stack_uc_count = 0;
2762 } else {
2763 table->stack_uc_list[0].id = rc;
2764 }
2765 }
2766
2767 /* Insert/renew multicast filters */
2768 if (table->stack_mc_count >= 0) {
2769 for (i = 0; i < table->stack_mc_count; i++) {
2770 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2771 EFX_FILTER_FLAG_RX_RSS |
2772 EFX_FILTER_FLAG_RX_STACK,
2773 0);
2774 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2775 table->stack_mc_list[i].addr);
2776 rc = efx_ef10_filter_insert(efx, &spec, true);
2777 if (rc < 0) {
2778 /* Fall back to multicast-promisc */
2779 while (i--)
2780 efx_ef10_filter_remove_safe(
2781 efx, EFX_FILTER_PRI_REQUIRED,
2782 table->stack_mc_list[i].id);
2783 table->stack_mc_count = -1;
2784 break;
2785 }
2786 table->stack_mc_list[i].id = rc;
2787 }
2788 }
2789 if (table->stack_mc_count < 0) {
2790 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2791 EFX_FILTER_FLAG_RX_RSS |
2792 EFX_FILTER_FLAG_RX_STACK,
2793 0);
2794 efx_filter_set_mc_def(&spec);
2795 rc = efx_ef10_filter_insert(efx, &spec, true);
2796 if (rc < 0) {
2797 WARN_ON(1);
2798 table->stack_mc_count = 0;
2799 } else {
2800 table->stack_mc_list[0].id = rc;
2801 }
2802 }
2803
2804 /* Remove filters that weren't renewed. Since nothing else
2805 * changes the STACK_OLD flag or removes these filters, we
2806 * don't need to hold the filter_lock while scanning for
2807 * these filters.
2808 */
2809 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
2810 if (ACCESS_ONCE(table->entry[i].spec) &
2811 EFX_EF10_FILTER_FLAG_STACK_OLD) {
2812 if (efx_ef10_filter_remove_internal(efx,
2813 EFX_FILTER_PRI_REQUIRED,
2814 i, true) < 0)
2815 remove_failed = true;
2816 }
2817 }
2818 WARN_ON(remove_failed);
2819}
2820
2821static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
2822{
2823 efx_ef10_filter_sync_rx_mode(efx);
2824
2825 return efx_mcdi_set_mac(efx);
2826}
2827
2828#ifdef CONFIG_SFC_MTD
2829
2830struct efx_ef10_nvram_type_info {
2831 u16 type, type_mask;
2832 u8 port;
2833 const char *name;
2834};
2835
2836static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
2837 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
2838 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
2839 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
2840 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
2841 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
2842 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
2843 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
2844 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
2845 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
2846 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
2847};
2848
2849static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
2850 struct efx_mcdi_mtd_partition *part,
2851 unsigned int type)
2852{
2853 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
2854 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
2855 const struct efx_ef10_nvram_type_info *info;
2856 size_t size, erase_size, outlen;
2857 bool protected;
2858 int rc;
2859
2860 for (info = efx_ef10_nvram_types; ; info++) {
2861 if (info ==
2862 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
2863 return -ENODEV;
2864 if ((type & ~info->type_mask) == info->type)
2865 break;
2866 }
2867 if (info->port != efx_port_num(efx))
2868 return -ENODEV;
2869
2870 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
2871 if (rc)
2872 return rc;
2873 if (protected)
2874 return -ENODEV; /* hide it */
2875
2876 part->nvram_type = type;
2877
2878 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
2879 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
2880 outbuf, sizeof(outbuf), &outlen);
2881 if (rc)
2882 return rc;
2883 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
2884 return -EIO;
2885 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
2886 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
2887 part->fw_subtype = MCDI_DWORD(outbuf,
2888 NVRAM_METADATA_OUT_SUBTYPE);
2889
2890 part->common.dev_type_name = "EF10 NVRAM manager";
2891 part->common.type_name = info->name;
2892
2893 part->common.mtd.type = MTD_NORFLASH;
2894 part->common.mtd.flags = MTD_CAP_NORFLASH;
2895 part->common.mtd.size = size;
2896 part->common.mtd.erasesize = erase_size;
2897
2898 return 0;
2899}
2900
2901static int efx_ef10_mtd_probe(struct efx_nic *efx)
2902{
2903 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
2904 struct efx_mcdi_mtd_partition *parts;
2905 size_t outlen, n_parts_total, i, n_parts;
2906 unsigned int type;
2907 int rc;
2908
2909 ASSERT_RTNL();
2910
2911 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
2912 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
2913 outbuf, sizeof(outbuf), &outlen);
2914 if (rc)
2915 return rc;
2916 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
2917 return -EIO;
2918
2919 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
2920 if (n_parts_total >
2921 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
2922 return -EIO;
2923
2924 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
2925 if (!parts)
2926 return -ENOMEM;
2927
2928 n_parts = 0;
2929 for (i = 0; i < n_parts_total; i++) {
2930 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
2931 i);
2932 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
2933 if (rc == 0)
2934 n_parts++;
2935 else if (rc != -ENODEV)
2936 goto fail;
2937 }
2938
2939 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
2940fail:
2941 if (rc)
2942 kfree(parts);
2943 return rc;
2944}
2945
2946#endif /* CONFIG_SFC_MTD */
2947
2948static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
2949{
2950 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
2951}
2952
2953const struct efx_nic_type efx_hunt_a0_nic_type = {
2954 .mem_map_size = efx_ef10_mem_map_size,
2955 .probe = efx_ef10_probe,
2956 .remove = efx_ef10_remove,
2957 .dimension_resources = efx_ef10_dimension_resources,
2958 .init = efx_ef10_init_nic,
2959 .fini = efx_port_dummy_op_void,
2960 .map_reset_reason = efx_mcdi_map_reset_reason,
2961 .map_reset_flags = efx_ef10_map_reset_flags,
2962 .reset = efx_mcdi_reset,
2963 .probe_port = efx_mcdi_port_probe,
2964 .remove_port = efx_mcdi_port_remove,
2965 .fini_dmaq = efx_ef10_fini_dmaq,
2966 .describe_stats = efx_ef10_describe_stats,
2967 .update_stats = efx_ef10_update_stats,
2968 .start_stats = efx_mcdi_mac_start_stats,
2969 .stop_stats = efx_mcdi_mac_stop_stats,
2970 .set_id_led = efx_mcdi_set_id_led,
2971 .push_irq_moderation = efx_ef10_push_irq_moderation,
2972 .reconfigure_mac = efx_ef10_mac_reconfigure,
2973 .check_mac_fault = efx_mcdi_mac_check_fault,
2974 .reconfigure_port = efx_mcdi_port_reconfigure,
2975 .get_wol = efx_ef10_get_wol,
2976 .set_wol = efx_ef10_set_wol,
2977 .resume_wol = efx_port_dummy_op_void,
2978 /* TODO: test_chip */
2979 .test_nvram = efx_mcdi_nvram_test_all,
2980 .mcdi_request = efx_ef10_mcdi_request,
2981 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
2982 .mcdi_read_response = efx_ef10_mcdi_read_response,
2983 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
2984 .irq_enable_master = efx_port_dummy_op_void,
2985 .irq_test_generate = efx_ef10_irq_test_generate,
2986 .irq_disable_non_ev = efx_port_dummy_op_void,
2987 .irq_handle_msi = efx_ef10_msi_interrupt,
2988 .irq_handle_legacy = efx_ef10_legacy_interrupt,
2989 .tx_probe = efx_ef10_tx_probe,
2990 .tx_init = efx_ef10_tx_init,
2991 .tx_remove = efx_ef10_tx_remove,
2992 .tx_write = efx_ef10_tx_write,
2993 .rx_push_indir_table = efx_ef10_rx_push_indir_table,
2994 .rx_probe = efx_ef10_rx_probe,
2995 .rx_init = efx_ef10_rx_init,
2996 .rx_remove = efx_ef10_rx_remove,
2997 .rx_write = efx_ef10_rx_write,
2998 .rx_defer_refill = efx_ef10_rx_defer_refill,
2999 .ev_probe = efx_ef10_ev_probe,
3000 .ev_init = efx_ef10_ev_init,
3001 .ev_fini = efx_ef10_ev_fini,
3002 .ev_remove = efx_ef10_ev_remove,
3003 .ev_process = efx_ef10_ev_process,
3004 .ev_read_ack = efx_ef10_ev_read_ack,
3005 .ev_test_generate = efx_ef10_ev_test_generate,
3006 .filter_table_probe = efx_ef10_filter_table_probe,
3007 .filter_table_restore = efx_ef10_filter_table_restore,
3008 .filter_table_remove = efx_ef10_filter_table_remove,
3009 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
3010 .filter_insert = efx_ef10_filter_insert,
3011 .filter_remove_safe = efx_ef10_filter_remove_safe,
3012 .filter_get_safe = efx_ef10_filter_get_safe,
3013 .filter_clear_rx = efx_ef10_filter_clear_rx,
3014 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
3015 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
3016 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
3017#ifdef CONFIG_RFS_ACCEL
3018 .filter_rfs_insert = efx_ef10_filter_rfs_insert,
3019 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
3020#endif
3021#ifdef CONFIG_SFC_MTD
3022 .mtd_probe = efx_ef10_mtd_probe,
3023 .mtd_rename = efx_mcdi_mtd_rename,
3024 .mtd_read = efx_mcdi_mtd_read,
3025 .mtd_erase = efx_mcdi_mtd_erase,
3026 .mtd_write = efx_mcdi_mtd_write,
3027 .mtd_sync = efx_mcdi_mtd_sync,
3028#endif
3029 .ptp_write_host_time = efx_ef10_ptp_write_host_time,
3030
3031 .revision = EFX_REV_HUNT_A0,
3032 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
3033 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
3034 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
3035 .can_rx_scatter = true,
3036 .always_rx_scatter = true,
3037 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3038 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
3039 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3040 NETIF_F_RXHASH | NETIF_F_NTUPLE),
3041 .mcdi_max_ver = 2,
3042 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
3043};
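
For orientation: the generic sfc core drives all of the above through this
method table rather than calling EF10 routines directly, so supporting a new
NIC generation is largely a matter of filling in another efx_nic_type. A
minimal sketch of the dispatch pattern (hypothetical caller, not driver code):

	/* Illustration only: on EF10 hardware efx->type points at
	 * efx_hunt_a0_nic_type, so this resolves to
	 * efx_ef10_mac_reconfigure() defined above.
	 */
	static int example_mac_reconfigure(struct efx_nic *efx)
	{
		return efx->type->reconfigure_mac(efx);
	}
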
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
new file mode 100644
index 000000000000..b3f4e3755fd9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -0,0 +1,415 @@
1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2012-2013 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_EF10_REGS_H
11#define EFX_EF10_REGS_H
12
13/* EF10 hardware architecture definitions have a name prefix following
14 * the format:
15 *
16 * E<type>_<min-rev><max-rev>_
17 *
18 * The following <type> strings are used:
19 *
20 * MMIO register Host memory structure
21 * -------------------------------------------------------------
22 * Address R
23 * Bitfield RF SF
24 * Enumerator FE SE
25 *
26 * <min-rev> is the first revision to which the definition applies:
27 *
28 * D: Huntington A0
29 *
30 * If the definition has been changed or removed in later revisions
31 * then <max-rev> is the last revision to which the definition applies;
32 * otherwise it is "Z".
33 */
34
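As a concrete reading of this scheme, here is a hand-rolled sketch of how an
LBN/WIDTH pair places a value within a register word. The helper is
hypothetical (the driver itself uses the EFX_POPULATE_* macros from
bitfield.h, which take the name stem and append _LBN/_WIDTH themselves):

	/* Hypothetical helper: shift a value into the field described by
	 * an ERF_*_LBN/_WIDTH pair (assumes width < 32).
	 */
	static inline u32 ef10_encode_field(u32 value, unsigned int lbn,
					    unsigned int width)
	{
		return (value & ((1U << width) - 1)) << lbn;
	}

	/* Usage, e.g. an event-queue read pointer in bits 14:0 (see
	 * ERF_DZ_EVQ_RPTR below):
	 *
	 *	dword = ef10_encode_field(rptr, ERF_DZ_EVQ_RPTR_LBN,
	 *				  ERF_DZ_EVQ_RPTR_WIDTH);
	 */
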
35/**************************************************************************
36 *
37 * EF10 registers and descriptors
38 *
39 **************************************************************************
40 */
41
42/* BIU_HW_REV_ID_REG: */
43#define ER_DZ_BIU_HW_REV_ID 0x00000000
44#define ERF_DZ_HW_REV_ID_LBN 0
45#define ERF_DZ_HW_REV_ID_WIDTH 32
46
47/* BIU_MC_SFT_STATUS_REG: */
48#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
49#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
50#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
51#define ERF_DZ_MC_SFT_STATUS_LBN 0
52#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
53
54/* BIU_INT_ISR_REG: */
55#define ER_DZ_BIU_INT_ISR 0x00000090
56#define ERF_DZ_ISR_REG_LBN 0
57#define ERF_DZ_ISR_REG_WIDTH 32
58
59/* MC_DB_LWRD_REG: */
60#define ER_DZ_MC_DB_LWRD 0x00000200
61#define ERF_DZ_MC_DOORBELL_L_LBN 0
62#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
63
64/* MC_DB_HWRD_REG: */
65#define ER_DZ_MC_DB_HWRD 0x00000204
66#define ERF_DZ_MC_DOORBELL_H_LBN 0
67#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
68
69/* EVQ_RPTR_REG: */
70#define ER_DZ_EVQ_RPTR 0x00000400
71#define ER_DZ_EVQ_RPTR_STEP 8192
72#define ER_DZ_EVQ_RPTR_ROWS 2048
73#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
74#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
75#define ERF_DZ_EVQ_RPTR_LBN 0
76#define ERF_DZ_EVQ_RPTR_WIDTH 15
77
78/* EVQ_TMR_REG: */
79#define ER_DZ_EVQ_TMR 0x00000420
80#define ER_DZ_EVQ_TMR_STEP 8192
81#define ER_DZ_EVQ_TMR_ROWS 2048
82#define ERF_DZ_TC_TIMER_MODE_LBN 14
83#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
84#define ERF_DZ_TC_TIMER_VAL_LBN 0
85#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
86
87/* RX_DESC_UPD_REG: */
88#define ER_DZ_RX_DESC_UPD 0x00000830
89#define ER_DZ_RX_DESC_UPD_STEP 8192
90#define ER_DZ_RX_DESC_UPD_ROWS 2048
91#define ERF_DZ_RX_DESC_WPTR_LBN 0
92#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
93
94/* TX_DESC_UPD_REG: */
95#define ER_DZ_TX_DESC_UPD 0x00000a10
96#define ER_DZ_TX_DESC_UPD_STEP 8192
97#define ER_DZ_TX_DESC_UPD_ROWS 2048
98#define ERF_DZ_RSVD_LBN 76
99#define ERF_DZ_RSVD_WIDTH 20
100#define ERF_DZ_TX_DESC_WPTR_LBN 64
101#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
102#define ERF_DZ_TX_DESC_HWORD_LBN 32
103#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
104#define ERF_DZ_TX_DESC_LWORD_LBN 0
105#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
106
107/* DRIVER_EV */
108#define ESF_DZ_DRV_CODE_LBN 60
109#define ESF_DZ_DRV_CODE_WIDTH 4
110#define ESF_DZ_DRV_SUB_CODE_LBN 56
111#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
112#define ESE_DZ_DRV_TIMER_EV 3
113#define ESE_DZ_DRV_START_UP_EV 2
114#define ESE_DZ_DRV_WAKE_UP_EV 1
115#define ESF_DZ_DRV_SUB_DATA_LBN 0
116#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
117#define ESF_DZ_DRV_EVQ_ID_LBN 0
118#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
119#define ESF_DZ_DRV_TMR_ID_LBN 0
120#define ESF_DZ_DRV_TMR_ID_WIDTH 14
121
122/* EVENT_ENTRY */
123#define ESF_DZ_EV_CODE_LBN 60
124#define ESF_DZ_EV_CODE_WIDTH 4
125#define ESE_DZ_EV_CODE_MCDI_EV 12
126#define ESE_DZ_EV_CODE_DRIVER_EV 5
127#define ESE_DZ_EV_CODE_TX_EV 2
128#define ESE_DZ_EV_CODE_RX_EV 0
129#define ESE_DZ_OTHER other
130#define ESF_DZ_EV_DATA_LBN 0
131#define ESF_DZ_EV_DATA_WIDTH 60
132
133/* MC_EVENT */
134#define ESF_DZ_MC_CODE_LBN 60
135#define ESF_DZ_MC_CODE_WIDTH 4
136#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
137#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
138#define ESF_DZ_MC_DROP_EVENT_LBN 58
139#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
140#define ESF_DZ_MC_SOFT_LBN 0
141#define ESF_DZ_MC_SOFT_WIDTH 58
142
143/* RX_EVENT */
144#define ESF_DZ_RX_CODE_LBN 60
145#define ESF_DZ_RX_CODE_WIDTH 4
146#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
147#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
148#define ESF_DZ_RX_DROP_EVENT_LBN 58
149#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
150#define ESF_DZ_RX_EV_RSVD2_LBN 54
151#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
152#define ESF_DZ_RX_EV_SOFT2_LBN 52
153#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
154#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
155#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
156#define ESF_DZ_RX_L4_CLASS_LBN 45
157#define ESF_DZ_RX_L4_CLASS_WIDTH 3
158#define ESE_DZ_L4_CLASS_RSVD7 7
159#define ESE_DZ_L4_CLASS_RSVD6 6
160#define ESE_DZ_L4_CLASS_RSVD5 5
161#define ESE_DZ_L4_CLASS_RSVD4 4
162#define ESE_DZ_L4_CLASS_RSVD3 3
163#define ESE_DZ_L4_CLASS_UDP 2
164#define ESE_DZ_L4_CLASS_TCP 1
165#define ESE_DZ_L4_CLASS_UNKNOWN 0
166#define ESF_DZ_RX_L3_CLASS_LBN 42
167#define ESF_DZ_RX_L3_CLASS_WIDTH 3
168#define ESE_DZ_L3_CLASS_RSVD7 7
169#define ESE_DZ_L3_CLASS_IP6_FRAG 6
170#define ESE_DZ_L3_CLASS_ARP 5
171#define ESE_DZ_L3_CLASS_IP4_FRAG 4
172#define ESE_DZ_L3_CLASS_FCOE 3
173#define ESE_DZ_L3_CLASS_IP6 2
174#define ESE_DZ_L3_CLASS_IP4 1
175#define ESE_DZ_L3_CLASS_UNKNOWN 0
176#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
177#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
178#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
179#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
180#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
181#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
182#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
183#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
184#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
185#define ESE_DZ_ETH_TAG_CLASS_NONE 0
186#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
187#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
188#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
189#define ESE_DZ_ETH_BASE_CLASS_LLC 1
190#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
191#define ESF_DZ_RX_MAC_CLASS_LBN 35
192#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
193#define ESE_DZ_MAC_CLASS_MCAST 1
194#define ESE_DZ_MAC_CLASS_UCAST 0
195#define ESF_DZ_RX_EV_SOFT1_LBN 32
196#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
197#define ESF_DZ_RX_EV_RSVD1_LBN 31
198#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
199#define ESF_DZ_RX_ABORT_LBN 30
200#define ESF_DZ_RX_ABORT_WIDTH 1
201#define ESF_DZ_RX_ECC_ERR_LBN 29
202#define ESF_DZ_RX_ECC_ERR_WIDTH 1
203#define ESF_DZ_RX_CRC1_ERR_LBN 28
204#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
205#define ESF_DZ_RX_CRC0_ERR_LBN 27
206#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
207#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
208#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
209#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
210#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
211#define ESF_DZ_RX_ECRC_ERR_LBN 24
212#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
213#define ESF_DZ_RX_QLABEL_LBN 16
214#define ESF_DZ_RX_QLABEL_WIDTH 5
215#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
216#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
217#define ESF_DZ_RX_CONT_LBN 14
218#define ESF_DZ_RX_CONT_WIDTH 1
219#define ESF_DZ_RX_BYTES_LBN 0
220#define ESF_DZ_RX_BYTES_WIDTH 14
221
222/* RX_KER_DESC */
223#define ESF_DZ_RX_KER_RESERVED_LBN 62
224#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
225#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
226#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
227#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
228#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
229
230/* RX_USER_DESC */
231#define ESF_DZ_RX_USR_RESERVED_LBN 62
232#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
233#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
234#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
235#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
236#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
237#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
238#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
239#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
240#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
241#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
242#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
243#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
244#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
245#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
246#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
247#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
248#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
249#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
250#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
251#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
252#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
253#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
254#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
255#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
256#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
257#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
258#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
259
260/* TX_CSUM_TSTAMP_DESC */
261#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
262#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
263#define ESF_DZ_TX_OPTION_TYPE_LBN 60
264#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
265#define ESE_DZ_TX_OPTION_DESC_TSO 7
266#define ESE_DZ_TX_OPTION_DESC_VLAN 6
267#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
268#define ESF_DZ_TX_TIMESTAMP_LBN 5
269#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
270#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
271#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
272#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
273#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
274#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
275#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
276#define ESE_DZ_TX_OPTION_CRC_FCOE 1
277#define ESE_DZ_TX_OPTION_CRC_OFF 0
278#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
279#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
280#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
281#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
282
283/* TX_EVENT */
284#define ESF_DZ_TX_CODE_LBN 60
285#define ESF_DZ_TX_CODE_WIDTH 4
286#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
287#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
288#define ESF_DZ_TX_DROP_EVENT_LBN 58
289#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
290#define ESF_DZ_TX_EV_RSVD_LBN 48
291#define ESF_DZ_TX_EV_RSVD_WIDTH 10
292#define ESF_DZ_TX_SOFT2_LBN 32
293#define ESF_DZ_TX_SOFT2_WIDTH 16
294#define ESF_DZ_TX_CAN_MERGE_LBN 31
295#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
296#define ESF_DZ_TX_SOFT1_LBN 24
297#define ESF_DZ_TX_SOFT1_WIDTH 7
298#define ESF_DZ_TX_QLABEL_LBN 16
299#define ESF_DZ_TX_QLABEL_WIDTH 5
300#define ESF_DZ_TX_DESCR_INDX_LBN 0
301#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
302
303/* TX_KER_DESC */
304#define ESF_DZ_TX_KER_TYPE_LBN 63
305#define ESF_DZ_TX_KER_TYPE_WIDTH 1
306#define ESF_DZ_TX_KER_CONT_LBN 62
307#define ESF_DZ_TX_KER_CONT_WIDTH 1
308#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
309#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
310#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
311#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
312
313/* TX_PIO_DESC */
314#define ESF_DZ_TX_PIO_TYPE_LBN 63
315#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
316#define ESF_DZ_TX_PIO_OPT_LBN 60
317#define ESF_DZ_TX_PIO_OPT_WIDTH 3
318#define ESF_DZ_TX_PIO_CONT_LBN 59
319#define ESF_DZ_TX_PIO_CONT_WIDTH 1
320#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
321#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
322#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
323#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
324
325/* TX_TSO_DESC */
326#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
327#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
328#define ESF_DZ_TX_OPTION_TYPE_LBN 60
329#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
330#define ESE_DZ_TX_OPTION_DESC_TSO 7
331#define ESE_DZ_TX_OPTION_DESC_VLAN 6
332#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
333#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
334#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
335#define ESF_DZ_TX_TSO_IP_ID_LBN 32
336#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
337#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
338#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
339
340/* TX_USER_DESC */
341#define ESF_DZ_TX_USR_TYPE_LBN 63
342#define ESF_DZ_TX_USR_TYPE_WIDTH 1
343#define ESF_DZ_TX_USR_CONT_LBN 62
344#define ESF_DZ_TX_USR_CONT_WIDTH 1
345#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
346#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
347#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
348#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
349#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
350#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
351#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
352#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
353#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
354#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
355#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
356#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
357#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
358#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
359#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
360#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
361#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
362#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
363#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
364#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
365#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
366#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
367#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
368#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
369#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
370#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
371/*************************************************************************/
372
373/* TX_DESC_UPD_REG: Transmit descriptor update register.
374 * We may write just one dword of these registers.
375 */
376#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
377#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
378#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
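
A sketch of such a single-dword doorbell write, assuming the driver's
existing helpers (EFX_POPULATE_DWORD_1 from bitfield.h, efx_writed_page from
io.h); the function name is illustrative, not part of the driver:

	static void ef10_notify_tx_desc_sketch(struct efx_nic *efx,
					       unsigned int queue,
					       unsigned int wptr)
	{
		efx_dword_t reg;

		/* Only the dword carrying the write pointer is written */
		EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, wptr);
		efx_writed_page(efx, &reg, ER_DZ_TX_DESC_UPD_DWORD, queue);
	}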
379
380/* The workaround for bug 35388 requires multiplexing writes through
381 * the TX_DESC_UPD_DWORD address.
382 * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
383 * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
384 * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
385 */
386#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
387#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
388#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
389#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
390#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
391#define ERF_DD_EVQ_IND_RPTR_LBN 0
392#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
393#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
394#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
395#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
396#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
397#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
398#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
399#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
400
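Under the same assumptions as the sketch above, a multiplexed EVQ_RPTR
update would look roughly like this: the read pointer is split and posted as
two dword writes, flagged HIGH then LOW (illustrative function, not driver
code):

	static void ef10_evq_rptr_bug35388_sketch(struct efx_nic *efx,
						  unsigned int evq,
						  unsigned int rptr)
	{
		efx_dword_t reg;

		/* 1000hhhhhhhh: high byte of the read pointer */
		EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
				     ERF_DD_EVQ_IND_RPTR,
				     rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH);
		efx_writed_page(efx, &reg, ER_DD_EVQ_INDIRECT, evq);
		/* 1001llllllll: low byte of the read pointer */
		EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
				     ERF_DD_EVQ_IND_RPTR,
				     rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		efx_writed_page(efx, &reg, ER_DD_EVQ_INDIRECT, evq);
	}
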
401/* TX_PIOBUF
402 * PIO buffer aperture (paged)
403 */
404#define ER_DZ_TX_PIOBUF 4096
405#define ER_DZ_TX_PIOBUF_SIZE 2048
406
407/* RX packet prefix */
408#define ES_DZ_RX_PREFIX_HASH_OFST 0
409#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
410#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
411#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
412#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
413#define ES_DZ_RX_PREFIX_SIZE 14
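
For illustration, the 14-byte prefix these offsets describe, written as a
packed struct (hypothetical type; the driver indexes the raw bytes through
the *_OFST constants instead):

	struct ef10_rx_prefix_sketch {
		__le32 hash;	/* ES_DZ_RX_PREFIX_HASH_OFST   =  0 */
		__le16 vlan1;	/* ES_DZ_RX_PREFIX_VLAN1_OFST  =  4 */
		__le16 vlan2;	/* ES_DZ_RX_PREFIX_VLAN2_OFST  =  6 */
		__le16 pkt_len;	/* ES_DZ_RX_PREFIX_PKTLEN_OFST =  8 */
		__le32 tstamp;	/* ES_DZ_RX_PREFIX_TSTAMP_OFST = 10 */
	} __packed;		/* sizeof == ES_DZ_RX_PREFIX_SIZE (14) */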
414
415#endif /* EFX_EF10_REGS_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c72968840f1a..07c9bc4c61bc 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -17,7 +17,6 @@
17#include <linux/ip.h> 17#include <linux/ip.h>
18#include <linux/tcp.h> 18#include <linux/tcp.h>
19#include <linux/in.h> 19#include <linux/in.h>
20#include <linux/crc32.h>
21#include <linux/ethtool.h> 20#include <linux/ethtool.h>
22#include <linux/topology.h> 21#include <linux/topology.h>
23#include <linux/gfp.h> 22#include <linux/gfp.h>
@@ -81,8 +80,7 @@ const char *const efx_reset_type_names[] = {
81 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 80 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
82 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 81 [RESET_TYPE_INT_ERROR] = "INT_ERROR",
83 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 82 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
84 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", 83 [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
85 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
86 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 84 [RESET_TYPE_TX_SKIP] = "TX_SKIP",
87 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", 85 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
88}; 86};
@@ -191,8 +189,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
191 * 189 *
192 *************************************************************************/ 190 *************************************************************************/
193 191
194static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq); 192static int efx_soft_enable_interrupts(struct efx_nic *efx);
195static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq); 193static void efx_soft_disable_interrupts(struct efx_nic *efx);
196static void efx_remove_channel(struct efx_channel *channel); 194static void efx_remove_channel(struct efx_channel *channel);
197static void efx_remove_channels(struct efx_nic *efx); 195static void efx_remove_channels(struct efx_nic *efx);
198static const struct efx_channel_type efx_default_channel_type; 196static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +246,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
248 efx_channel_get_rx_queue(channel); 246 efx_channel_get_rx_queue(channel);
249 247
250 efx_rx_flush_packet(channel); 248 efx_rx_flush_packet(channel);
251 if (rx_queue->enabled) 249 efx_fast_push_rx_descriptors(rx_queue);
252 efx_fast_push_rx_descriptors(rx_queue);
253 } 250 }
254 251
255 return spent; 252 return spent;
256} 253}
257 254
258/* Mark channel as finished processing
259 *
260 * Note that since we will not receive further interrupts for this
261 * channel before we finish processing and call the eventq_read_ack()
262 * method, there is no need to use the interrupt hold-off timers.
263 */
264static inline void efx_channel_processed(struct efx_channel *channel)
265{
266 /* The interrupt handler for this channel may set work_pending
267 * as soon as we acknowledge the events we've seen. Make sure
268 * it's cleared before then. */
269 channel->work_pending = false;
270 smp_wmb();
271
272 efx_nic_eventq_read_ack(channel);
273}
274
275/* NAPI poll handler 255/* NAPI poll handler
276 * 256 *
277 * NAPI guarantees serialisation of polls of the same device, which 257 * NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +296,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
316 296
317 /* There is no race here; although napi_disable() will 297 /* There is no race here; although napi_disable() will
318 * only wait for napi_complete(), this isn't a problem 298 * only wait for napi_complete(), this isn't a problem
319 * since efx_channel_processed() will have no effect if 299 * since efx_nic_eventq_read_ack() will have no effect if
320 * interrupts have already been disabled. 300 * interrupts have already been disabled.
321 */ 301 */
322 napi_complete(napi); 302 napi_complete(napi);
323 efx_channel_processed(channel); 303 efx_nic_eventq_read_ack(channel);
324 } 304 }
325 305
326 return spent; 306 return spent;
327} 307}
328 308
329/* Process the eventq of the specified channel immediately on this CPU
330 *
331 * Disable hardware generated interrupts, wait for any existing
332 * processing to finish, then directly poll (and ack ) the eventq.
333 * Finally reenable NAPI and interrupts.
334 *
335 * This is for use only during a loopback self-test. It must not
336 * deliver any packets up the stack as this can result in deadlock.
337 */
338void efx_process_channel_now(struct efx_channel *channel)
339{
340 struct efx_nic *efx = channel->efx;
341
342 BUG_ON(channel->channel >= efx->n_channels);
343 BUG_ON(!channel->enabled);
344 BUG_ON(!efx->loopback_selftest);
345
346 /* Disable interrupts and wait for ISRs to complete */
347 efx_nic_disable_interrupts(efx);
348 if (efx->legacy_irq) {
349 synchronize_irq(efx->legacy_irq);
350 efx->legacy_irq_enabled = false;
351 }
352 if (channel->irq)
353 synchronize_irq(channel->irq);
354
355 /* Wait for any NAPI processing to complete */
356 napi_disable(&channel->napi_str);
357
358 /* Poll the channel */
359 efx_process_channel(channel, channel->eventq_mask + 1);
360
361 /* Ack the eventq. This may cause an interrupt to be generated
362 * when they are reenabled */
363 efx_channel_processed(channel);
364
365 napi_enable(&channel->napi_str);
366 if (efx->legacy_irq)
367 efx->legacy_irq_enabled = true;
368 efx_nic_enable_interrupts(efx);
369}
370
371/* Create event queue 309/* Create event queue
372 * Event queue memory allocations are done only once. If the channel 310 * Event queue memory allocations are done only once. If the channel
373 * is reset, the memory buffer will be reused; this guards against 311 * is reset, the memory buffer will be reused; this guards against
@@ -391,14 +329,23 @@ static int efx_probe_eventq(struct efx_channel *channel)
391} 329}
392 330
393/* Prepare channel's event queue */ 331/* Prepare channel's event queue */
394static void efx_init_eventq(struct efx_channel *channel) 332static int efx_init_eventq(struct efx_channel *channel)
395{ 333{
396 netif_dbg(channel->efx, drv, channel->efx->net_dev, 334 struct efx_nic *efx = channel->efx;
397 "chan %d init event queue\n", channel->channel); 335 int rc;
398 336
399 channel->eventq_read_ptr = 0; 337 EFX_WARN_ON_PARANOID(channel->eventq_init);
400 338
401 efx_nic_init_eventq(channel); 339 netif_dbg(efx, drv, efx->net_dev,
340 "chan %d init event queue\n", channel->channel);
341
342 rc = efx_nic_init_eventq(channel);
343 if (rc == 0) {
344 efx->type->push_irq_moderation(channel);
345 channel->eventq_read_ptr = 0;
346 channel->eventq_init = true;
347 }
348 return rc;
402} 349}
403 350
404/* Enable event queue processing and NAPI */ 351/* Enable event queue processing and NAPI */
@@ -407,11 +354,7 @@ static void efx_start_eventq(struct efx_channel *channel)
407 netif_dbg(channel->efx, ifup, channel->efx->net_dev, 354 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
408 "chan %d start event queue\n", channel->channel); 355 "chan %d start event queue\n", channel->channel);
409 356
410 /* The interrupt handler for this channel may set work_pending 357 /* Make sure the NAPI handler sees the enabled flag set */
411 * as soon as we enable it. Make sure it's cleared before
412 * then. Similarly, make sure it sees the enabled flag set.
413 */
414 channel->work_pending = false;
415 channel->enabled = true; 358 channel->enabled = true;
416 smp_wmb(); 359 smp_wmb();
417 360
@@ -431,10 +374,14 @@ static void efx_stop_eventq(struct efx_channel *channel)
431 374
432static void efx_fini_eventq(struct efx_channel *channel) 375static void efx_fini_eventq(struct efx_channel *channel)
433{ 376{
377 if (!channel->eventq_init)
378 return;
379
434 netif_dbg(channel->efx, drv, channel->efx->net_dev, 380 netif_dbg(channel->efx, drv, channel->efx->net_dev,
435 "chan %d fini event queue\n", channel->channel); 381 "chan %d fini event queue\n", channel->channel);
436 382
437 efx_nic_fini_eventq(channel); 383 efx_nic_fini_eventq(channel);
384 channel->eventq_init = false;
438} 385}
439 386
440static void efx_remove_eventq(struct efx_channel *channel) 387static void efx_remove_eventq(struct efx_channel *channel)
@@ -583,8 +530,8 @@ static void efx_set_channel_names(struct efx_nic *efx)
583 530
584 efx_for_each_channel(channel, efx) 531 efx_for_each_channel(channel, efx)
585 channel->type->get_name(channel, 532 channel->type->get_name(channel,
586 efx->channel_name[channel->channel], 533 efx->msi_context[channel->channel].name,
587 sizeof(efx->channel_name[0])); 534 sizeof(efx->msi_context[0].name));
588} 535}
589 536
590static int efx_probe_channels(struct efx_nic *efx) 537static int efx_probe_channels(struct efx_nic *efx)
@@ -634,13 +581,13 @@ static void efx_start_datapath(struct efx_nic *efx)
634 * support the current MTU, including padding for header 581 * support the current MTU, including padding for header
635 * alignment and overruns. 582 * alignment and overruns.
636 */ 583 */
637 efx->rx_dma_len = (efx->type->rx_buffer_hash_size + 584 efx->rx_dma_len = (efx->rx_prefix_size +
638 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 585 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
639 efx->type->rx_buffer_padding); 586 efx->type->rx_buffer_padding);
640 rx_buf_len = (sizeof(struct efx_rx_page_state) + 587 rx_buf_len = (sizeof(struct efx_rx_page_state) +
641 NET_IP_ALIGN + efx->rx_dma_len); 588 NET_IP_ALIGN + efx->rx_dma_len);
642 if (rx_buf_len <= PAGE_SIZE) { 589 if (rx_buf_len <= PAGE_SIZE) {
643 efx->rx_scatter = false; 590 efx->rx_scatter = efx->type->always_rx_scatter;
644 efx->rx_buffer_order = 0; 591 efx->rx_buffer_order = 0;
645 } else if (efx->type->can_rx_scatter) { 592 } else if (efx->type->can_rx_scatter) {
646 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); 593 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
@@ -668,9 +615,9 @@ static void efx_start_datapath(struct efx_nic *efx)
668 efx->rx_dma_len, efx->rx_page_buf_step, 615 efx->rx_dma_len, efx->rx_page_buf_step,
669 efx->rx_bufs_per_page, efx->rx_pages_per_batch); 616 efx->rx_bufs_per_page, efx->rx_pages_per_batch);
670 617
671 /* RX filters also have scatter-enabled flags */ 618 /* RX filters may also have scatter-enabled flags */
672 if (efx->rx_scatter != old_rx_scatter) 619 if (efx->rx_scatter != old_rx_scatter)
673 efx_filter_update_rx_scatter(efx); 620 efx->type->filter_update_rx_scatter(efx);
674 621
675 /* We must keep at least one descriptor in a TX ring empty. 622 /* We must keep at least one descriptor in a TX ring empty.
676 * We could avoid this when the queue size does not exactly 623 * We could avoid this when the queue size does not exactly
@@ -684,11 +631,14 @@ static void efx_start_datapath(struct efx_nic *efx)
684 631
685 /* Initialise the channels */ 632 /* Initialise the channels */
686 efx_for_each_channel(channel, efx) { 633 efx_for_each_channel(channel, efx) {
687 efx_for_each_channel_tx_queue(tx_queue, channel) 634 efx_for_each_channel_tx_queue(tx_queue, channel) {
688 efx_init_tx_queue(tx_queue); 635 efx_init_tx_queue(tx_queue);
636 atomic_inc(&efx->active_queues);
637 }
689 638
690 efx_for_each_channel_rx_queue(rx_queue, channel) { 639 efx_for_each_channel_rx_queue(rx_queue, channel) {
691 efx_init_rx_queue(rx_queue); 640 efx_init_rx_queue(rx_queue);
641 atomic_inc(&efx->active_queues);
692 efx_nic_generate_fill_event(rx_queue); 642 efx_nic_generate_fill_event(rx_queue);
693 } 643 }
694 644
@@ -704,30 +654,15 @@ static void efx_stop_datapath(struct efx_nic *efx)
704 struct efx_channel *channel; 654 struct efx_channel *channel;
705 struct efx_tx_queue *tx_queue; 655 struct efx_tx_queue *tx_queue;
706 struct efx_rx_queue *rx_queue; 656 struct efx_rx_queue *rx_queue;
707 struct pci_dev *dev = efx->pci_dev;
708 int rc; 657 int rc;
709 658
710 EFX_ASSERT_RESET_SERIALISED(efx); 659 EFX_ASSERT_RESET_SERIALISED(efx);
711 BUG_ON(efx->port_enabled); 660 BUG_ON(efx->port_enabled);
712 661
713 /* Only perform flush if dma is enabled */ 662 /* Stop RX refill */
714 if (dev->is_busmaster && efx->state != STATE_RECOVERY) { 663 efx_for_each_channel(channel, efx) {
715 rc = efx_nic_flush_queues(efx); 664 efx_for_each_channel_rx_queue(rx_queue, channel)
716 665 rx_queue->refill_enabled = false;
717 if (rc && EFX_WORKAROUND_7803(efx)) {
718 /* Schedule a reset to recover from the flush failure. The
719 * descriptor caches reference memory we're about to free,
720 * but falcon_reconfigure_mac_wrapper() won't reconnect
721 * the MACs because of the pending reset. */
722 netif_err(efx, drv, efx->net_dev,
723 "Resetting to recover from flush failure\n");
724 efx_schedule_reset(efx, RESET_TYPE_ALL);
725 } else if (rc) {
726 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
727 } else {
728 netif_dbg(efx, drv, efx->net_dev,
729 "successfully flushed all queues\n");
730 }
731 } 666 }
732 667
733 efx_for_each_channel(channel, efx) { 668 efx_for_each_channel(channel, efx) {
@@ -741,7 +676,26 @@ static void efx_stop_datapath(struct efx_nic *efx)
741 efx_stop_eventq(channel); 676 efx_stop_eventq(channel);
742 efx_start_eventq(channel); 677 efx_start_eventq(channel);
743 } 678 }
679 }
680
681 rc = efx->type->fini_dmaq(efx);
682 if (rc && EFX_WORKAROUND_7803(efx)) {
683 /* Schedule a reset to recover from the flush failure. The
684 * descriptor caches reference memory we're about to free,
685 * but falcon_reconfigure_mac_wrapper() won't reconnect
686 * the MACs because of the pending reset.
687 */
688 netif_err(efx, drv, efx->net_dev,
689 "Resetting to recover from flush failure\n");
690 efx_schedule_reset(efx, RESET_TYPE_ALL);
691 } else if (rc) {
692 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
693 } else {
694 netif_dbg(efx, drv, efx->net_dev,
695 "successfully flushed all queues\n");
696 }
744 697
698 efx_for_each_channel(channel, efx) {
745 efx_for_each_channel_rx_queue(rx_queue, channel) 699 efx_for_each_channel_rx_queue(rx_queue, channel)
746 efx_fini_rx_queue(rx_queue); 700 efx_fini_rx_queue(rx_queue);
747 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 701 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -779,7 +733,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
779 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 733 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
780 u32 old_rxq_entries, old_txq_entries; 734 u32 old_rxq_entries, old_txq_entries;
781 unsigned i, next_buffer_table = 0; 735 unsigned i, next_buffer_table = 0;
782 int rc; 736 int rc, rc2;
783 737
784 rc = efx_check_disabled(efx); 738 rc = efx_check_disabled(efx);
785 if (rc) 739 if (rc)
@@ -809,7 +763,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
809 763
810 efx_device_detach_sync(efx); 764 efx_device_detach_sync(efx);
811 efx_stop_all(efx); 765 efx_stop_all(efx);
812 efx_stop_interrupts(efx, true); 766 efx_soft_disable_interrupts(efx);
813 767
814 /* Clone channels (where possible) */ 768 /* Clone channels (where possible) */
815 memset(other_channel, 0, sizeof(other_channel)); 769 memset(other_channel, 0, sizeof(other_channel));
@@ -859,9 +813,16 @@ out:
859 } 813 }
860 } 814 }
861 815
862 efx_start_interrupts(efx, true); 816 rc2 = efx_soft_enable_interrupts(efx);
863 efx_start_all(efx); 817 if (rc2) {
864 netif_device_attach(efx->net_dev); 818 rc = rc ? rc : rc2;
819 netif_err(efx, drv, efx->net_dev,
820 "unable to restart interrupts on channel reallocation\n");
821 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
822 } else {
823 efx_start_all(efx);
824 netif_device_attach(efx->net_dev);
825 }
865 return rc; 826 return rc;
866 827
867rollback: 828rollback:
@@ -931,10 +892,9 @@ void efx_link_status_changed(struct efx_nic *efx)
931 /* Status message for kernel log */ 892 /* Status message for kernel log */
932 if (link_state->up) 893 if (link_state->up)
933 netif_info(efx, link, efx->net_dev, 894 netif_info(efx, link, efx->net_dev,
934 "link up at %uMbps %s-duplex (MTU %d)%s\n", 895 "link up at %uMbps %s-duplex (MTU %d)\n",
935 link_state->speed, link_state->fd ? "full" : "half", 896 link_state->speed, link_state->fd ? "full" : "half",
936 efx->net_dev->mtu, 897 efx->net_dev->mtu);
937 (efx->promiscuous ? " [PROMISC]" : ""));
938 else 898 else
939 netif_info(efx, link, efx->net_dev, "link down\n"); 899 netif_info(efx, link, efx->net_dev, "link down\n");
940} 900}
@@ -983,10 +943,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
983 943
984 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 944 WARN_ON(!mutex_is_locked(&efx->mac_lock));
985 945
986 /* Serialise the promiscuous flag with efx_set_rx_mode. */
987 netif_addr_lock_bh(efx->net_dev);
988 netif_addr_unlock_bh(efx->net_dev);
989
990 /* Disable PHY transmit in mac level loopbacks */ 946 /* Disable PHY transmit in mac level loopbacks */
991 phy_mode = efx->phy_mode; 947 phy_mode = efx->phy_mode;
992 if (LOOPBACK_INTERNAL(efx)) 948 if (LOOPBACK_INTERNAL(efx))
@@ -1144,6 +1100,7 @@ static int efx_init_io(struct efx_nic *efx)
1144{ 1100{
1145 struct pci_dev *pci_dev = efx->pci_dev; 1101 struct pci_dev *pci_dev = efx->pci_dev;
1146 dma_addr_t dma_mask = efx->type->max_dma_mask; 1102 dma_addr_t dma_mask = efx->type->max_dma_mask;
1103 unsigned int mem_map_size = efx->type->mem_map_size(efx);
1147 int rc; 1104 int rc;
1148 1105
1149 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 1106 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1196,20 +1153,18 @@ static int efx_init_io(struct efx_nic *efx)
1196 rc = -EIO; 1153 rc = -EIO;
1197 goto fail3; 1154 goto fail3;
1198 } 1155 }
1199 efx->membase = ioremap_nocache(efx->membase_phys, 1156 efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
1200 efx->type->mem_map_size);
1201 if (!efx->membase) { 1157 if (!efx->membase) {
1202 netif_err(efx, probe, efx->net_dev, 1158 netif_err(efx, probe, efx->net_dev,
1203 "could not map memory BAR at %llx+%x\n", 1159 "could not map memory BAR at %llx+%x\n",
1204 (unsigned long long)efx->membase_phys, 1160 (unsigned long long)efx->membase_phys, mem_map_size);
1205 efx->type->mem_map_size);
1206 rc = -ENOMEM; 1161 rc = -ENOMEM;
1207 goto fail4; 1162 goto fail4;
1208 } 1163 }
1209 netif_dbg(efx, probe, efx->net_dev, 1164 netif_dbg(efx, probe, efx->net_dev,
1210 "memory BAR at %llx+%x (virtual %p)\n", 1165 "memory BAR at %llx+%x (virtual %p)\n",
1211 (unsigned long long)efx->membase_phys, 1166 (unsigned long long)efx->membase_phys, mem_map_size,
1212 efx->type->mem_map_size, efx->membase); 1167 efx->membase);
1213 1168
1214 return 0; 1169 return 0;
1215 1170
@@ -1288,8 +1243,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1288 */ 1243 */
1289static int efx_probe_interrupts(struct efx_nic *efx) 1244static int efx_probe_interrupts(struct efx_nic *efx)
1290{ 1245{
1291 unsigned int max_channels =
1292 min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
1293 unsigned int extra_channels = 0; 1246 unsigned int extra_channels = 0;
1294 unsigned int i, j; 1247 unsigned int i, j;
1295 int rc; 1248 int rc;
@@ -1306,7 +1259,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1306 if (separate_tx_channels) 1259 if (separate_tx_channels)
1307 n_channels *= 2; 1260 n_channels *= 2;
1308 n_channels += extra_channels; 1261 n_channels += extra_channels;
1309 n_channels = min(n_channels, max_channels); 1262 n_channels = min(n_channels, efx->max_channels);
1310 1263
1311 for (i = 0; i < n_channels; i++) 1264 for (i = 0; i < n_channels; i++)
1312 xentries[i].entry = i; 1265 xentries[i].entry = i;
@@ -1392,31 +1345,42 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1392 return 0; 1345 return 0;
1393} 1346}
1394 1347
1395/* Enable interrupts, then probe and start the event queues */ 1348static int efx_soft_enable_interrupts(struct efx_nic *efx)
1396static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1397{ 1349{
1398 struct efx_channel *channel; 1350 struct efx_channel *channel, *end_channel;
1351 int rc;
1399 1352
1400 BUG_ON(efx->state == STATE_DISABLED); 1353 BUG_ON(efx->state == STATE_DISABLED);
1401 1354
1402 if (efx->eeh_disabled_legacy_irq) { 1355 efx->irq_soft_enabled = true;
1403 enable_irq(efx->legacy_irq); 1356 smp_wmb();
1404 efx->eeh_disabled_legacy_irq = false;
1405 }
1406 if (efx->legacy_irq)
1407 efx->legacy_irq_enabled = true;
1408 efx_nic_enable_interrupts(efx);
1409 1357
1410 efx_for_each_channel(channel, efx) { 1358 efx_for_each_channel(channel, efx) {
1411 if (!channel->type->keep_eventq || !may_keep_eventq) 1359 if (!channel->type->keep_eventq) {
1412 efx_init_eventq(channel); 1360 rc = efx_init_eventq(channel);
1361 if (rc)
1362 goto fail;
1363 }
1413 efx_start_eventq(channel); 1364 efx_start_eventq(channel);
1414 } 1365 }
1415 1366
1416 efx_mcdi_mode_event(efx); 1367 efx_mcdi_mode_event(efx);
1368
1369 return 0;
1370fail:
1371 end_channel = channel;
1372 efx_for_each_channel(channel, efx) {
1373 if (channel == end_channel)
1374 break;
1375 efx_stop_eventq(channel);
1376 if (!channel->type->keep_eventq)
1377 efx_fini_eventq(channel);
1378 }
1379
1380 return rc;
1417} 1381}
1418 1382
1419static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq) 1383static void efx_soft_disable_interrupts(struct efx_nic *efx)
1420{ 1384{
1421 struct efx_channel *channel; 1385 struct efx_channel *channel;
1422 1386
@@ -1425,20 +1389,79 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1425 1389
1426 efx_mcdi_mode_poll(efx); 1390 efx_mcdi_mode_poll(efx);
1427 1391
1428 efx_nic_disable_interrupts(efx); 1392 efx->irq_soft_enabled = false;
1429 if (efx->legacy_irq) { 1393 smp_wmb();
1394
1395 if (efx->legacy_irq)
1430 synchronize_irq(efx->legacy_irq); 1396 synchronize_irq(efx->legacy_irq);
1431 efx->legacy_irq_enabled = false;
1432 }
1433 1397
1434 efx_for_each_channel(channel, efx) { 1398 efx_for_each_channel(channel, efx) {
1435 if (channel->irq) 1399 if (channel->irq)
1436 synchronize_irq(channel->irq); 1400 synchronize_irq(channel->irq);
1437 1401
1438 efx_stop_eventq(channel); 1402 efx_stop_eventq(channel);
1439 if (!channel->type->keep_eventq || !may_keep_eventq) 1403 if (!channel->type->keep_eventq)
1404 efx_fini_eventq(channel);
1405 }
1406
1407 /* Flush the asynchronous MCDI request queue */
1408 efx_mcdi_flush_async(efx);
1409}
1410
1411static int efx_enable_interrupts(struct efx_nic *efx)
1412{
1413 struct efx_channel *channel, *end_channel;
1414 int rc;
1415
1416 BUG_ON(efx->state == STATE_DISABLED);
1417
1418 if (efx->eeh_disabled_legacy_irq) {
1419 enable_irq(efx->legacy_irq);
1420 efx->eeh_disabled_legacy_irq = false;
1421 }
1422
1423 efx->type->irq_enable_master(efx);
1424
1425 efx_for_each_channel(channel, efx) {
1426 if (channel->type->keep_eventq) {
1427 rc = efx_init_eventq(channel);
1428 if (rc)
1429 goto fail;
1430 }
1431 }
1432
1433 rc = efx_soft_enable_interrupts(efx);
1434 if (rc)
1435 goto fail;
1436
1437 return 0;
1438
1439fail:
1440 end_channel = channel;
1441 efx_for_each_channel(channel, efx) {
1442 if (channel == end_channel)
1443 break;
1444 if (channel->type->keep_eventq)
1440 efx_fini_eventq(channel); 1445 efx_fini_eventq(channel);
1441 } 1446 }
1447
1448 efx->type->irq_disable_non_ev(efx);
1449
1450 return rc;
1451}
1452
1453static void efx_disable_interrupts(struct efx_nic *efx)
1454{
1455 struct efx_channel *channel;
1456
1457 efx_soft_disable_interrupts(efx);
1458
1459 efx_for_each_channel(channel, efx) {
1460 if (channel->type->keep_eventq)
1461 efx_fini_eventq(channel);
1462 }
1463
1464 efx->type->irq_disable_non_ev(efx);
1442} 1465}
1443 1466
1444static void efx_remove_interrupts(struct efx_nic *efx) 1467static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1495,9 +1518,11 @@ static int efx_probe_nic(struct efx_nic *efx)
1495 * in MSI-X interrupts. */ 1518 * in MSI-X interrupts. */
1496 rc = efx_probe_interrupts(efx); 1519 rc = efx_probe_interrupts(efx);
1497 if (rc) 1520 if (rc)
1498 goto fail; 1521 goto fail1;
1499 1522
1500 efx->type->dimension_resources(efx); 1523 rc = efx->type->dimension_resources(efx);
1524 if (rc)
1525 goto fail2;
1501 1526
1502 if (efx->n_channels > 1) 1527 if (efx->n_channels > 1)
1503 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1528 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1515,7 +1540,9 @@ static int efx_probe_nic(struct efx_nic *efx)
1515 1540
1516 return 0; 1541 return 0;
1517 1542
1518fail: 1543fail2:
1544 efx_remove_interrupts(efx);
1545fail1:
1519 efx->type->remove(efx); 1546 efx->type->remove(efx);
1520 return rc; 1547 return rc;
1521} 1548}
@@ -1528,6 +1555,44 @@ static void efx_remove_nic(struct efx_nic *efx)
1528 efx->type->remove(efx); 1555 efx->type->remove(efx);
1529} 1556}
1530 1557
1558static int efx_probe_filters(struct efx_nic *efx)
1559{
1560 int rc;
1561
1562 spin_lock_init(&efx->filter_lock);
1563
1564 rc = efx->type->filter_table_probe(efx);
1565 if (rc)
1566 return rc;
1567
1568#ifdef CONFIG_RFS_ACCEL
1569 if (efx->type->offload_features & NETIF_F_NTUPLE) {
1570 efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
1571 sizeof(*efx->rps_flow_id),
1572 GFP_KERNEL);
1573 if (!efx->rps_flow_id) {
1574 efx->type->filter_table_remove(efx);
1575 return -ENOMEM;
1576 }
1577 }
1578#endif
1579
1580 return 0;
1581}
1582
1583static void efx_remove_filters(struct efx_nic *efx)
1584{
1585#ifdef CONFIG_RFS_ACCEL
1586 kfree(efx->rps_flow_id);
1587#endif
1588 efx->type->filter_table_remove(efx);
1589}
1590
1591static void efx_restore_filters(struct efx_nic *efx)
1592{
1593 efx->type->filter_table_restore(efx);
1594}
1595
1531/************************************************************************** 1596/**************************************************************************
1532 * 1597 *
1533 * NIC startup/shutdown 1598 * NIC startup/shutdown
@@ -1917,34 +1982,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
1917 struct rtnl_link_stats64 *stats) 1982 struct rtnl_link_stats64 *stats)
1918{ 1983{
1919 struct efx_nic *efx = netdev_priv(net_dev); 1984 struct efx_nic *efx = netdev_priv(net_dev);
1920 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1921 1985
1922 spin_lock_bh(&efx->stats_lock); 1986 spin_lock_bh(&efx->stats_lock);
1923 1987 efx->type->update_stats(efx, NULL, stats);
1924 efx->type->update_stats(efx);
1925
1926 stats->rx_packets = mac_stats->rx_packets;
1927 stats->tx_packets = mac_stats->tx_packets;
1928 stats->rx_bytes = mac_stats->rx_bytes;
1929 stats->tx_bytes = mac_stats->tx_bytes;
1930 stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
1931 stats->multicast = mac_stats->rx_multicast;
1932 stats->collisions = mac_stats->tx_collision;
1933 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1934 mac_stats->rx_length_error);
1935 stats->rx_crc_errors = mac_stats->rx_bad;
1936 stats->rx_frame_errors = mac_stats->rx_align_error;
1937 stats->rx_fifo_errors = mac_stats->rx_overflow;
1938 stats->rx_missed_errors = mac_stats->rx_missed;
1939 stats->tx_window_errors = mac_stats->tx_late_collision;
1940
1941 stats->rx_errors = (stats->rx_length_errors +
1942 stats->rx_crc_errors +
1943 stats->rx_frame_errors +
1944 mac_stats->rx_symbol_error);
1945 stats->tx_errors = (stats->tx_window_errors +
1946 mac_stats->tx_bad);
1947
1948 spin_unlock_bh(&efx->stats_lock); 1988 spin_unlock_bh(&efx->stats_lock);
1949 1989
1950 return stats; 1990 return stats;
@@ -2018,30 +2058,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
2018static void efx_set_rx_mode(struct net_device *net_dev) 2058static void efx_set_rx_mode(struct net_device *net_dev)
2019{ 2059{
2020 struct efx_nic *efx = netdev_priv(net_dev); 2060 struct efx_nic *efx = netdev_priv(net_dev);
2021 struct netdev_hw_addr *ha;
2022 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2023 u32 crc;
2024 int bit;
2025
2026 efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
2027
2028 /* Build multicast hash table */
2029 if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
2030 memset(mc_hash, 0xff, sizeof(*mc_hash));
2031 } else {
2032 memset(mc_hash, 0x00, sizeof(*mc_hash));
2033 netdev_for_each_mc_addr(ha, net_dev) {
2034 crc = ether_crc_le(ETH_ALEN, ha->addr);
2035 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2036 __set_bit_le(bit, mc_hash);
2037 }
2038
2039 /* Broadcast packets go through the multicast hash filter.
2040 * ether_crc_le() of the broadcast address is 0xbe2612ff
2041 * so we always add bit 0xff to the mask.
2042 */
2043 __set_bit_le(0xff, mc_hash);
2044 }
2045 2061
2046 if (efx->port_enabled) 2062 if (efx->port_enabled)
2047 queue_work(efx->workqueue, &efx->mac_work); 2063 queue_work(efx->workqueue, &efx->mac_work);
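A side note on the multicast-hash code removed above: its "bit 0xff" comment follows directly from the arithmetic, assuming EFX_MCAST_HASH_ENTRIES is 256 (which the 0xff result implies). A worked sketch:

/* For ff:ff:ff:ff:ff:ff, ether_crc_le() yields 0xbe2612ff, and
 * 0xbe2612ff & (256 - 1) == 0xff, so bit 0xff was always set. */
static int example_mcast_hash_bit(const u8 addr[ETH_ALEN])
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);

	return crc & (256 - 1);		/* assumes 256 hash entries */
}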
@@ -2059,7 +2075,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2059 return 0; 2075 return 0;
2060} 2076}
2061 2077
2062static const struct net_device_ops efx_netdev_ops = { 2078static const struct net_device_ops efx_farch_netdev_ops = {
2063 .ndo_open = efx_net_open, 2079 .ndo_open = efx_net_open,
2064 .ndo_stop = efx_net_stop, 2080 .ndo_stop = efx_net_stop,
2065 .ndo_get_stats64 = efx_net_stats, 2081 .ndo_get_stats64 = efx_net_stats,
@@ -2086,6 +2102,26 @@ static const struct net_device_ops efx_netdev_ops = {
2086#endif 2102#endif
2087}; 2103};
2088 2104
2105static const struct net_device_ops efx_ef10_netdev_ops = {
2106 .ndo_open = efx_net_open,
2107 .ndo_stop = efx_net_stop,
2108 .ndo_get_stats64 = efx_net_stats,
2109 .ndo_tx_timeout = efx_watchdog,
2110 .ndo_start_xmit = efx_hard_start_xmit,
2111 .ndo_validate_addr = eth_validate_addr,
2112 .ndo_do_ioctl = efx_ioctl,
2113 .ndo_change_mtu = efx_change_mtu,
2114 .ndo_set_mac_address = efx_set_mac_address,
2115 .ndo_set_rx_mode = efx_set_rx_mode,
2116 .ndo_set_features = efx_set_features,
2117#ifdef CONFIG_NET_POLL_CONTROLLER
2118 .ndo_poll_controller = efx_netpoll,
2119#endif
2120#ifdef CONFIG_RFS_ACCEL
2121 .ndo_rx_flow_steer = efx_filter_rfs,
2122#endif
2123};
2124
2089static void efx_update_name(struct efx_nic *efx) 2125static void efx_update_name(struct efx_nic *efx)
2090{ 2126{
2091 strcpy(efx->name, efx->net_dev->name); 2127 strcpy(efx->name, efx->net_dev->name);
@@ -2098,7 +2134,8 @@ static int efx_netdev_event(struct notifier_block *this,
2098{ 2134{
2099 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); 2135 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2100 2136
2101 if (net_dev->netdev_ops == &efx_netdev_ops && 2137 if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
2138 net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
2102 event == NETDEV_CHANGENAME) 2139 event == NETDEV_CHANGENAME)
2103 efx_update_name(netdev_priv(net_dev)); 2140 efx_update_name(netdev_priv(net_dev));
2104 2141
@@ -2125,7 +2162,12 @@ static int efx_register_netdev(struct efx_nic *efx)
2125 2162
2126 net_dev->watchdog_timeo = 5 * HZ; 2163 net_dev->watchdog_timeo = 5 * HZ;
2127 net_dev->irq = efx->pci_dev->irq; 2164 net_dev->irq = efx->pci_dev->irq;
2128 net_dev->netdev_ops = &efx_netdev_ops; 2165 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
2166 net_dev->netdev_ops = &efx_ef10_netdev_ops;
2167 net_dev->priv_flags |= IFF_UNICAST_FLT;
2168 } else {
2169 net_dev->netdev_ops = &efx_farch_netdev_ops;
2170 }
2129 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2171 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
2130 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2172 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2131 2173
@@ -2185,22 +2227,11 @@ fail_locked:
2185 2227
2186static void efx_unregister_netdev(struct efx_nic *efx) 2228static void efx_unregister_netdev(struct efx_nic *efx)
2187{ 2229{
2188 struct efx_channel *channel;
2189 struct efx_tx_queue *tx_queue;
2190
2191 if (!efx->net_dev) 2230 if (!efx->net_dev)
2192 return; 2231 return;
2193 2232
2194 BUG_ON(netdev_priv(efx->net_dev) != efx); 2233 BUG_ON(netdev_priv(efx->net_dev) != efx);
2195 2234
2196 /* Free up any skbs still remaining. This has to happen before
2197 * we try to unregister the netdev as running their destructors
2198 * may be needed to get the device ref. count to 0. */
2199 efx_for_each_channel(channel, efx) {
2200 efx_for_each_channel_tx_queue(tx_queue, channel)
2201 efx_release_tx_buffers(tx_queue);
2202 }
2203
2204 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2235 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2205 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2236 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2206 2237
@@ -2223,7 +2254,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2223 EFX_ASSERT_RESET_SERIALISED(efx); 2254 EFX_ASSERT_RESET_SERIALISED(efx);
2224 2255
2225 efx_stop_all(efx); 2256 efx_stop_all(efx);
2226 efx_stop_interrupts(efx, false); 2257 efx_disable_interrupts(efx);
2227 2258
2228 mutex_lock(&efx->mac_lock); 2259 mutex_lock(&efx->mac_lock);
2229 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 2260 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2260,9 +2291,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2260 "could not restore PHY settings\n"); 2291 "could not restore PHY settings\n");
2261 } 2292 }
2262 2293
2263 efx->type->reconfigure_mac(efx); 2294 rc = efx_enable_interrupts(efx);
2264 2295 if (rc)
2265 efx_start_interrupts(efx, false); 2296 goto fail;
2266 efx_restore_filters(efx); 2297 efx_restore_filters(efx);
2267 efx_sriov_reset(efx); 2298 efx_sriov_reset(efx);
2268 2299
@@ -2458,6 +2489,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
2458 .driver_data = (unsigned long) &siena_a0_nic_type}, 2489 .driver_data = (unsigned long) &siena_a0_nic_type},
2459 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ 2490 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
2460 .driver_data = (unsigned long) &siena_a0_nic_type}, 2491 .driver_data = (unsigned long) &siena_a0_nic_type},
2492 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
2493 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2461 {0} /* end of list */ 2494 {0} /* end of list */
2462}; 2495};
2463 2496
@@ -2516,6 +2549,9 @@ static int efx_init_struct(struct efx_nic *efx,
2516 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2549 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2517 2550
2518 efx->net_dev = net_dev; 2551 efx->net_dev = net_dev;
2552 efx->rx_prefix_size = efx->type->rx_prefix_size;
2553 efx->rx_packet_hash_offset =
2554 efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2519 spin_lock_init(&efx->stats_lock); 2555 spin_lock_init(&efx->stats_lock);
2520 mutex_init(&efx->mac_lock); 2556 mutex_init(&efx->mac_lock);
2521 efx->phy_op = &efx_dummy_phy_operations; 2557 efx->phy_op = &efx_dummy_phy_operations;
@@ -2527,10 +2563,10 @@ static int efx_init_struct(struct efx_nic *efx,
2527 efx->channel[i] = efx_alloc_channel(efx, i, NULL); 2563 efx->channel[i] = efx_alloc_channel(efx, i, NULL);
2528 if (!efx->channel[i]) 2564 if (!efx->channel[i])
2529 goto fail; 2565 goto fail;
2566 efx->msi_context[i].efx = efx;
2567 efx->msi_context[i].index = i;
2530 } 2568 }
2531 2569
2532 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
2533
2534 /* Higher numbered interrupt modes are less capable! */ 2570 /* Higher numbered interrupt modes are less capable! */
2535 efx->interrupt_mode = max(efx->type->max_interrupt_mode, 2571 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2536 interrupt_mode); 2572 interrupt_mode);
@@ -2579,7 +2615,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
2579 BUG_ON(efx->state == STATE_READY); 2615 BUG_ON(efx->state == STATE_READY);
2580 cancel_work_sync(&efx->reset_work); 2616 cancel_work_sync(&efx->reset_work);
2581 2617
2582 efx_stop_interrupts(efx, false); 2618 efx_disable_interrupts(efx);
2583 efx_nic_fini_interrupt(efx); 2619 efx_nic_fini_interrupt(efx);
2584 efx_fini_port(efx); 2620 efx_fini_port(efx);
2585 efx->type->fini(efx); 2621 efx->type->fini(efx);
@@ -2601,7 +2637,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2601 /* Mark the NIC as fini, then stop the interface */ 2637 /* Mark the NIC as fini, then stop the interface */
2602 rtnl_lock(); 2638 rtnl_lock();
2603 dev_close(efx->net_dev); 2639 dev_close(efx->net_dev);
2604 efx_stop_interrupts(efx, false); 2640 efx_disable_interrupts(efx);
2605 rtnl_unlock(); 2641 rtnl_unlock();
2606 2642
2607 efx_sriov_fini(efx); 2643 efx_sriov_fini(efx);
@@ -2703,10 +2739,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2703 rc = efx_nic_init_interrupt(efx); 2739 rc = efx_nic_init_interrupt(efx);
2704 if (rc) 2740 if (rc)
2705 goto fail5; 2741 goto fail5;
2706 efx_start_interrupts(efx, false); 2742 rc = efx_enable_interrupts(efx);
2743 if (rc)
2744 goto fail6;
2707 2745
2708 return 0; 2746 return 0;
2709 2747
2748 fail6:
2749 efx_nic_fini_interrupt(efx);
2710 fail5: 2750 fail5:
2711 efx_fini_port(efx); 2751 efx_fini_port(efx);
2712 fail4: 2752 fail4:
@@ -2824,7 +2864,7 @@ static int efx_pm_freeze(struct device *dev)
2824 efx_device_detach_sync(efx); 2864 efx_device_detach_sync(efx);
2825 2865
2826 efx_stop_all(efx); 2866 efx_stop_all(efx);
2827 efx_stop_interrupts(efx, false); 2867 efx_disable_interrupts(efx);
2828 } 2868 }
2829 2869
2830 rtnl_unlock(); 2870 rtnl_unlock();
@@ -2834,12 +2874,15 @@ static int efx_pm_freeze(struct device *dev)
2834 2874
2835static int efx_pm_thaw(struct device *dev) 2875static int efx_pm_thaw(struct device *dev)
2836{ 2876{
2877 int rc;
2837 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2878 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2838 2879
2839 rtnl_lock(); 2880 rtnl_lock();
2840 2881
2841 if (efx->state != STATE_DISABLED) { 2882 if (efx->state != STATE_DISABLED) {
2842 efx_start_interrupts(efx, false); 2883 rc = efx_enable_interrupts(efx);
2884 if (rc)
2885 goto fail;
2843 2886
2844 mutex_lock(&efx->mac_lock); 2887 mutex_lock(&efx->mac_lock);
2845 efx->phy_op->reconfigure(efx); 2888 efx->phy_op->reconfigure(efx);
@@ -2860,6 +2903,11 @@ static int efx_pm_thaw(struct device *dev)
2860 queue_work(reset_workqueue, &efx->reset_work); 2903 queue_work(reset_workqueue, &efx->reset_work);
2861 2904
2862 return 0; 2905 return 0;
2906
2907fail:
2908 rtnl_unlock();
2909
2910 return rc;
2863} 2911}
2864 2912
2865static int efx_pm_poweroff(struct device *dev) 2913static int efx_pm_poweroff(struct device *dev)
@@ -2896,8 +2944,8 @@ static int efx_pm_resume(struct device *dev)
2896 rc = efx->type->init(efx); 2944 rc = efx->type->init(efx);
2897 if (rc) 2945 if (rc)
2898 return rc; 2946 return rc;
2899 efx_pm_thaw(dev); 2947 rc = efx_pm_thaw(dev);
2900 return 0; 2948 return rc;
2901} 2949}
2902 2950
2903static int efx_pm_suspend(struct device *dev) 2951static int efx_pm_suspend(struct device *dev)
@@ -2942,7 +2990,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
2942 efx_device_detach_sync(efx); 2990 efx_device_detach_sync(efx);
2943 2991
2944 efx_stop_all(efx); 2992 efx_stop_all(efx);
2945 efx_stop_interrupts(efx, false); 2993 efx_disable_interrupts(efx);
2946 2994
2947 status = PCI_ERS_RESULT_NEED_RESET; 2995 status = PCI_ERS_RESULT_NEED_RESET;
2948 } else { 2996 } else {
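Throughout this file the patch replaces efx_stop_interrupts(efx, false) and efx_start_interrupts(efx, false) with efx_disable_interrupts() and efx_enable_interrupts(); the enable path can now fail, so every restart point gains an error check. A minimal sketch of the resulting caller pattern (the wrapper function itself is invented):

static int example_restart(struct efx_nic *efx)
{
	int rc;

	rc = efx_enable_interrupts(efx);
	if (rc)
		return rc;		/* leave the NIC quiesced */

	efx_restore_filters(efx);
	return 0;
}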
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index bdb30bbb0c97..34d00f5771fe 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
23extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); 23extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
24extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); 24extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
25extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 25extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
26extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
27extern netdev_tx_t 26extern netdev_tx_t
28efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); 27efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
29extern netdev_tx_t 28extern netdev_tx_t
@@ -69,27 +68,99 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
69#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) 68#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
70 69
71/* Filters */ 70/* Filters */
72extern int efx_probe_filters(struct efx_nic *efx); 71
73extern void efx_restore_filters(struct efx_nic *efx); 72/**
74extern void efx_remove_filters(struct efx_nic *efx); 73 * efx_filter_insert_filter - add or replace a filter
75extern void efx_filter_update_rx_scatter(struct efx_nic *efx); 74 * @efx: NIC in which to insert the filter
76extern s32 efx_filter_insert_filter(struct efx_nic *efx, 75 * @spec: Specification for the filter
77 struct efx_filter_spec *spec, 76 * @replace_equal: Flag for whether the specified filter may replace an
78 bool replace); 77 * existing filter with equal priority
79extern int efx_filter_remove_id_safe(struct efx_nic *efx, 78 *
80 enum efx_filter_priority priority, 79 * On success, return the filter ID.
81 u32 filter_id); 80 * On failure, return a negative error code.
82extern int efx_filter_get_filter_safe(struct efx_nic *efx, 81 *
83 enum efx_filter_priority priority, 82 * If existing filters have equal match values to the new filter spec,
84 u32 filter_id, struct efx_filter_spec *); 83 * then the new filter might replace them or the function might fail,
85extern void efx_filter_clear_rx(struct efx_nic *efx, 84 * as follows.
86 enum efx_filter_priority priority); 85 *
87extern u32 efx_filter_count_rx_used(struct efx_nic *efx, 86 * 1. If the existing filters have lower priority, or @replace_equal
88 enum efx_filter_priority priority); 87 * is set and they have equal priority, replace them.
89extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx); 88 *
90extern s32 efx_filter_get_rx_ids(struct efx_nic *efx, 89 * 2. If the existing filters have higher priority, return -%EPERM.
91 enum efx_filter_priority priority, 90 *
92 u32 *buf, u32 size); 91 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
92 * support delivery to multiple recipients, return -%EEXIST.
93 *
94 * This implies that filters for multiple multicast recipients must
95 * all be inserted with the same priority and @replace_equal = %false.
96 */
97static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
98 struct efx_filter_spec *spec,
99 bool replace_equal)
100{
101 return efx->type->filter_insert(efx, spec, replace_equal);
102}
103
104/**
105 * efx_filter_remove_id_safe - remove a filter by ID, carefully
106 * @efx: NIC from which to remove the filter
107 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
108 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
109 *
110 * This function will range-check @filter_id, so it is safe to call
111 * with a value passed from userland.
112 */
113static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
114 enum efx_filter_priority priority,
115 u32 filter_id)
116{
117 return efx->type->filter_remove_safe(efx, priority, filter_id);
118}
119
120/**
121 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 122 * @efx: NIC from which to retrieve the filter
123 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
124 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
125 * @spec: Buffer in which to store filter specification
126 *
127 * This function will range-check @filter_id, so it is safe to call
128 * with a value passed from userland.
129 */
130static inline int
131efx_filter_get_filter_safe(struct efx_nic *efx,
132 enum efx_filter_priority priority,
133 u32 filter_id, struct efx_filter_spec *spec)
134{
135 return efx->type->filter_get_safe(efx, priority, filter_id, spec);
136}
137
138/**
 139 * efx_filter_clear_rx - remove RX filters by priority
140 * @efx: NIC from which to remove the filters
141 * @priority: Maximum priority to remove
142 */
143static inline void efx_filter_clear_rx(struct efx_nic *efx,
144 enum efx_filter_priority priority)
145{
146 return efx->type->filter_clear_rx(efx, priority);
147}
148
149static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
150 enum efx_filter_priority priority)
151{
152 return efx->type->filter_count_rx_used(efx, priority);
153}
154static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
155{
156 return efx->type->filter_get_rx_id_limit(efx);
157}
158static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
159 enum efx_filter_priority priority,
160 u32 *buf, u32 size)
161{
162 return efx->type->filter_get_rx_ids(efx, priority, buf, size);
163}
93#ifdef CONFIG_RFS_ACCEL 164#ifdef CONFIG_RFS_ACCEL
94extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 165extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
95 u16 rxq_index, u32 flow_id); 166 u16 rxq_index, u32 flow_id);
@@ -105,11 +176,11 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
105static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} 176static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
106#define efx_filter_rfs_enabled() 0 177#define efx_filter_rfs_enabled() 0
107#endif 178#endif
179extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
108 180
109/* Channels */ 181/* Channels */
110extern int efx_channel_dummy_op_int(struct efx_channel *channel); 182extern int efx_channel_dummy_op_int(struct efx_channel *channel);
111extern void efx_channel_dummy_op_void(struct efx_channel *channel); 183extern void efx_channel_dummy_op_void(struct efx_channel *channel);
112extern void efx_process_channel_now(struct efx_channel *channel);
113extern int 184extern int
114efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); 185efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
115 186
@@ -141,7 +212,12 @@ extern void efx_port_dummy_op_void(struct efx_nic *efx);
141 212
142/* MTD */ 213/* MTD */
143#ifdef CONFIG_SFC_MTD 214#ifdef CONFIG_SFC_MTD
144extern int efx_mtd_probe(struct efx_nic *efx); 215extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
216 size_t n_parts, size_t sizeof_part);
217static inline int efx_mtd_probe(struct efx_nic *efx)
218{
219 return efx->type->mtd_probe(efx);
220}
145extern void efx_mtd_rename(struct efx_nic *efx); 221extern void efx_mtd_rename(struct efx_nic *efx);
146extern void efx_mtd_remove(struct efx_nic *efx); 222extern void efx_mtd_remove(struct efx_nic *efx);
147#else 223#else
@@ -155,7 +231,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
155 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 231 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
156 "channel %d scheduling NAPI poll on CPU%d\n", 232 "channel %d scheduling NAPI poll on CPU%d\n",
157 channel->channel, raw_smp_processor_id()); 233 channel->channel, raw_smp_processor_id());
158 channel->work_pending = true;
159 234
160 napi_schedule(&channel->napi_str); 235 napi_schedule(&channel->napi_str);
161} 236}
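The filter entry points above are now thin inline wrappers around per-NIC-type hooks. For illustration, a hedged sketch (not from the patch) of inserting and later removing a manual RX filter through this API, using only identifiers that appear in this series:

/* Hypothetical caller: steer TCP/IPv4 traffic for local port 80 to
 * RX queue @rxq, then remove the filter again by its returned ID. */
static int example_filter_roundtrip(struct efx_nic *efx, unsigned int rxq)
{
	struct efx_filter_spec spec;
	s32 filter_id;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq);
	spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
			    EFX_FILTER_MATCH_IP_PROTO |
			    EFX_FILTER_MATCH_LOC_PORT);
	spec.ether_type = htons(ETH_P_IP);
	spec.ip_proto = IPPROTO_TCP;
	spec.loc_port = htons(80);

	filter_id = efx_filter_insert_filter(efx, &spec, false);
	if (filter_id < 0)
		return filter_id;	/* e.g. -EPERM or -EEXIST, see above */

	return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
					 filter_id);
}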
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index ab8fb5889e55..7fdfee019092 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -147,8 +147,7 @@ enum efx_loopback_mode {
147 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog 147 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
148 * @RESET_TYPE_INT_ERROR: reset due to internal error 148 * @RESET_TYPE_INT_ERROR: reset due to internal error
149 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors 149 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
150 * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch 150 * @RESET_TYPE_DMA_ERROR: DMA error
151 * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
152 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors 151 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
153 * @RESET_TYPE_MC_FAILURE: MC reboot/assertion 152 * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
154 */ 153 */
@@ -163,8 +162,7 @@ enum reset_type {
163 RESET_TYPE_TX_WATCHDOG, 162 RESET_TYPE_TX_WATCHDOG,
164 RESET_TYPE_INT_ERROR, 163 RESET_TYPE_INT_ERROR,
165 RESET_TYPE_RX_RECOVERY, 164 RESET_TYPE_RX_RECOVERY,
166 RESET_TYPE_RX_DESC_FETCH, 165 RESET_TYPE_DMA_ERROR,
167 RESET_TYPE_TX_DESC_FETCH,
168 RESET_TYPE_TX_SKIP, 166 RESET_TYPE_TX_SKIP,
169 RESET_TYPE_MC_FAILURE, 167 RESET_TYPE_MC_FAILURE,
170 RESET_TYPE_MAX, 168 RESET_TYPE_MAX,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1fc21458413d..5b471cf5c323 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -19,14 +19,9 @@
19#include "filter.h" 19#include "filter.h"
20#include "nic.h" 20#include "nic.h"
21 21
22struct ethtool_string { 22struct efx_sw_stat_desc {
23 char name[ETH_GSTRING_LEN];
24};
25
26struct efx_ethtool_stat {
27 const char *name; 23 const char *name;
28 enum { 24 enum {
29 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
30 EFX_ETHTOOL_STAT_SOURCE_nic, 25 EFX_ETHTOOL_STAT_SOURCE_nic,
31 EFX_ETHTOOL_STAT_SOURCE_channel, 26 EFX_ETHTOOL_STAT_SOURCE_channel,
32 EFX_ETHTOOL_STAT_SOURCE_tx_queue 27 EFX_ETHTOOL_STAT_SOURCE_tx_queue
@@ -35,7 +30,7 @@ struct efx_ethtool_stat {
35 u64(*get_stat) (void *field); /* Reader function */ 30 u64(*get_stat) (void *field); /* Reader function */
36}; 31};
37 32
38/* Initialiser for a struct #efx_ethtool_stat with type-checking */ 33/* Initialiser for a struct efx_sw_stat_desc with type-checking */
39#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ 34#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
40 get_stat_function) { \ 35 get_stat_function) { \
41 .name = #stat_name, \ 36 .name = #stat_name, \
@@ -52,24 +47,11 @@ static u64 efx_get_uint_stat(void *field)
52 return *(unsigned int *)field; 47 return *(unsigned int *)field;
53} 48}
54 49
55static u64 efx_get_u64_stat(void *field)
56{
57 return *(u64 *) field;
58}
59
60static u64 efx_get_atomic_stat(void *field) 50static u64 efx_get_atomic_stat(void *field)
61{ 51{
62 return atomic_read((atomic_t *) field); 52 return atomic_read((atomic_t *) field);
63} 53}
64 54
65#define EFX_ETHTOOL_U64_MAC_STAT(field) \
66 EFX_ETHTOOL_STAT(field, mac_stats, field, \
67 u64, efx_get_u64_stat)
68
69#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
70 EFX_ETHTOOL_STAT(name, nic, n_##name, \
71 unsigned int, efx_get_uint_stat)
72
73#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ 55#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
74 EFX_ETHTOOL_STAT(field, nic, field, \ 56 EFX_ETHTOOL_STAT(field, nic, field, \
75 atomic_t, efx_get_atomic_stat) 57 atomic_t, efx_get_atomic_stat)
@@ -82,72 +64,12 @@ static u64 efx_get_atomic_stat(void *field)
82 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ 64 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
83 unsigned int, efx_get_uint_stat) 65 unsigned int, efx_get_uint_stat)
84 66
85static const struct efx_ethtool_stat efx_ethtool_stats[] = { 67static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
86 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), 68 EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
87 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
88 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
89 EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
90 EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
91 EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
92 EFX_ETHTOOL_U64_MAC_STAT(tx_control),
93 EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
94 EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
95 EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
96 EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
97 EFX_ETHTOOL_U64_MAC_STAT(tx_64),
98 EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
99 EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
100 EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
101 EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
102 EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
103 EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
104 EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
105 EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
106 EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
107 EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
108 EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
109 EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
110 EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
111 EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
112 EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
113 EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
114 EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
115 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), 69 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
116 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), 70 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
117 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), 71 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
118 EFX_ETHTOOL_UINT_TXQ_STAT(pushes), 72 EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
119 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
122 EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
123 EFX_ETHTOOL_U64_MAC_STAT(rx_good),
124 EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
125 EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
126 EFX_ETHTOOL_U64_MAC_STAT(rx_control),
127 EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
128 EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
129 EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
130 EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
131 EFX_ETHTOOL_U64_MAC_STAT(rx_64),
132 EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
133 EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
134 EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
135 EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
136 EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
137 EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
138 EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
139 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
140 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
141 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
142 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
143 EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
144 EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
145 EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
146 EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
147 EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
148 EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
149 EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
150 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
151 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), 73 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
152 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 74 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
153 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), 75 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -155,10 +77,11 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), 77 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 78 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
157 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc), 79 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
80 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
81 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
158}; 82};
159 83
160/* Number of ethtool statistics */ 84#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
161#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
162 85
163#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB 86#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
164 87
@@ -205,8 +128,6 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
205 efx->phy_op->get_settings(efx, ecmd); 128 efx->phy_op->get_settings(efx, ecmd);
206 mutex_unlock(&efx->mac_lock); 129 mutex_unlock(&efx->mac_lock);
207 130
208 /* GMAC does not support 1000Mbps HD */
209 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
210 /* Both MACs support pause frames (bidirectional and respond-only) */ 131 /* Both MACs support pause frames (bidirectional and respond-only) */
211 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 132 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
212 133
@@ -291,12 +212,11 @@ static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
291 * 212 *
292 * Fill in an individual self-test entry. 213 * Fill in an individual self-test entry.
293 */ 214 */
294static void efx_fill_test(unsigned int test_index, 215static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
295 struct ethtool_string *strings, u64 *data,
296 int *test, const char *unit_format, int unit_id, 216 int *test, const char *unit_format, int unit_id,
297 const char *test_format, const char *test_id) 217 const char *test_format, const char *test_id)
298{ 218{
299 struct ethtool_string unit_str, test_str; 219 char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
300 220
301 /* Fill data value, if applicable */ 221 /* Fill data value, if applicable */
302 if (data) 222 if (data)
@@ -305,15 +225,14 @@ static void efx_fill_test(unsigned int test_index,
305 /* Fill string, if applicable */ 225 /* Fill string, if applicable */
306 if (strings) { 226 if (strings) {
307 if (strchr(unit_format, '%')) 227 if (strchr(unit_format, '%'))
308 snprintf(unit_str.name, sizeof(unit_str.name), 228 snprintf(unit_str, sizeof(unit_str),
309 unit_format, unit_id); 229 unit_format, unit_id);
310 else 230 else
311 strcpy(unit_str.name, unit_format); 231 strcpy(unit_str, unit_format);
312 snprintf(test_str.name, sizeof(test_str.name), 232 snprintf(test_str, sizeof(test_str), test_format, test_id);
313 test_format, test_id); 233 snprintf(strings + test_index * ETH_GSTRING_LEN,
314 snprintf(strings[test_index].name, 234 ETH_GSTRING_LEN,
315 sizeof(strings[test_index].name), 235 "%-6s %-24s", unit_str, test_str);
316 "%-6s %-24s", unit_str.name, test_str.name);
317 } 236 }
318} 237}
319 238
@@ -336,7 +255,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
336 struct efx_loopback_self_tests *lb_tests, 255 struct efx_loopback_self_tests *lb_tests,
337 enum efx_loopback_mode mode, 256 enum efx_loopback_mode mode,
338 unsigned int test_index, 257 unsigned int test_index,
339 struct ethtool_string *strings, u64 *data) 258 u8 *strings, u64 *data)
340{ 259{
341 struct efx_channel *channel = 260 struct efx_channel *channel =
342 efx_get_channel(efx, efx->tx_channel_offset); 261 efx_get_channel(efx, efx->tx_channel_offset);
@@ -373,8 +292,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
373 */ 292 */
374static int efx_ethtool_fill_self_tests(struct efx_nic *efx, 293static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
375 struct efx_self_tests *tests, 294 struct efx_self_tests *tests,
376 struct ethtool_string *strings, 295 u8 *strings, u64 *data)
377 u64 *data)
378{ 296{
379 struct efx_channel *channel; 297 struct efx_channel *channel;
380 unsigned int n = 0, i; 298 unsigned int n = 0, i;
@@ -433,12 +351,14 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
433static int efx_ethtool_get_sset_count(struct net_device *net_dev, 351static int efx_ethtool_get_sset_count(struct net_device *net_dev,
434 int string_set) 352 int string_set)
435{ 353{
354 struct efx_nic *efx = netdev_priv(net_dev);
355
436 switch (string_set) { 356 switch (string_set) {
437 case ETH_SS_STATS: 357 case ETH_SS_STATS:
438 return EFX_ETHTOOL_NUM_STATS; 358 return efx->type->describe_stats(efx, NULL) +
359 EFX_ETHTOOL_SW_STAT_COUNT;
439 case ETH_SS_TEST: 360 case ETH_SS_TEST:
440 return efx_ethtool_fill_self_tests(netdev_priv(net_dev), 361 return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
441 NULL, NULL, NULL);
442 default: 362 default:
443 return -EINVAL; 363 return -EINVAL;
444 } 364 }
@@ -448,20 +368,18 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
448 u32 string_set, u8 *strings) 368 u32 string_set, u8 *strings)
449{ 369{
450 struct efx_nic *efx = netdev_priv(net_dev); 370 struct efx_nic *efx = netdev_priv(net_dev);
451 struct ethtool_string *ethtool_strings =
452 (struct ethtool_string *)strings;
453 int i; 371 int i;
454 372
455 switch (string_set) { 373 switch (string_set) {
456 case ETH_SS_STATS: 374 case ETH_SS_STATS:
457 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) 375 strings += (efx->type->describe_stats(efx, strings) *
458 strlcpy(ethtool_strings[i].name, 376 ETH_GSTRING_LEN);
459 efx_ethtool_stats[i].name, 377 for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
460 sizeof(ethtool_strings[i].name)); 378 strlcpy(strings + i * ETH_GSTRING_LEN,
379 efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
461 break; 380 break;
462 case ETH_SS_TEST: 381 case ETH_SS_TEST:
463 efx_ethtool_fill_self_tests(efx, NULL, 382 efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
464 ethtool_strings, NULL);
465 break; 383 break;
466 default: 384 default:
467 /* No other string sets */ 385 /* No other string sets */
@@ -474,27 +392,20 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
474 u64 *data) 392 u64 *data)
475{ 393{
476 struct efx_nic *efx = netdev_priv(net_dev); 394 struct efx_nic *efx = netdev_priv(net_dev);
477 struct efx_mac_stats *mac_stats = &efx->mac_stats; 395 const struct efx_sw_stat_desc *stat;
478 const struct efx_ethtool_stat *stat;
479 struct efx_channel *channel; 396 struct efx_channel *channel;
480 struct efx_tx_queue *tx_queue; 397 struct efx_tx_queue *tx_queue;
481 int i; 398 int i;
482 399
483 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
484
485 spin_lock_bh(&efx->stats_lock); 400 spin_lock_bh(&efx->stats_lock);
486 401
487 /* Update MAC and NIC statistics */ 402 /* Get NIC statistics */
488 efx->type->update_stats(efx); 403 data += efx->type->update_stats(efx, data, NULL);
489 404
490 /* Fill detailed statistics buffer */ 405 /* Get software statistics */
491 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { 406 for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
492 stat = &efx_ethtool_stats[i]; 407 stat = &efx_sw_stat_desc[i];
493 switch (stat->source) { 408 switch (stat->source) {
494 case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
495 data[i] = stat->get_stat((void *)mac_stats +
496 stat->offset);
497 break;
498 case EFX_ETHTOOL_STAT_SOURCE_nic: 409 case EFX_ETHTOOL_STAT_SOURCE_nic:
499 data[i] = stat->get_stat((void *)efx + stat->offset); 410 data[i] = stat->get_stat((void *)efx + stat->offset);
500 break; 411 break;
@@ -709,7 +620,6 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
709 struct efx_nic *efx = netdev_priv(net_dev); 620 struct efx_nic *efx = netdev_priv(net_dev);
710 u8 wanted_fc, old_fc; 621 u8 wanted_fc, old_fc;
711 u32 old_adv; 622 u32 old_adv;
712 bool reset;
713 int rc = 0; 623 int rc = 0;
714 624
715 mutex_lock(&efx->mac_lock); 625 mutex_lock(&efx->mac_lock);
@@ -732,24 +642,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
732 goto out; 642 goto out;
733 } 643 }
734 644
735 /* TX flow control may automatically turn itself off if the 645 /* Hook for Falcon bug 11482 workaround */
736 * link partner (intermittently) stops responding to pause 646 if (efx->type->prepare_enable_fc_tx &&
737 * frames. There isn't any indication that this has happened, 647 (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
738 * so the best we do is leave it up to the user to spot this 648 efx->type->prepare_enable_fc_tx(efx);
739 * and fix it be cycling transmit flow control on this end. */
740 reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
741 if (EFX_WORKAROUND_11482(efx) && reset) {
742 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
743 /* Recover by resetting the EM block */
744 falcon_stop_nic_stats(efx);
745 falcon_drain_tx_fifo(efx);
746 falcon_reconfigure_xmac(efx);
747 falcon_start_nic_stats(efx);
748 } else {
749 /* Schedule a reset to recover */
750 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
751 }
752 }
753 649
754 old_adv = efx->link_advertising; 650 old_adv = efx->link_advertising;
755 old_fc = efx->wanted_fc; 651 old_fc = efx->wanted_fc;
@@ -814,11 +710,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
814 return efx_reset(efx, rc); 710 return efx_reset(efx, rc);
815} 711}
816 712
817/* MAC address mask including only MC flag */ 713/* MAC address mask including only I/G bit */
818static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; 714static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
819 715
820#define IP4_ADDR_FULL_MASK ((__force __be32)~0) 716#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
821#define PORT_FULL_MASK ((__force __be16)~0) 717#define PORT_FULL_MASK ((__force __be16)~0)
718#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
822 719
823static int efx_ethtool_get_class_rule(struct efx_nic *efx, 720static int efx_ethtool_get_class_rule(struct efx_nic *efx,
824 struct ethtool_rx_flow_spec *rule) 721 struct ethtool_rx_flow_spec *rule)
@@ -828,8 +725,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
828 struct ethhdr *mac_entry = &rule->h_u.ether_spec; 725 struct ethhdr *mac_entry = &rule->h_u.ether_spec;
829 struct ethhdr *mac_mask = &rule->m_u.ether_spec; 726 struct ethhdr *mac_mask = &rule->m_u.ether_spec;
830 struct efx_filter_spec spec; 727 struct efx_filter_spec spec;
831 u16 vid;
832 u8 proto;
833 int rc; 728 int rc;
834 729
835 rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, 730 rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
@@ -837,44 +732,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
837 if (rc) 732 if (rc)
838 return rc; 733 return rc;
839 734
840 if (spec.dmaq_id == 0xfff) 735 if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
841 rule->ring_cookie = RX_CLS_FLOW_DISC; 736 rule->ring_cookie = RX_CLS_FLOW_DISC;
842 else 737 else
843 rule->ring_cookie = spec.dmaq_id; 738 rule->ring_cookie = spec.dmaq_id;
844 739
845 if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) { 740 if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
846 rule->flow_type = ETHER_FLOW; 741 spec.ether_type == htons(ETH_P_IP) &&
847 memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN); 742 (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
848 if (spec.type == EFX_FILTER_MC_DEF) 743 (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
849 memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN); 744 !(spec.match_flags &
850 return 0; 745 ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
851 } 746 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
852 747 EFX_FILTER_MATCH_IP_PROTO |
853 rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest); 748 EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
854 if (rc == 0) { 749 rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
750 TCP_V4_FLOW : UDP_V4_FLOW);
751 if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
752 ip_entry->ip4dst = spec.loc_host[0];
753 ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
754 }
755 if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
756 ip_entry->ip4src = spec.rem_host[0];
757 ip_mask->ip4src = IP4_ADDR_FULL_MASK;
758 }
759 if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
760 ip_entry->pdst = spec.loc_port;
761 ip_mask->pdst = PORT_FULL_MASK;
762 }
763 if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
764 ip_entry->psrc = spec.rem_port;
765 ip_mask->psrc = PORT_FULL_MASK;
766 }
767 } else if (!(spec.match_flags &
768 ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
769 EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
770 EFX_FILTER_MATCH_OUTER_VID))) {
855 rule->flow_type = ETHER_FLOW; 771 rule->flow_type = ETHER_FLOW;
856 memset(mac_mask->h_dest, ~0, ETH_ALEN); 772 if (spec.match_flags &
857 if (vid != EFX_FILTER_VID_UNSPEC) { 773 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
858 rule->flow_type |= FLOW_EXT; 774 memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
859 rule->h_ext.vlan_tci = htons(vid); 775 if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
860 rule->m_ext.vlan_tci = htons(0xfff); 776 memset(mac_mask->h_dest, ~0, ETH_ALEN);
777 else
778 memcpy(mac_mask->h_dest, mac_addr_ig_mask,
779 ETH_ALEN);
861 } 780 }
862 return 0; 781 if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
782 memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
783 memset(mac_mask->h_source, ~0, ETH_ALEN);
784 }
785 if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
786 mac_entry->h_proto = spec.ether_type;
787 mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
788 }
789 } else {
790 /* The above should handle all filters that we insert */
791 WARN_ON(1);
792 return -EINVAL;
863 } 793 }
864 794
865 rc = efx_filter_get_ipv4_local(&spec, &proto, 795 if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
866 &ip_entry->ip4dst, &ip_entry->pdst); 796 rule->flow_type |= FLOW_EXT;
867 if (rc != 0) { 797 rule->h_ext.vlan_tci = spec.outer_vid;
868 rc = efx_filter_get_ipv4_full( 798 rule->m_ext.vlan_tci = htons(0xfff);
869 &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
870 &ip_entry->ip4src, &ip_entry->psrc);
871 EFX_WARN_ON_PARANOID(rc);
872 ip_mask->ip4src = IP4_ADDR_FULL_MASK;
873 ip_mask->psrc = PORT_FULL_MASK;
874 } 799 }
875 rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW; 800
876 ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
877 ip_mask->pdst = PORT_FULL_MASK;
878 return rc; 801 return rc;
879} 802}
880 803
@@ -982,82 +905,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
982 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 905 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
983 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 906 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
984 (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 907 (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
985 0xfff : rule->ring_cookie); 908 EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
986 909
987 switch (rule->flow_type) { 910 switch (rule->flow_type & ~FLOW_EXT) {
988 case TCP_V4_FLOW: 911 case TCP_V4_FLOW:
989 case UDP_V4_FLOW: { 912 case UDP_V4_FLOW:
990 u8 proto = (rule->flow_type == TCP_V4_FLOW ? 913 spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
991 IPPROTO_TCP : IPPROTO_UDP); 914 EFX_FILTER_MATCH_IP_PROTO);
992 915 spec.ether_type = htons(ETH_P_IP);
993 /* Must match all of destination, */ 916 spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
994 if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK && 917 IPPROTO_TCP : IPPROTO_UDP);
995 ip_mask->pdst == PORT_FULL_MASK)) 918 if (ip_mask->ip4dst) {
996 return -EINVAL; 919 if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
997 /* all or none of source, */ 920 return -EINVAL;
998 if ((ip_mask->ip4src || ip_mask->psrc) && 921 spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
999 !(ip_mask->ip4src == IP4_ADDR_FULL_MASK && 922 spec.loc_host[0] = ip_entry->ip4dst;
1000 ip_mask->psrc == PORT_FULL_MASK)) 923 }
1001 return -EINVAL; 924 if (ip_mask->ip4src) {
1002 /* and nothing else */ 925 if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
1003 if (ip_mask->tos || rule->m_ext.vlan_tci) 926 return -EINVAL;
927 spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
928 spec.rem_host[0] = ip_entry->ip4src;
929 }
930 if (ip_mask->pdst) {
931 if (ip_mask->pdst != PORT_FULL_MASK)
932 return -EINVAL;
933 spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
934 spec.loc_port = ip_entry->pdst;
935 }
936 if (ip_mask->psrc) {
937 if (ip_mask->psrc != PORT_FULL_MASK)
938 return -EINVAL;
939 spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
940 spec.rem_port = ip_entry->psrc;
941 }
942 if (ip_mask->tos)
1004 return -EINVAL; 943 return -EINVAL;
1005
1006 if (ip_mask->ip4src)
1007 rc = efx_filter_set_ipv4_full(&spec, proto,
1008 ip_entry->ip4dst,
1009 ip_entry->pdst,
1010 ip_entry->ip4src,
1011 ip_entry->psrc);
1012 else
1013 rc = efx_filter_set_ipv4_local(&spec, proto,
1014 ip_entry->ip4dst,
1015 ip_entry->pdst);
1016 if (rc)
1017 return rc;
1018 break; 944 break;
1019 }
1020
1021 case ETHER_FLOW | FLOW_EXT:
1022 case ETHER_FLOW: {
1023 u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
1024 ntohs(rule->m_ext.vlan_tci) : 0);
1025
1026 /* Must not match on source address or Ethertype */
1027 if (!is_zero_ether_addr(mac_mask->h_source) ||
1028 mac_mask->h_proto)
1029 return -EINVAL;
1030 945
1031 /* Is it a default UC or MC filter? */ 946 case ETHER_FLOW:
1032 if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) && 947 if (!is_zero_ether_addr(mac_mask->h_dest)) {
1033 vlan_tag_mask == 0) { 948 if (ether_addr_equal(mac_mask->h_dest,
1034 if (is_multicast_ether_addr(mac_entry->h_dest)) 949 mac_addr_ig_mask))
1035 rc = efx_filter_set_mc_def(&spec); 950 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
951 else if (is_broadcast_ether_addr(mac_mask->h_dest))
952 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
1036 else 953 else
1037 rc = efx_filter_set_uc_def(&spec); 954 return -EINVAL;
955 memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
1038 } 956 }
1039 /* Otherwise, it must match all of destination and all 957 if (!is_zero_ether_addr(mac_mask->h_source)) {
1040 * or none of VID. 958 if (!is_broadcast_ether_addr(mac_mask->h_source))
1041 */ 959 return -EINVAL;
1042 else if (is_broadcast_ether_addr(mac_mask->h_dest) && 960 spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
1043 (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) { 961 memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
1044 rc = efx_filter_set_eth_local( 962 }
1045 &spec, 963 if (mac_mask->h_proto) {
1046 vlan_tag_mask ? 964 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
1047 ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC, 965 return -EINVAL;
1048 mac_entry->h_dest); 966 spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
1049 } else { 967 spec.ether_type = mac_entry->h_proto;
1050 rc = -EINVAL;
1051 } 968 }
1052 if (rc)
1053 return rc;
1054 break; 969 break;
1055 }
1056 970
1057 default: 971 default:
1058 return -EINVAL; 972 return -EINVAL;
1059 } 973 }
1060 974
975 if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
976 if (rule->m_ext.vlan_tci != htons(0xfff))
977 return -EINVAL;
978 spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
979 spec.outer_vid = rule->h_ext.vlan_tci;
980 }
981
1061 rc = efx_filter_insert_filter(efx, &spec, true); 982 rc = efx_filter_insert_filter(efx, &spec, true);
1062 if (rc < 0) 983 if (rc < 0)
1063 return rc; 984 return rc;
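The rewritten class-rule code above maps ethtool_rx_flow_spec fields onto efx_filter_spec match flags, requiring each mask to be either all-ones or all-zero. For illustration, a rule (values hypothetical) that the new efx_ethtool_set_class_rule() would accept:

/* Sketch: a TCP/IPv4 rule steering local port 8080 to RX queue 1. */
static void example_fill_rule(struct ethtool_rx_flow_spec *rule)
{
	memset(rule, 0, sizeof(*rule));
	rule->flow_type = TCP_V4_FLOW;
	rule->h_u.tcp_ip4_spec.pdst = htons(8080);
	rule->m_u.tcp_ip4_spec.pdst = PORT_FULL_MASK;	/* must be ~0 */
	rule->ring_cookie = 1;				/* RX queue index */
}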
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 71998e7995d9..ff5d322b9b49 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -19,17 +19,284 @@
19#include "net_driver.h" 19#include "net_driver.h"
20#include "bitfield.h" 20#include "bitfield.h"
21#include "efx.h" 21#include "efx.h"
22#include "spi.h"
23#include "nic.h" 22#include "nic.h"
24#include "regs.h" 23#include "farch_regs.h"
25#include "io.h" 24#include "io.h"
26#include "phy.h" 25#include "phy.h"
27#include "workarounds.h" 26#include "workarounds.h"
28#include "selftest.h" 27#include "selftest.h"
28#include "mdio_10g.h"
29 29
30/* Hardware control for SFC4000 (aka Falcon). */ 30/* Hardware control for SFC4000 (aka Falcon). */
31 31
32/**************************************************************************
33 *
34 * NIC stats
35 *
36 **************************************************************************
37 */
38
39#define FALCON_MAC_STATS_SIZE 0x100
40
41#define XgRxOctets_offset 0x0
42#define XgRxOctets_WIDTH 48
43#define XgRxOctetsOK_offset 0x8
44#define XgRxOctetsOK_WIDTH 48
45#define XgRxPkts_offset 0x10
46#define XgRxPkts_WIDTH 32
47#define XgRxPktsOK_offset 0x14
48#define XgRxPktsOK_WIDTH 32
49#define XgRxBroadcastPkts_offset 0x18
50#define XgRxBroadcastPkts_WIDTH 32
51#define XgRxMulticastPkts_offset 0x1C
52#define XgRxMulticastPkts_WIDTH 32
53#define XgRxUnicastPkts_offset 0x20
54#define XgRxUnicastPkts_WIDTH 32
55#define XgRxUndersizePkts_offset 0x24
56#define XgRxUndersizePkts_WIDTH 32
57#define XgRxOversizePkts_offset 0x28
58#define XgRxOversizePkts_WIDTH 32
59#define XgRxJabberPkts_offset 0x2C
60#define XgRxJabberPkts_WIDTH 32
61#define XgRxUndersizeFCSerrorPkts_offset 0x30
62#define XgRxUndersizeFCSerrorPkts_WIDTH 32
63#define XgRxDropEvents_offset 0x34
64#define XgRxDropEvents_WIDTH 32
65#define XgRxFCSerrorPkts_offset 0x38
66#define XgRxFCSerrorPkts_WIDTH 32
67#define XgRxAlignError_offset 0x3C
68#define XgRxAlignError_WIDTH 32
69#define XgRxSymbolError_offset 0x40
70#define XgRxSymbolError_WIDTH 32
71#define XgRxInternalMACError_offset 0x44
72#define XgRxInternalMACError_WIDTH 32
73#define XgRxControlPkts_offset 0x48
74#define XgRxControlPkts_WIDTH 32
75#define XgRxPausePkts_offset 0x4C
76#define XgRxPausePkts_WIDTH 32
77#define XgRxPkts64Octets_offset 0x50
78#define XgRxPkts64Octets_WIDTH 32
79#define XgRxPkts65to127Octets_offset 0x54
80#define XgRxPkts65to127Octets_WIDTH 32
81#define XgRxPkts128to255Octets_offset 0x58
82#define XgRxPkts128to255Octets_WIDTH 32
83#define XgRxPkts256to511Octets_offset 0x5C
84#define XgRxPkts256to511Octets_WIDTH 32
85#define XgRxPkts512to1023Octets_offset 0x60
86#define XgRxPkts512to1023Octets_WIDTH 32
87#define XgRxPkts1024to15xxOctets_offset 0x64
88#define XgRxPkts1024to15xxOctets_WIDTH 32
89#define XgRxPkts15xxtoMaxOctets_offset 0x68
90#define XgRxPkts15xxtoMaxOctets_WIDTH 32
91#define XgRxLengthError_offset 0x6C
92#define XgRxLengthError_WIDTH 32
93#define XgTxPkts_offset 0x80
94#define XgTxPkts_WIDTH 32
95#define XgTxOctets_offset 0x88
96#define XgTxOctets_WIDTH 48
97#define XgTxMulticastPkts_offset 0x90
98#define XgTxMulticastPkts_WIDTH 32
99#define XgTxBroadcastPkts_offset 0x94
100#define XgTxBroadcastPkts_WIDTH 32
101#define XgTxUnicastPkts_offset 0x98
102#define XgTxUnicastPkts_WIDTH 32
103#define XgTxControlPkts_offset 0x9C
104#define XgTxControlPkts_WIDTH 32
105#define XgTxPausePkts_offset 0xA0
106#define XgTxPausePkts_WIDTH 32
107#define XgTxPkts64Octets_offset 0xA4
108#define XgTxPkts64Octets_WIDTH 32
109#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_XMAC_STATS_DMA_FLAG(efx) \
+	(*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
+
+#define FALCON_DMA_STAT(ext_name, hw_name) \
+	[FALCON_STAT_ ## ext_name] = \
+	{ #ext_name, \
+	  /* 48-bit stats are zero-padded to 64 on DMA */ \
+	  hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \
+	  hw_name ## _ ## offset }
+#define FALCON_OTHER_STAT(ext_name) \
+	[FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
+	FALCON_DMA_STAT(tx_bytes, XgTxOctets),
+	FALCON_DMA_STAT(tx_packets, XgTxPkts),
+	FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
+	FALCON_DMA_STAT(tx_control, XgTxControlPkts),
+	FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
+	FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
+	FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
+	FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
+	FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
+	FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
+	FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
+	FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
+	FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
+	FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
+	FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
+	FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
+	FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
+	FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
+	FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
+	FALCON_DMA_STAT(rx_bytes, XgRxOctets),
+	FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
+	FALCON_OTHER_STAT(rx_bad_bytes),
+	FALCON_DMA_STAT(rx_packets, XgRxPkts),
+	FALCON_DMA_STAT(rx_good, XgRxPktsOK),
+	FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
+	FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
+	FALCON_DMA_STAT(rx_control, XgRxControlPkts),
+	FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
+	FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
+	FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
+	FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
+	FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
+	FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
+	FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
+	FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
+	FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
+	FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
+	FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
+	FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
+	FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
+	FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
+	FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
+	FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
+	FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
+	FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
+	FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
+	FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
+};
+static const unsigned long falcon_stat_mask[] = {
+	[0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
+};
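[Editorial note: it may help to see what one of these token-pasting entries expands to. Using the XgTxPkts128to255Octets definitions above, and assuming struct efx_hw_stat_desc carries a name, a DMA width and a buffer offset, FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets) comes out roughly as:]

	/* Illustrative expansion only - not part of the patch */
	[FALCON_STAT_tx_128_to_255] =
		{ "tx_128_to_255",
		  32,	/* XgTxPkts128to255Octets_WIDTH; a 48-bit stat would become 64 */
		  0xAC };	/* XgTxPkts128to255Octets_offset */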
+
+/**************************************************************************
+ *
+ * Basic SPI command set and bit definitions
+ *
+ *************************************************************************/
+
+#define SPI_WRSR 0x01		/* Write status register */
+#define SPI_WRITE 0x02		/* Write data to memory array */
+#define SPI_READ 0x03		/* Read data from memory array */
+#define SPI_WRDI 0x04		/* Reset write enable latch */
+#define SPI_RDSR 0x05		/* Read status register */
+#define SPI_WREN 0x06		/* Set write enable latch */
+#define SPI_SST_EWSR 0x50	/* SST: Enable write to status register */
+
+#define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01	/* Device busy flag */
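[Editorial note: a rough sketch of how these opcodes combine in practice, mirroring what falcon_spi_write()/falcon_spi_wait_write() do further down; error handling elided, not additional patch code:]

	u8 status;

	falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);	/* set write enable latch */
	falcon_spi_cmd(efx, spi, SPI_WRITE, addr, buf, NULL, len); /* program one block */
	do {	/* poll the status register until the busy flag clears */
		falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, &status, sizeof(status));
	} while (status & SPI_STATUS_NRDY);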
+
+/**************************************************************************
+ *
+ * Non-volatile memory layout
+ *
+ **************************************************************************
+ */
+
+/* SFC4000 flash is partitioned into:
+ *     0-0x400       chip and board config (see struct falcon_nvconfig)
+ *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
+ *     0x8000-end    boot code (mapped to PCI expansion ROM)
+ * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
+ * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
+ *     0-0x400       chip and board config
+ *     configurable  VPD
+ *     0x800-0x1800  boot config
+ * Aside from the chip and board config, all of these are optional and may
+ * be absent or truncated depending on the devices used.
+ */
+#define FALCON_NVCONFIG_END 0x400U
+#define FALCON_FLASH_BOOTCODE_START 0x8000U
+#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
+#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+	__le16 nports;
+	u8 port0_phy_addr;
+	u8 port0_phy_type;
+	u8 port1_phy_addr;
+	u8 port1_phy_type;
+	__le16 asic_sub_revision;
+	__le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+	__le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field) \
+	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
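[Editorial note: a quick illustration of the decoder, assuming EFX_LOW_BIT() and EFX_WIDTH() paste the _LBN/_WIDTH suffixes onto the field name and EFX_MASK32(n) yields an n-bit mask; not patch code:]

	u32 type = le32_to_cpu(board_v3.spi_device_type[0]);

	/* (type >> 0) & 0x1f - the 5-bit size field */
	unsigned int size_bits = SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_SIZE);
	/* (type >> 8) & 0xff - the erase opcode for this part */
	unsigned int erase_cmd = SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ERASE_CMD);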
+
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
+	u8 mac_address[2][8];				/* 0x310 */
+	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
+	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
+	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
+	efx_oword_t hw_init_reg;			/* 0x350 */
+	efx_oword_t nic_stat_reg;			/* 0x360 */
+	efx_oword_t glb_ctl_reg;			/* 0x370 */
+	efx_oword_t srm_cfg_reg;			/* 0x380 */
+	efx_oword_t spare_reg;				/* 0x390 */
+	__le16 board_magic_num;				/* 0x3A0 */
+	__le16 board_struct_ver;
+	__le16 board_checksum;
+	struct falcon_nvconfig_board_v2 board_v2;
+	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
+	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
+} __packed;
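[Editorial note: a hedged sketch of the sanity checks falcon_read_nvram() (further down) applies to this block once it has been read from flash or EEPROM; the exact error values are assumptions here:]

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2)
		return -EINVAL;	/* v1 layout is obsolete, per the comment above */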
+
+/*************************************************************************/
+
 static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
 
 static const unsigned int
 /* "Large" EEPROM device: Atmel AT25640 or similar
@@ -146,7 +413,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
  *
  * NB most hardware supports MSI interrupts
  */
-inline void falcon_irq_ack_a1(struct efx_nic *efx)
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
 {
 	efx_dword_t reg;
 
@@ -156,7 +423,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
 }
 
 
-irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
 	struct efx_nic *efx = dev_id;
 	efx_oword_t *int_ker = efx->irq_status.addr;
@@ -177,10 +444,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
177 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 444 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
178 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 445 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
179 446
447 if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
448 return IRQ_HANDLED;
449
180 /* Check to see if we have a serious error condition */ 450 /* Check to see if we have a serious error condition */
181 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 451 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
182 if (unlikely(syserr)) 452 if (unlikely(syserr))
183 return efx_nic_fatal_interrupt(efx); 453 return efx_farch_fatal_interrupt(efx);
184 454
185 /* Determine interrupting queues, clear interrupt status 455 /* Determine interrupting queues, clear interrupt status
186 * register and acknowledge the device interrupt. 456 * register and acknowledge the device interrupt.
@@ -241,9 +511,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
 	}
 }
 
-int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
-		   unsigned int command, int address,
-		   const void *in, void *out, size_t len)
+static int
+falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
+	       unsigned int command, int address,
+	       const void *in, void *out, size_t len)
 {
 	bool addressed = (address >= 0);
 	bool reading = (out != NULL);
@@ -297,48 +568,16 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
 	return 0;
 }
 
-static size_t
-falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
-{
-	return min(FALCON_SPI_MAX_LEN,
-		   (spi->block_size - (start & (spi->block_size - 1))));
-}
-
 static inline u8
-efx_spi_munge_command(const struct efx_spi_device *spi,
-		      const u8 command, const unsigned int address)
+falcon_spi_munge_command(const struct falcon_spi_device *spi,
+			 const u8 command, const unsigned int address)
 {
 	return command | (((address >> 8) & spi->munge_address) << 3);
 }
 
-/* Wait up to 10 ms for buffered write completion */
-int
-falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
-	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
-	u8 status;
-	int rc;
-
-	for (;;) {
-		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
-				    &status, sizeof(status));
-		if (rc)
-			return rc;
-		if (!(status & SPI_STATUS_NRDY))
-			return 0;
-		if (time_after_eq(jiffies, timeout)) {
-			netif_err(efx, hw, efx->net_dev,
-				  "SPI write timeout on device %d"
-				  " last status=0x%02x\n",
-				  spi->device_id, status);
-			return -ETIMEDOUT;
-		}
-		schedule_timeout_uninterruptible(1);
-	}
-}
-
-int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
-		    loff_t start, size_t len, size_t *retlen, u8 *buffer)
+static int
+falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
+		loff_t start, size_t len, size_t *retlen, u8 *buffer)
 {
 	size_t block_len, pos = 0;
 	unsigned int command;
@@ -347,7 +586,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
 	while (pos < len) {
 		block_len = min(len - pos, FALCON_SPI_MAX_LEN);
 
-		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
 		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
 				    buffer + pos, block_len);
 		if (rc)
@@ -367,8 +606,52 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
 	return rc;
 }
 
-int
-falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
+#ifdef CONFIG_SFC_MTD
+
+struct falcon_mtd_partition {
+	struct efx_mtd_partition common;
+	const struct falcon_spi_device *spi;
+	size_t offset;
+};
+
+#define to_falcon_mtd_partition(mtd) \
+	container_of(mtd, struct falcon_mtd_partition, common.mtd)
+
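[Editorial note: to_falcon_mtd_partition() is the usual container_of() pattern - the MTD core hands callbacks a pointer to the embedded common.mtd member, and subtracting its offset within the enclosing struct recovers the partition. A minimal sketch:]

	struct falcon_mtd_partition part;		/* enclosing object */
	struct mtd_info *mtd = &part.common.mtd;	/* what MTD callbacks see */

	/* container_of() subtracts offsetof(struct falcon_mtd_partition,
	 * common.mtd) from mtd, so p == &part */
	struct falcon_mtd_partition *p = to_falcon_mtd_partition(mtd);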
+static size_t
+falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
+{
+	return min(FALCON_SPI_MAX_LEN,
+		   (spi->block_size - (start & (spi->block_size - 1))));
+}
+
+/* Wait up to 10 ms for buffered write completion */
+static int
+falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
+	u8 status;
+	int rc;
+
+	for (;;) {
+		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+				    &status, sizeof(status));
+		if (rc)
+			return rc;
+		if (!(status & SPI_STATUS_NRDY))
+			return 0;
+		if (time_after_eq(jiffies, timeout)) {
+			netif_err(efx, hw, efx->net_dev,
+				  "SPI write timeout on device %d"
+				  " last status=0x%02x\n",
+				  spi->device_id, status);
+			return -ETIMEDOUT;
+		}
+		schedule_timeout_uninterruptible(1);
+	}
+}
+
+static int
+falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
 		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
 {
 	u8 verify_buffer[FALCON_SPI_MAX_LEN];
@@ -383,7 +666,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
 
 		block_len = min(len - pos,
 				falcon_spi_write_limit(spi, start + pos));
-		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
 		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 				    buffer + pos, NULL, block_len);
 		if (rc)
@@ -393,7 +676,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
 		if (rc)
 			break;
 
-		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
 		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 				    NULL, verify_buffer, block_len);
 		if (memcmp(verify_buffer, buffer + pos, block_len)) {
@@ -416,6 +699,520 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
 	return rc;
 }
 
+static int
+falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
+{
+	const struct falcon_spi_device *spi = part->spi;
+	struct efx_nic *efx = part->common.mtd.priv;
+	u8 status;
+	int rc, i;
+
+	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
+	for (i = 0; i < 40; i++) {
+		__set_current_state(uninterruptible ?
+				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ / 10);
+		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+				    &status, sizeof(status));
+		if (rc)
+			return rc;
+		if (!(status & SPI_STATUS_NRDY))
+			return 0;
+		if (signal_pending(current))
+			return -EINTR;
+	}
+	pr_err("%s: timed out waiting for %s\n",
+	       part->common.name, part->common.dev_type_name);
+	return -ETIMEDOUT;
+}
+
+static int
+falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
+				SPI_STATUS_BP0);
+	u8 status;
+	int rc;
+
+	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+			    &status, sizeof(status));
+	if (rc)
+		return rc;
+
+	if (!(status & unlock_mask))
+		return 0; /* already unlocked */
+
+	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+	if (rc)
+		return rc;
+	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
+	if (rc)
+		return rc;
+
+	status &= ~unlock_mask;
+	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
+			    NULL, sizeof(status));
+	if (rc)
+		return rc;
+	rc = falcon_spi_wait_write(efx, spi);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+#define FALCON_SPI_VERIFY_BUF_LEN 16
+
+static int
+falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
+{
+	const struct falcon_spi_device *spi = part->spi;
+	struct efx_nic *efx = part->common.mtd.priv;
+	unsigned pos, block_len;
+	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
+	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
+	int rc;
+
+	if (len != spi->erase_size)
+		return -EINVAL;
+
+	if (spi->erase_command == 0)
+		return -EOPNOTSUPP;
+
+	rc = falcon_spi_unlock(efx, spi);
+	if (rc)
+		return rc;
+	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+	if (rc)
+		return rc;
+	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
+			    NULL, 0);
+	if (rc)
+		return rc;
+	rc = falcon_spi_slow_wait(part, false);
+
+	/* Verify the entire region has been wiped */
+	memset(empty, 0xff, sizeof(empty));
+	for (pos = 0; pos < len; pos += block_len) {
+		block_len = min(len - pos, sizeof(buffer));
+		rc = falcon_spi_read(efx, spi, start + pos, block_len,
+				     NULL, buffer);
+		if (rc)
+			return rc;
+		if (memcmp(empty, buffer, block_len))
+			return -EIO;
+
+		/* Avoid locking up the system */
+		cond_resched();
+		if (signal_pending(current))
+			return -EINTR;
+	}
+
+	return rc;
+}
+
+static void falcon_mtd_rename(struct efx_mtd_partition *part)
+{
+	struct efx_nic *efx = part->mtd.priv;
+
+	snprintf(part->name, sizeof(part->name), "%s %s",
+		 efx->name, part->type_name);
+}
+
+static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
+			   size_t len, size_t *retlen, u8 *buffer)
+{
+	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	rc = mutex_lock_interruptible(&nic_data->spi_lock);
+	if (rc)
+		return rc;
+	rc = falcon_spi_read(efx, part->spi, part->offset + start,
+			     len, retlen, buffer);
+	mutex_unlock(&nic_data->spi_lock);
+	return rc;
+}
+
+static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	rc = mutex_lock_interruptible(&nic_data->spi_lock);
+	if (rc)
+		return rc;
+	rc = falcon_spi_erase(part, part->offset + start, len);
+	mutex_unlock(&nic_data->spi_lock);
+	return rc;
+}
+
+static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
+			    size_t len, size_t *retlen, const u8 *buffer)
+{
+	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	rc = mutex_lock_interruptible(&nic_data->spi_lock);
+	if (rc)
+		return rc;
+	rc = falcon_spi_write(efx, part->spi, part->offset + start,
+			      len, retlen, buffer);
+	mutex_unlock(&nic_data->spi_lock);
+	return rc;
+}
+
+static int falcon_mtd_sync(struct mtd_info *mtd)
+{
+	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	mutex_lock(&nic_data->spi_lock);
+	rc = falcon_spi_slow_wait(part, true);
+	mutex_unlock(&nic_data->spi_lock);
+	return rc;
+}
+
+static int falcon_mtd_probe(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	struct falcon_mtd_partition *parts;
+	struct falcon_spi_device *spi;
+	size_t n_parts;
+	int rc = -ENODEV;
+
+	ASSERT_RTNL();
+
+	/* Allocate space for maximum number of partitions */
+	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
+	if (!parts)
+		return -ENOMEM;
+	n_parts = 0;
+
+	spi = &nic_data->spi_flash;
+	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+		parts[n_parts].spi = spi;
+		parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
+		parts[n_parts].common.dev_type_name = "flash";
+		parts[n_parts].common.type_name = "sfc_flash_bootrom";
+		parts[n_parts].common.mtd.type = MTD_NORFLASH;
+		parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
+		parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+		parts[n_parts].common.mtd.erasesize = spi->erase_size;
+		n_parts++;
+	}
+
+	spi = &nic_data->spi_eeprom;
+	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
+		parts[n_parts].spi = spi;
+		parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
+		parts[n_parts].common.dev_type_name = "EEPROM";
+		parts[n_parts].common.type_name = "sfc_bootconfig";
+		parts[n_parts].common.mtd.type = MTD_RAM;
+		parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
+		parts[n_parts].common.mtd.size =
+			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
+			FALCON_EEPROM_BOOTCONFIG_START;
+		parts[n_parts].common.mtd.erasesize = spi->erase_size;
+		n_parts++;
+	}
+
+	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+	if (rc)
+		kfree(parts);
+	return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * XMAC operations
+ *
+ **************************************************************************
+ */
+
+/* Configure the XAUI driver that is an output from Falcon */
+static void falcon_setup_xaui(struct efx_nic *efx)
+{
+	efx_oword_t sdctl, txdrv;
+
+	/* Move the XAUI into low power, unless there is no PHY, in
+	 * which case the XAUI will have to drive a cable. */
+	if (efx->phy_type == PHY_TYPE_NONE)
+		return;
+
+	efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+	efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
+
+	EFX_POPULATE_OWORD_8(txdrv,
+			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+	efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
+}
+
+int falcon_reset_xaui(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	efx_oword_t reg;
+	int count;
+
+	/* Don't fetch MAC statistics over an XMAC reset */
+	WARN_ON(nic_data->stats_disable_count == 0);
+
+	/* Start reset sequence */
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+	efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
+
+	/* Wait up to 10 ms for completion, then reinitialise */
+	for (count = 0; count < 1000; count++) {
+		efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+		if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+		    EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
+			falcon_setup_xaui(efx);
+			return 0;
+		}
+		udelay(10);
+	}
+	netif_err(efx, hw, efx->net_dev,
+		  "timed out waiting for XAUI/XGXS reset\n");
+	return -ETIMEDOUT;
+}
+
+static void falcon_ack_status_intr(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	efx_oword_t reg;
+
+	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
+		return;
+
+	/* We expect xgmii faults if the wireside link is down */
+	if (!efx->link_state.up)
+		return;
+
+	/* We can only use this interrupt to signal the negative edge of
+	 * xaui_align [we have to poll the positive edge]. */
+	if (nic_data->xmac_poll_required)
+		return;
+
+	efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
+}
+
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
+{
+	efx_oword_t reg;
+	bool align_done, link_ok = false;
+	int sync_status;
+
+	/* Read link status */
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+
+	align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+	sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
+		link_ok = true;
+
+	/* Clear link status ready for next read */
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+	return link_ok;
+}
+
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+	/*
+	 * Check MAC's XGXS link status except when using XGMII loopback
+	 * which bypasses the XGXS block.
+	 * If possible, check PHY's XGXS link status except when using
+	 * MAC loopback.
+	 */
+	return (efx->loopback_mode == LOOPBACK_XGMII ||
+		falcon_xgxs_link_ok(efx)) &&
+		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+		 LOOPBACK_INTERNAL(efx) ||
+		 efx_mdio_phyxgxs_lane_sync(efx));
+}
+
+static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+{
+	unsigned int max_frame_len;
+	efx_oword_t reg;
+	bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
+	bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
+
+	/* Configure MAC - cut-thru mode is hard wired on */
+	EFX_POPULATE_OWORD_3(reg,
+			     FRF_AB_XM_RX_JUMBO_MODE, 1,
+			     FRF_AB_XM_TX_STAT_EN, 1,
+			     FRF_AB_XM_RX_STAT_EN, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+	/* Configure TX */
+	EFX_POPULATE_OWORD_6(reg,
+			     FRF_AB_XM_TXEN, 1,
+			     FRF_AB_XM_TX_PRMBL, 1,
+			     FRF_AB_XM_AUTO_PAD, 1,
+			     FRF_AB_XM_TXCRC, 1,
+			     FRF_AB_XM_FCNTL, tx_fc,
+			     FRF_AB_XM_IPG, 0x3);
+	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
+
+	/* Configure RX */
+	EFX_POPULATE_OWORD_5(reg,
+			     FRF_AB_XM_RXEN, 1,
+			     FRF_AB_XM_AUTO_DEPAD, 0,
+			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
+			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
+			     FRF_AB_XM_PASS_CRC_ERR, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
+
+	/* Set frame length */
+	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+	EFX_POPULATE_OWORD_2(reg,
+			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+			     FRF_AB_XM_TX_JUMBO_MODE, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+
+	EFX_POPULATE_OWORD_2(reg,
+			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
+	efx_writeo(efx, &reg, FR_AB_XM_FC);
+
+	/* Set MAC address */
+	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
+}
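[Editorial note: the address store at the end splits the six dev_addr bytes as four octets into XM_ADR_LO and two into XM_ADR_HI, via plain memcpy() into the low bytes of the register image. Purely as an illustration, with a made-up address:]

	/* dev_addr = 00:0f:53:01:02:03 (hypothetical) */
	memcpy(&reg, &dev_addr[0], 4);	/* XM_ADR_LO <- 00 0f 53 01 */
	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &dev_addr[4], 2);	/* XM_ADR_HI <- 02 03 */
	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);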
+
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+	efx_oword_t reg;
+	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
+	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+
+	/* XGXS block is flaky and will need to be reset if moving
+	 * into or out of XGMII, XGXS or XAUI loopbacks. */
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+	old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+	old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+
+	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+	old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+
+	/* The PHY driver may have turned XAUI off */
+	if ((xgxs_loopback != old_xgxs_loopback) ||
+	    (xaui_loopback != old_xaui_loopback) ||
+	    (xgmii_loopback != old_xgmii_loopback))
+		falcon_reset_xaui(efx);
+
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
+			    (xgxs_loopback || xaui_loopback) ?
+			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
+}
+
+
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
+{
+	bool mac_up = falcon_xmac_link_ok(efx);
+
+	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
+	    efx_phy_mode_disabled(efx->phy_mode))
+		/* XAUI link is expected to be down */
+		return mac_up;
+
+	falcon_stop_nic_stats(efx);
+
+	while (!mac_up && tries) {
+		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
+		falcon_reset_xaui(efx);
+		udelay(200);
+
+		mac_up = falcon_xmac_link_ok(efx);
+		--tries;
+	}
+
+	falcon_start_nic_stats(efx);
+
+	return mac_up;
+}
+
+static bool falcon_xmac_check_fault(struct efx_nic *efx)
+{
+	return !falcon_xmac_link_ok_retry(efx, 5);
+}
+
+static int falcon_reconfigure_xmac(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+
+	efx_farch_filter_sync_rx_mode(efx);
+
+	falcon_reconfigure_xgxs_core(efx);
+	falcon_reconfigure_xmac_core(efx);
+
+	falcon_reconfigure_mac_wrapper(efx);
+
+	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+	falcon_ack_status_intr(efx);
+
+	return 0;
+}
+
+static void falcon_poll_xmac(struct efx_nic *efx)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+
+	/* We expect xgmii faults if the wireside link is down */
+	if (!efx->link_state.up || !nic_data->xmac_poll_required)
+		return;
+
+	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+	falcon_ack_status_intr(efx);
+}
+
 /**************************************************************************
  *
  * MAC wrapper
@@ -497,7 +1294,7 @@ static void falcon_reset_macs(struct efx_nic *efx)
 	falcon_setup_xaui(efx);
 }
 
-void falcon_drain_tx_fifo(struct efx_nic *efx)
+static void falcon_drain_tx_fifo(struct efx_nic *efx)
 {
 	efx_oword_t reg;
 
@@ -529,7 +1326,7 @@ static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 	falcon_drain_tx_fifo(efx);
 }
 
-void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 {
 	struct efx_link_state *link_state = &efx->link_state;
 	efx_oword_t reg;
@@ -550,7 +1347,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	EFX_POPULATE_OWORD_5(reg,
 			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
 			     FRF_AB_MAC_BCAD_ACPT, 1,
-			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
+			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
 			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
 			     FRF_AB_MAC_SPEED, link_speed);
 	/* On B0, MAC backpressure can be disabled and packets get
@@ -583,10 +1380,7 @@ static void falcon_stats_request(struct efx_nic *efx)
 	WARN_ON(nic_data->stats_pending);
 	WARN_ON(nic_data->stats_disable_count);
 
-	if (nic_data->stats_dma_done == NULL)
-		return;	/* no mac selected */
-
-	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
+	FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
 	nic_data->stats_pending = true;
 	wmb(); /* ensure done flag is clear */
 
@@ -608,9 +1402,11 @@ static void falcon_stats_complete(struct efx_nic *efx)
 		return;
 
 	nic_data->stats_pending = false;
-	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
+	if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
 		rmb(); /* read the done flag before the stats */
-		falcon_update_stats_xmac(efx);
+		efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+				     falcon_stat_mask, nic_data->stats,
+				     efx->stats_buffer.addr, true);
 	} else {
 		netif_err(efx, hw, efx->net_dev,
 			  "timed out waiting for statistics\n");
@@ -678,6 +1474,28 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
 	return 0;
 }
 
+/* TX flow control may automatically turn itself off if the link
+ * partner (intermittently) stops responding to pause frames. There
+ * isn't any indication that this has happened, so the best we can do
+ * is leave it up to the user to spot this and fix it by cycling
+ * transmit flow control on this end.
+ */
+
+static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+	/* Schedule a reset to recover */
+	efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+}
+
+static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+	/* Recover by resetting the EM block */
+	falcon_stop_nic_stats(efx);
+	falcon_drain_tx_fifo(efx);
+	falcon_reconfigure_xmac(efx);
+	falcon_start_nic_stats(efx);
+}
+
 /**************************************************************************
  *
  * PHY access via GMII
@@ -861,7 +1679,7 @@ static int falcon_probe_port(struct efx_nic *efx)
 
 	/* Allocate buffer for stats */
 	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-				  FALCON_MAC_STATS_SIZE);
+				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
 	if (rc)
 		return rc;
 	netif_dbg(efx, probe, efx->net_dev,
@@ -869,7 +1687,6 @@ static int falcon_probe_port(struct efx_nic *efx)
 		  (u64)efx->stats_buffer.dma_addr,
 		  efx->stats_buffer.addr,
 		  (u64)virt_to_phys(efx->stats_buffer.addr));
-	nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
 
 	return 0;
 }
@@ -926,15 +1743,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
 	struct falcon_nvconfig *nvconfig;
-	struct efx_spi_device *spi;
+	struct falcon_spi_device *spi;
 	void *region;
 	int rc, magic_num, struct_ver;
 	__le16 *word, *limit;
 	u32 csum;
 
-	if (efx_spi_present(&nic_data->spi_flash))
+	if (falcon_spi_present(&nic_data->spi_flash))
 		spi = &nic_data->spi_flash;
-	else if (efx_spi_present(&nic_data->spi_eeprom))
+	else if (falcon_spi_present(&nic_data->spi_eeprom))
 		spi = &nic_data->spi_eeprom;
 	else
 		return -EINVAL;
@@ -949,7 +1766,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 	mutex_unlock(&nic_data->spi_lock);
 	if (rc) {
 		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
-			  efx_spi_present(&nic_data->spi_flash) ?
+			  falcon_spi_present(&nic_data->spi_flash) ?
 			  "flash" : "EEPROM");
 		rc = -EIO;
 		goto out;
@@ -998,7 +1815,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
 	return falcon_read_nvram(efx, NULL);
 }
 
-static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+static const struct efx_farch_register_test falcon_b0_register_tests[] = {
 	{ FR_AZ_ADR_REGION,
 	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
 	{ FR_AZ_RX_CFG,
@@ -1058,8 +1875,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 	efx_reset_down(efx, reset_method);
 
 	tests->registers =
-		efx_nic_test_registers(efx, falcon_b0_register_tests,
-				       ARRAY_SIZE(falcon_b0_register_tests))
+		efx_farch_test_registers(efx, falcon_b0_register_tests,
+					 ARRAY_SIZE(falcon_b0_register_tests))
 		? -1 : 1;
 
 	rc = falcon_reset_hw(efx, reset_method);
@@ -1078,8 +1895,7 @@ static enum reset_type falcon_map_reset_reason(enum reset_type reason)
 {
 	switch (reason) {
 	case RESET_TYPE_RX_RECOVERY:
-	case RESET_TYPE_RX_DESC_FETCH:
-	case RESET_TYPE_TX_DESC_FETCH:
+	case RESET_TYPE_DMA_ERROR:
 	case RESET_TYPE_TX_SKIP:
 		/* These can occasionally occur due to hardware bugs.
 		 * We try to reset without disrupting the link.
@@ -1294,7 +2110,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
 }
 
 static void falcon_spi_device_init(struct efx_nic *efx,
-				   struct efx_spi_device *spi_device,
+				   struct falcon_spi_device *spi_device,
 				   unsigned int device_id, u32 device_type)
 {
 	if (device_type != 0) {
@@ -1360,10 +2176,11 @@ out:
 	return rc;
 }
 
-static void falcon_dimension_resources(struct efx_nic *efx)
+static int falcon_dimension_resources(struct efx_nic *efx)
 {
 	efx->rx_dc_base = 0x20000;
 	efx->tx_dc_base = 0x26000;
+	return 0;
 }
 
 /* Probe all SPI devices on the NIC */
@@ -1410,6 +2227,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
 			       large_eeprom_type);
 }
 
+static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
+{
+	return 0x20000;
+}
+
+static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
+{
+	/* Map everything up to and including the RSS indirection table.
+	 * The PCI core takes care of mapping the MSI-X tables.
+	 */
+	return FR_BZ_RX_INDIRECTION_TBL +
+	       FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
+}
+
 static int falcon_probe_nic(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data;
@@ -1424,7 +2255,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
 
 	rc = -ENODEV;
 
-	if (efx_nic_fpga_ver(efx) != 0) {
+	if (efx_farch_fpga_ver(efx) != 0) {
 		netif_err(efx, probe, efx->net_dev,
 			  "Falcon FPGA not supported\n");
 		goto fail1;
@@ -1478,7 +2309,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
 	}
 
 	/* Allocate memory for INT_KER */
-	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+				  GFP_KERNEL);
 	if (rc)
 		goto fail4;
 	BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -1499,6 +2331,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
 		goto fail5;
 	}
 
+	efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
+			     EFX_MAX_CHANNELS);
 	efx->timer_quantum_ns = 4968; /* 621 cycles */
 
 	/* Initialise I2C adapter */
@@ -1657,7 +2491,7 @@ static int falcon_init_nic(struct efx_nic *efx)
 		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
 	}
 
-	efx_nic_init_common(efx);
+	efx_farch_init_common(efx);
 
 	return 0;
 }
@@ -1688,24 +2522,65 @@ static void falcon_remove_nic(struct efx_nic *efx)
 	efx->nic_data = NULL;
 }
 
-static void falcon_update_nic_stats(struct efx_nic *efx)
+static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+	return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+				      falcon_stat_mask, names);
+}
+
+static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+				      struct rtnl_link_stats64 *core_stats)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
+	u64 *stats = nic_data->stats;
 	efx_oword_t cnt;
 
-	if (nic_data->stats_disable_count)
-		return;
+	if (!nic_data->stats_disable_count) {
+		efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+		stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
+			EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
+		if (nic_data->stats_pending &&
+		    FALCON_XMAC_STATS_DMA_FLAG(efx)) {
+			nic_data->stats_pending = false;
+			rmb(); /* read the done flag before the stats */
+			efx_nic_update_stats(
+				falcon_stat_desc, FALCON_STAT_COUNT,
+				falcon_stat_mask,
+				stats, efx->stats_buffer.addr, true);
+		}
 
-	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
-	efx->n_rx_nodesc_drop_cnt +=
-		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+		/* Update derived statistic */
+		efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
+				     stats[FALCON_STAT_rx_bytes] -
+				     stats[FALCON_STAT_rx_good_bytes] -
+				     stats[FALCON_STAT_rx_control] * 64);
+	}
 
-	if (nic_data->stats_pending &&
-	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
-		nic_data->stats_pending = false;
-		rmb(); /* read the done flag before the stats */
-		falcon_update_stats_xmac(efx);
+	if (full_stats)
+		memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);
+
+	if (core_stats) {
+		core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
+		core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
+		core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
+		core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
+		core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
+		core_stats->multicast = stats[FALCON_STAT_rx_multicast];
+		core_stats->rx_length_errors =
+			stats[FALCON_STAT_rx_gtjumbo] +
+			stats[FALCON_STAT_rx_length_error];
+		core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
+		core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
+		core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];
+
+		core_stats->rx_errors = (core_stats->rx_length_errors +
+					 core_stats->rx_crc_errors +
+					 core_stats->rx_frame_errors +
+					 stats[FALCON_STAT_rx_symbol_error]);
 	}
+
+	return FALCON_STAT_COUNT;
 }
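[Editorial note: the derived rx_bad_bytes figure deserves a gloss. The MAC counts pause/control frames in rx_bytes but not in rx_good_bytes, and the code treats each as a minimum-size 64-byte frame, so bad bytes are whatever remains after both are subtracted. With made-up numbers, and assuming efx_update_diff_stat() merely keeps the running value monotonic:]

	u64 rx_bytes      = 1000000;	/* all octets received */
	u64 rx_good_bytes =  987200;	/* octets in good frames */
	u64 rx_control    =      50;	/* pause/control frames */

	u64 rx_bad_bytes = rx_bytes - rx_good_bytes - rx_control * 64;
	/* = 1000000 - 987200 - 3200 = 9600 */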
 
 void falcon_start_nic_stats(struct efx_nic *efx)
@@ -1734,7 +2609,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx)
 	/* Wait enough time for the most recent transfer to
 	 * complete. */
 	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
-		if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
+		if (FALCON_XMAC_STATS_DMA_FLAG(efx))
 			break;
 		msleep(1);
 	}
@@ -1778,11 +2653,12 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
 	 */
 
 const struct efx_nic_type falcon_a1_nic_type = {
+	.mem_map_size = falcon_a1_mem_map_size,
 	.probe = falcon_probe_nic,
 	.remove = falcon_remove_nic,
 	.init = falcon_init_nic,
 	.dimension_resources = falcon_dimension_resources,
-	.fini = efx_port_dummy_op_void,
+	.fini = falcon_irq_ack_a1,
 	.monitor = falcon_monitor,
 	.map_reset_reason = falcon_map_reset_reason,
 	.map_reset_flags = falcon_map_reset_flags,
@@ -1790,23 +2666,71 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.probe_port = falcon_probe_port,
 	.remove_port = falcon_remove_port,
 	.handle_global_event = falcon_handle_global_event,
+	.fini_dmaq = efx_farch_fini_dmaq,
 	.prepare_flush = falcon_prepare_flush,
 	.finish_flush = efx_port_dummy_op_void,
+	.describe_stats = falcon_describe_nic_stats,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
 	.stop_stats = falcon_stop_nic_stats,
 	.set_id_led = falcon_set_id_led,
 	.push_irq_moderation = falcon_push_irq_moderation,
 	.reconfigure_port = falcon_reconfigure_port,
+	.prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
 	.reconfigure_mac = falcon_reconfigure_xmac,
 	.check_mac_fault = falcon_xmac_check_fault,
 	.get_wol = falcon_get_wol,
 	.set_wol = falcon_set_wol,
 	.resume_wol = efx_port_dummy_op_void,
 	.test_nvram = falcon_test_nvram,
+	.irq_enable_master = efx_farch_irq_enable_master,
+	.irq_test_generate = efx_farch_irq_test_generate,
+	.irq_disable_non_ev = efx_farch_irq_disable_master,
+	.irq_handle_msi = efx_farch_msi_interrupt,
+	.irq_handle_legacy = falcon_legacy_interrupt_a1,
+	.tx_probe = efx_farch_tx_probe,
+	.tx_init = efx_farch_tx_init,
+	.tx_remove = efx_farch_tx_remove,
+	.tx_write = efx_farch_tx_write,
+	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_probe = efx_farch_rx_probe,
+	.rx_init = efx_farch_rx_init,
+	.rx_remove = efx_farch_rx_remove,
+	.rx_write = efx_farch_rx_write,
+	.rx_defer_refill = efx_farch_rx_defer_refill,
+	.ev_probe = efx_farch_ev_probe,
+	.ev_init = efx_farch_ev_init,
+	.ev_fini = efx_farch_ev_fini,
+	.ev_remove = efx_farch_ev_remove,
+	.ev_process = efx_farch_ev_process,
+	.ev_read_ack = efx_farch_ev_read_ack,
+	.ev_test_generate = efx_farch_ev_test_generate,
+
+	/* We don't expose the filter table on Falcon A1 as it is not
+	 * mapped into function 0, but these implementations still
+	 * work with a degenerate case of all tables set to size 0.
+	 */
+	.filter_table_probe = efx_farch_filter_table_probe,
+	.filter_table_restore = efx_farch_filter_table_restore,
+	.filter_table_remove = efx_farch_filter_table_remove,
+	.filter_insert = efx_farch_filter_insert,
+	.filter_remove_safe = efx_farch_filter_remove_safe,
+	.filter_get_safe = efx_farch_filter_get_safe,
+	.filter_clear_rx = efx_farch_filter_clear_rx,
+	.filter_count_rx_used = efx_farch_filter_count_rx_used,
+	.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+	.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+
+#ifdef CONFIG_SFC_MTD
+	.mtd_probe = falcon_mtd_probe,
+	.mtd_rename = falcon_mtd_rename,
+	.mtd_read = falcon_mtd_read,
+	.mtd_erase = falcon_mtd_erase,
+	.mtd_write = falcon_mtd_write,
+	.mtd_sync = falcon_mtd_sync,
+#endif
 
 	.revision = EFX_REV_FALCON_A1,
-	.mem_map_size = 0x20000,
 	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
 	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
 	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
@@ -1816,12 +2740,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.rx_buffer_padding = 0x24,
 	.can_rx_scatter = false,
 	.max_interrupt_mode = EFX_INT_MODE_MSI,
-	.phys_addr_channels = 4,
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
 	.offload_features = NETIF_F_IP_CSUM,
+	.mcdi_max_ver = -1,
 };
 
 const struct efx_nic_type falcon_b0_nic_type = {
+	.mem_map_size = falcon_b0_mem_map_size,
 	.probe = falcon_probe_nic,
 	.remove = falcon_remove_nic,
 	.init = falcon_init_nic,
@@ -1834,14 +2759,17 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.probe_port = falcon_probe_port,
 	.remove_port = falcon_remove_port,
 	.handle_global_event = falcon_handle_global_event,
+	.fini_dmaq = efx_farch_fini_dmaq,
 	.prepare_flush = falcon_prepare_flush,
 	.finish_flush = efx_port_dummy_op_void,
+	.describe_stats = falcon_describe_nic_stats,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
 	.stop_stats = falcon_stop_nic_stats,
 	.set_id_led = falcon_set_id_led,
 	.push_irq_moderation = falcon_push_irq_moderation,
 	.reconfigure_port = falcon_reconfigure_port,
+	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
 	.reconfigure_mac = falcon_reconfigure_xmac,
 	.check_mac_fault = falcon_xmac_check_fault,
 	.get_wol = falcon_get_wol,
@@ -1849,28 +2777,67 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.resume_wol = efx_port_dummy_op_void,
 	.test_chip = falcon_b0_test_chip,
 	.test_nvram = falcon_test_nvram,
+	.irq_enable_master = efx_farch_irq_enable_master,
+	.irq_test_generate = efx_farch_irq_test_generate,
+	.irq_disable_non_ev = efx_farch_irq_disable_master,
+	.irq_handle_msi = efx_farch_msi_interrupt,
+	.irq_handle_legacy = efx_farch_legacy_interrupt,
+	.tx_probe = efx_farch_tx_probe,
+	.tx_init = efx_farch_tx_init,
+	.tx_remove = efx_farch_tx_remove,
+	.tx_write = efx_farch_tx_write,
+	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_probe = efx_farch_rx_probe,
+	.rx_init = efx_farch_rx_init,
+	.rx_remove = efx_farch_rx_remove,
+	.rx_write = efx_farch_rx_write,
+	.rx_defer_refill = efx_farch_rx_defer_refill,
+	.ev_probe = efx_farch_ev_probe,
+	.ev_init = efx_farch_ev_init,
+	.ev_fini = efx_farch_ev_fini,
+	.ev_remove = efx_farch_ev_remove,
+	.ev_process = efx_farch_ev_process,
+	.ev_read_ack = efx_farch_ev_read_ack,
+	.ev_test_generate = efx_farch_ev_test_generate,
+	.filter_table_probe = efx_farch_filter_table_probe,
+	.filter_table_restore = efx_farch_filter_table_restore,
+	.filter_table_remove = efx_farch_filter_table_remove,
+	.filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+	.filter_insert = efx_farch_filter_insert,
+	.filter_remove_safe = efx_farch_filter_remove_safe,
+	.filter_get_safe = efx_farch_filter_get_safe,
+	.filter_clear_rx = efx_farch_filter_clear_rx,
+	.filter_count_rx_used = efx_farch_filter_count_rx_used,
+	.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+	.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+	.filter_rfs_insert = efx_farch_filter_rfs_insert,
+	.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+	.mtd_probe = falcon_mtd_probe,
+	.mtd_rename = falcon_mtd_rename,
+	.mtd_read = falcon_mtd_read,
+	.mtd_erase = falcon_mtd_erase,
+	.mtd_write = falcon_mtd_write,
+	.mtd_sync = falcon_mtd_sync,
+#endif
 
 	.revision = EFX_REV_FALCON_B0,
-	/* Map everything up to and including the RSS indirection
-	 * table. Don't map MSI-X table, MSI-X PBA since Linux
-	 * requires that they not be mapped. */
-	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
-			 FR_BZ_RX_INDIRECTION_TBL_STEP *
-			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
 	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
 	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
 	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
 	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
 	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
-	.rx_buffer_hash_size = 0x10,
+	.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+	.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
 	.rx_buffer_padding = 0,
 	.can_rx_scatter = true,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
-	.phys_addr_channels = 32,	/* Hardware limit is 64, but the legacy
-					 * interrupt handler only supports 32
-					 * channels */
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
 	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+	.mcdi_max_ver = -1,
+	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
 };
 
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index ec1e99d0dcad..1736f4b806af 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2012 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
deleted file mode 100644
index 8333865d4c95..000000000000
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ /dev/null
@@ -1,362 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "nic.h"
15#include "regs.h"
16#include "io.h"
17#include "mdio_10g.h"
18#include "workarounds.h"
19
20/**************************************************************************
21 *
22 * MAC operations
23 *
24 *************************************************************************/
25
26/* Configure the XAUI driver that is an output from Falcon */
27void falcon_setup_xaui(struct efx_nic *efx)
28{
29 efx_oword_t sdctl, txdrv;
30
31 /* Move the XAUI into low power, unless there is no PHY, in
32 * which case the XAUI will have to drive a cable. */
33 if (efx->phy_type == PHY_TYPE_NONE)
34 return;
35
36 efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
37 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
38 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
39 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
40 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
41 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
42 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
43 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
44 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
45 efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
46
47 EFX_POPULATE_OWORD_8(txdrv,
48 FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
49 FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
50 FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
51 FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
52 FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
53 FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
54 FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
55 FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
56 efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
57}
58
59int falcon_reset_xaui(struct efx_nic *efx)
60{
61 struct falcon_nic_data *nic_data = efx->nic_data;
62 efx_oword_t reg;
63 int count;
64
65 /* Don't fetch MAC statistics over an XMAC reset */
66 WARN_ON(nic_data->stats_disable_count == 0);
67
68 /* Start reset sequence */
69 EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
70 efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
71
72 /* Wait up to 10 ms for completion, then reinitialise */
73 for (count = 0; count < 1000; count++) {
74 efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
75 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
76 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
77 falcon_setup_xaui(efx);
78 return 0;
79 }
80 udelay(10);
81 }
82 netif_err(efx, hw, efx->net_dev,
83 "timed out waiting for XAUI/XGXS reset\n");
84 return -ETIMEDOUT;
85}
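/* Illustration (added for clarity, not in the source): the poll loop
 * above budgets 1000 iterations * udelay(10), i.e. 1000 * 10 us = 10 ms,
 * matching the "10 ms" promised in the comment before the loop.
 */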
86
87static void falcon_ack_status_intr(struct efx_nic *efx)
88{
89 struct falcon_nic_data *nic_data = efx->nic_data;
90 efx_oword_t reg;
91
92 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
93 return;
94
95 /* We expect xgmii faults if the wireside link is down */
96 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
97 return;
98
99 /* We can only use this interrupt to signal the negative edge of
100 * xaui_align [we have to poll the positive edge]. */
101 if (nic_data->xmac_poll_required)
102 return;
103
104 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
105}
106
107static bool falcon_xgxs_link_ok(struct efx_nic *efx)
108{
109 efx_oword_t reg;
110 bool align_done, link_ok = false;
111 int sync_status;
112
113 /* Read link status */
114 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
115
116 align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
117 sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
118 if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
119 link_ok = true;
120
121 /* Clear link status ready for next read */
122 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
123 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
124 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
125 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
126
127 return link_ok;
128}
129
130static bool falcon_xmac_link_ok(struct efx_nic *efx)
131{
132 /*
133 * Check MAC's XGXS link status except when using XGMII loopback
134 * which bypasses the XGXS block.
135 * If possible, check PHY's XGXS link status except when using
136 * MAC loopback.
137 */
138 return (efx->loopback_mode == LOOPBACK_XGMII ||
139 falcon_xgxs_link_ok(efx)) &&
140 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
141 LOOPBACK_INTERNAL(efx) ||
142 efx_mdio_phyxgxs_lane_sync(efx));
143}
144
145static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
146{
147 unsigned int max_frame_len;
148 efx_oword_t reg;
149 bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
150 bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
151
152 /* Configure MAC - cut-thru mode is hard wired on */
153 EFX_POPULATE_OWORD_3(reg,
154 FRF_AB_XM_RX_JUMBO_MODE, 1,
155 FRF_AB_XM_TX_STAT_EN, 1,
156 FRF_AB_XM_RX_STAT_EN, 1);
157 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
158
159 /* Configure TX */
160 EFX_POPULATE_OWORD_6(reg,
161 FRF_AB_XM_TXEN, 1,
162 FRF_AB_XM_TX_PRMBL, 1,
163 FRF_AB_XM_AUTO_PAD, 1,
164 FRF_AB_XM_TXCRC, 1,
165 FRF_AB_XM_FCNTL, tx_fc,
166 FRF_AB_XM_IPG, 0x3);
167 efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
168
169 /* Configure RX */
170 EFX_POPULATE_OWORD_5(reg,
171 FRF_AB_XM_RXEN, 1,
172 FRF_AB_XM_AUTO_DEPAD, 0,
173 FRF_AB_XM_ACPT_ALL_MCAST, 1,
174 FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
175 FRF_AB_XM_PASS_CRC_ERR, 1);
176 efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
177
178 /* Set frame length */
179 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
180 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
181 efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
182 EFX_POPULATE_OWORD_2(reg,
183 FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
184 FRF_AB_XM_TX_JUMBO_MODE, 1);
185 efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
186
187 EFX_POPULATE_OWORD_2(reg,
188 FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
189 FRF_AB_XM_DIS_FCNTL, !rx_fc);
190 efx_writeo(efx, &reg, FR_AB_XM_FC);
191
192 /* Set MAC address */
193 memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
194 efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
195 memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
196 efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
197}
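/* Illustrative sketch (not part of the driver): the two writes above
 * split the 6-byte station address across XM_ADR_LO/XM_ADR_HI.  For a
 * hypothetical address 00:0f:53:01:02:03:
 *
 *	FR_AB_XM_ADR_LO <- dev_addr[0..3] = 00 0f 53 01
 *	FR_AB_XM_ADR_HI <- dev_addr[4..5] = 02 03
 */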
198
199static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
200{
201 efx_oword_t reg;
202 bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
203 bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
204 bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
205
206 /* XGXS block is flaky and will need to be reset if moving
207	 * into or out of XGMII, XGXS or XAUI loopbacks. */
208 if (EFX_WORKAROUND_5147(efx)) {
209 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
210 bool reset_xgxs;
211
212 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
213 old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
214 old_xgmii_loopback =
215 EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
216
217 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
218 old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
219
220 /* The PHY driver may have turned XAUI off */
221 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
222 (xaui_loopback != old_xaui_loopback) ||
223 (xgmii_loopback != old_xgmii_loopback));
224
225 if (reset_xgxs)
226 falcon_reset_xaui(efx);
227 }
228
229 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
230 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
231 (xgxs_loopback || xaui_loopback) ?
232 FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
233 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
234 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
235 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
236
237 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
238 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
239 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
240 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
241 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
242 efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
243}
244
245
246/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
247static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
248{
249 bool mac_up = falcon_xmac_link_ok(efx);
250
251 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
252 efx_phy_mode_disabled(efx->phy_mode))
253 /* XAUI link is expected to be down */
254 return mac_up;
255
256 falcon_stop_nic_stats(efx);
257
258 while (!mac_up && tries) {
259 netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
260 falcon_reset_xaui(efx);
261 udelay(200);
262
263 mac_up = falcon_xmac_link_ok(efx);
264 --tries;
265 }
266
267 falcon_start_nic_stats(efx);
268
269 return mac_up;
270}
271
272bool falcon_xmac_check_fault(struct efx_nic *efx)
273{
274 return !falcon_xmac_link_ok_retry(efx, 5);
275}
276
277int falcon_reconfigure_xmac(struct efx_nic *efx)
278{
279 struct falcon_nic_data *nic_data = efx->nic_data;
280
281 falcon_reconfigure_xgxs_core(efx);
282 falcon_reconfigure_xmac_core(efx);
283
284 falcon_reconfigure_mac_wrapper(efx);
285
286 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
287 falcon_ack_status_intr(efx);
288
289 return 0;
290}
291
292void falcon_update_stats_xmac(struct efx_nic *efx)
293{
294 struct efx_mac_stats *mac_stats = &efx->mac_stats;
295
296 /* Update MAC stats from DMAed values */
297 FALCON_STAT(efx, XgRxOctets, rx_bytes);
298 FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
299 FALCON_STAT(efx, XgRxPkts, rx_packets);
300 FALCON_STAT(efx, XgRxPktsOK, rx_good);
301 FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
302 FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
303 FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
304 FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
305 FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
306 FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
307 FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
308 FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
309 FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
310 FALCON_STAT(efx, XgRxAlignError, rx_align_error);
311 FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
312 FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
313 FALCON_STAT(efx, XgRxControlPkts, rx_control);
314 FALCON_STAT(efx, XgRxPausePkts, rx_pause);
315 FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
316 FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
317 FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
318 FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
319 FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
320 FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
321 FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
322 FALCON_STAT(efx, XgRxLengthError, rx_length_error);
323 FALCON_STAT(efx, XgTxPkts, tx_packets);
324 FALCON_STAT(efx, XgTxOctets, tx_bytes);
325 FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
326 FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
327 FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
328 FALCON_STAT(efx, XgTxControlPkts, tx_control);
329 FALCON_STAT(efx, XgTxPausePkts, tx_pause);
330 FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
331 FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
332 FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
333 FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
334 FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
335 FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
336 FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
337 FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
338 FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
339 FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
340 FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
341 FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
342
343 /* Update derived statistics */
344 efx_update_diff_stat(&mac_stats->tx_good_bytes,
345 mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
346 mac_stats->tx_control * 64);
347 efx_update_diff_stat(&mac_stats->rx_bad_bytes,
348 mac_stats->rx_bytes - mac_stats->rx_good_bytes -
349 mac_stats->rx_control * 64);
350}
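/* Illustration (added, hedged): the two derived counters above deduct
 * control traffic at 64 bytes per frame, e.g. 10 pause frames would
 * remove 10 * 64 = 640 bytes from tx_good_bytes.  The fixed 64 assumes
 * control/pause frames are minimum-size Ethernet frames.
 */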
351
352void falcon_poll_xmac(struct efx_nic *efx)
353{
354 struct falcon_nic_data *nic_data = efx->nic_data;
355
356 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
357 !nic_data->xmac_poll_required)
358 return;
359
360 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
361 falcon_ack_status_intr(efx);
362}
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 000000000000..c0907d884d75
--- /dev/null
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,2942 @@
1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2013 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/interrupt.h>
14#include <linux/pci.h>
15#include <linux/module.h>
16#include <linux/seq_file.h>
17#include <linux/crc32.h>
18#include "net_driver.h"
19#include "bitfield.h"
20#include "efx.h"
21#include "nic.h"
22#include "farch_regs.h"
23#include "io.h"
24#include "workarounds.h"
25
26/* Falcon-architecture (SFC4000 and SFC9000-family) support */
27
28/**************************************************************************
29 *
30 * Configurable values
31 *
32 **************************************************************************
33 */
34
35/* This is set to 16 for a good reason. In summary, if larger than
36 * 16, the descriptor cache holds more than a default socket
37 * buffer's worth of packets (for UDP we can only have at most one
38 * socket buffer's worth outstanding). This combined with the fact
39 * that we only get 1 TX event per descriptor cache means the NIC
40 * goes idle.
41 */
42#define TX_DC_ENTRIES 16
43#define TX_DC_ENTRIES_ORDER 1
44
45#define RX_DC_ENTRIES 64
46#define RX_DC_ENTRIES_ORDER 3
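/* Observation (added, hedged): the _ORDER values appear to encode
 * log2(entries / 8) -- 16 == 8 << 1 and 64 == 8 << 3 -- so under that
 * assumption the two pairs of defines above are mutually consistent.
 */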
47
48/* If EFX_MAX_INT_ERRORS internal errors occur within
49 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
50 * disable it.
51 */
52#define EFX_INT_ERROR_EXPIRE 3600
53#define EFX_MAX_INT_ERRORS 5
54
55/* Depth of RX flush request fifo */
56#define EFX_RX_FLUSH_COUNT 4
57
58/* Driver generated events */
59#define _EFX_CHANNEL_MAGIC_TEST 0x000101
60#define _EFX_CHANNEL_MAGIC_FILL 0x000102
61#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
62#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
63
64#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
65#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
66
67#define EFX_CHANNEL_MAGIC_TEST(_channel) \
68 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
69#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
71 efx_rx_queue_index(_rx_queue))
72#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
74 efx_rx_queue_index(_rx_queue))
75#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
76 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
77 (_tx_queue)->queue)
78
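/* Worked example (illustrative): a test event for channel 3 encodes as
 *
 *	EFX_CHANNEL_MAGIC_TEST(channel)
 *		= _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, 3)
 *		= (0x000101 << 8) | 3 = 0x00010103
 *
 * and _EFX_CHANNEL_MAGIC_CODE(0x00010103) == 0x000101 recovers the code,
 * so a handler can match either the full magic word or just its code
 * part, as efx_farch_handle_generated_event() does below.
 */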
79static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
80
81/**************************************************************************
82 *
83 * Hardware access
84 *
85 **************************************************************************/
86
87static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
88 unsigned int index)
89{
90 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
91 value, index);
92}
93
94static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
95 const efx_oword_t *mask)
96{
97 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
98 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
99}
100
101int efx_farch_test_registers(struct efx_nic *efx,
102 const struct efx_farch_register_test *regs,
103 size_t n_regs)
104{
105 unsigned address = 0, i, j;
106 efx_oword_t mask, imask, original, reg, buf;
107
108 for (i = 0; i < n_regs; ++i) {
109 address = regs[i].address;
110 mask = imask = regs[i].mask;
111 EFX_INVERT_OWORD(imask);
112
113 efx_reado(efx, &original, address);
114
115 /* bit sweep on and off */
116 for (j = 0; j < 128; j++) {
117 if (!EFX_EXTRACT_OWORD32(mask, j, j))
118 continue;
119
120 /* Test this testable bit can be set in isolation */
121 EFX_AND_OWORD(reg, original, mask);
122 EFX_SET_OWORD32(reg, j, j, 1);
123
124 efx_writeo(efx, &reg, address);
125 efx_reado(efx, &buf, address);
126
127 if (efx_masked_compare_oword(&reg, &buf, &mask))
128 goto fail;
129
130 /* Test this testable bit can be cleared in isolation */
131 EFX_OR_OWORD(reg, original, mask);
132 EFX_SET_OWORD32(reg, j, j, 0);
133
134 efx_writeo(efx, &reg, address);
135 efx_reado(efx, &buf, address);
136
137 if (efx_masked_compare_oword(&reg, &buf, &mask))
138 goto fail;
139 }
140
141 efx_writeo(efx, &original, address);
142 }
143
144 return 0;
145
146fail:
147 netif_err(efx, hw, efx->net_dev,
148 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
149 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
150 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
151 return -EIO;
152}
153
154/**************************************************************************
155 *
156 * Special buffer handling
157 * Special buffers are used for event queues and the TX and RX
158 * descriptor rings.
159 *
160 *************************************************************************/
161
162/*
163 * Initialise a special buffer
164 *
165 * This will define a buffer (previously allocated via
166 * efx_alloc_special_buffer()) in the buffer table, allowing
167 * it to be used for event queues, descriptor rings etc.
168 */
169static void
170efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
171{
172 efx_qword_t buf_desc;
173 unsigned int index;
174 dma_addr_t dma_addr;
175 int i;
176
177 EFX_BUG_ON_PARANOID(!buffer->buf.addr);
178
179 /* Write buffer descriptors to NIC */
180 for (i = 0; i < buffer->entries; i++) {
181 index = buffer->index + i;
182 dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
183 netif_dbg(efx, probe, efx->net_dev,
184 "mapping special buffer %d at %llx\n",
185 index, (unsigned long long)dma_addr);
186 EFX_POPULATE_QWORD_3(buf_desc,
187 FRF_AZ_BUF_ADR_REGION, 0,
188 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
189 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
190 efx_write_buf_tbl(efx, &buf_desc, index);
191 }
192}
193
194/* Unmaps a buffer and clears the buffer table entries */
195static void
196efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
197{
198 efx_oword_t buf_tbl_upd;
199 unsigned int start = buffer->index;
200 unsigned int end = (buffer->index + buffer->entries - 1);
201
202 if (!buffer->entries)
203 return;
204
205 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
206 buffer->index, buffer->index + buffer->entries - 1);
207
208 EFX_POPULATE_OWORD_4(buf_tbl_upd,
209 FRF_AZ_BUF_UPD_CMD, 0,
210 FRF_AZ_BUF_CLR_CMD, 1,
211 FRF_AZ_BUF_CLR_END_ID, end,
212 FRF_AZ_BUF_CLR_START_ID, start);
213 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
214}
215
216/*
217 * Allocate a new special buffer
218 *
219 * This allocates memory for a new buffer, clears it and allocates a
220 * new buffer ID range. It does not write into the buffer table.
221 *
222 * This call will allocate 4KB buffers, since 8KB buffers can't be
223 * used for event queues and descriptor rings.
224 */
225static int efx_alloc_special_buffer(struct efx_nic *efx,
226 struct efx_special_buffer *buffer,
227 unsigned int len)
228{
229 len = ALIGN(len, EFX_BUF_SIZE);
230
231 if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
232 return -ENOMEM;
233 buffer->entries = len / EFX_BUF_SIZE;
234 BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
235
236 /* Select new buffer ID */
237 buffer->index = efx->next_buffer_table;
238 efx->next_buffer_table += buffer->entries;
239#ifdef CONFIG_SFC_SRIOV
240 BUG_ON(efx_sriov_enabled(efx) &&
241 efx->vf_buftbl_base < efx->next_buffer_table);
242#endif
243
244 netif_dbg(efx, probe, efx->net_dev,
245 "allocating special buffers %d-%d at %llx+%x "
246 "(virt %p phys %llx)\n", buffer->index,
247 buffer->index + buffer->entries - 1,
248 (u64)buffer->buf.dma_addr, len,
249 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
250
251 return 0;
252}
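/* Worked example (illustrative, using the 4KB buffer size stated above):
 * a 1024-entry descriptor ring of 8-byte efx_qword_t descriptors needs
 * len = 1024 * 8 = 8192 bytes; ALIGN(8192, 4096) is already 8192, so
 * buffer->entries = 8192 / 4096 = 2 buffer-table IDs are claimed from
 * efx->next_buffer_table.
 */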
253
254static void
255efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
256{
257 if (!buffer->buf.addr)
258 return;
259
260 netif_dbg(efx, hw, efx->net_dev,
261 "deallocating special buffers %d-%d at %llx+%x "
262 "(virt %p phys %llx)\n", buffer->index,
263 buffer->index + buffer->entries - 1,
264 (u64)buffer->buf.dma_addr, buffer->buf.len,
265 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
266
267 efx_nic_free_buffer(efx, &buffer->buf);
268 buffer->entries = 0;
269}
270
271/**************************************************************************
272 *
273 * TX path
274 *
275 **************************************************************************/
276
277/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
278static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
279{
280 unsigned write_ptr;
281 efx_dword_t reg;
282
283 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
284 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
285 efx_writed_page(tx_queue->efx, &reg,
286 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
287}
288
289/* Write pointer and first descriptor for TX descriptor ring */
290static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
291 const efx_qword_t *txd)
292{
293 unsigned write_ptr;
294 efx_oword_t reg;
295
296 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
297 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
298
299 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
300 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
301 FRF_AZ_TX_DESC_WPTR, write_ptr);
302 reg.qword[0] = *txd;
303 efx_writeo_page(tx_queue->efx, &reg,
304 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
305}
306
307
308/* For each entry inserted into the software descriptor ring, create a
309 * descriptor in the hardware TX descriptor ring (in host memory), and
310 * write a doorbell.
311 */
312void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
313{
314
315 struct efx_tx_buffer *buffer;
316 efx_qword_t *txd;
317 unsigned write_ptr;
318 unsigned old_write_count = tx_queue->write_count;
319
320 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
321
322 do {
323 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
324 buffer = &tx_queue->buffer[write_ptr];
325 txd = efx_tx_desc(tx_queue, write_ptr);
326 ++tx_queue->write_count;
327
328 EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
329
330 /* Create TX descriptor ring entry */
331 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
332 EFX_POPULATE_QWORD_4(*txd,
333 FSF_AZ_TX_KER_CONT,
334 buffer->flags & EFX_TX_BUF_CONT,
335 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
336 FSF_AZ_TX_KER_BUF_REGION, 0,
337 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
338 } while (tx_queue->write_count != tx_queue->insert_count);
339
340 wmb(); /* Ensure descriptors are written before they are fetched */
341
342 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
343 txd = efx_tx_desc(tx_queue,
344 old_write_count & tx_queue->ptr_mask);
345 efx_farch_push_tx_desc(tx_queue, txd);
346 ++tx_queue->pushes;
347 } else {
348 efx_farch_notify_tx_desc(tx_queue);
349 }
350}
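/* Illustration (not in the source): write_count is free-running and only
 * masked when used as a ring index, so with a 512-entry ring
 * (ptr_mask == 511) a write_count of 513 selects slot 513 & 511 == 1.
 * Keeping unmasked counters is a common way to tell a full ring from an
 * empty one without sacrificing a descriptor slot.
 */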
351
352/* Allocate hardware resources for a TX queue */
353int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
354{
355 struct efx_nic *efx = tx_queue->efx;
356 unsigned entries;
357
358 entries = tx_queue->ptr_mask + 1;
359 return efx_alloc_special_buffer(efx, &tx_queue->txd,
360 entries * sizeof(efx_qword_t));
361}
362
363void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
364{
365 struct efx_nic *efx = tx_queue->efx;
366 efx_oword_t reg;
367
368 /* Pin TX descriptor ring */
369 efx_init_special_buffer(efx, &tx_queue->txd);
370
371 /* Push TX descriptor ring to card */
372 EFX_POPULATE_OWORD_10(reg,
373 FRF_AZ_TX_DESCQ_EN, 1,
374 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
375 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
376 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
377 FRF_AZ_TX_DESCQ_EVQ_ID,
378 tx_queue->channel->channel,
379 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
380 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
381 FRF_AZ_TX_DESCQ_SIZE,
382 __ffs(tx_queue->txd.entries),
383 FRF_AZ_TX_DESCQ_TYPE, 0,
384 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
385
386 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
387 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
388 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
389 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
390 !csum);
391 }
392
393 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
394 tx_queue->queue);
395
396 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
397 /* Only 128 bits in this register */
398 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
399
400 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
401 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
402 __clear_bit_le(tx_queue->queue, &reg);
403 else
404 __set_bit_le(tx_queue->queue, &reg);
405 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
406 }
407
408 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
409 EFX_POPULATE_OWORD_1(reg,
410 FRF_BZ_TX_PACE,
411 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
412 FFE_BZ_TX_PACE_OFF :
413 FFE_BZ_TX_PACE_RESERVED);
414 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
415 tx_queue->queue);
416 }
417}
418
419static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
420{
421 struct efx_nic *efx = tx_queue->efx;
422 efx_oword_t tx_flush_descq;
423
424 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
425 atomic_set(&tx_queue->flush_outstanding, 1);
426
427 EFX_POPULATE_OWORD_2(tx_flush_descq,
428 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
429 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
430 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
431}
432
433void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
434{
435 struct efx_nic *efx = tx_queue->efx;
436 efx_oword_t tx_desc_ptr;
437
438 /* Remove TX descriptor ring from card */
439 EFX_ZERO_OWORD(tx_desc_ptr);
440 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
441 tx_queue->queue);
442
443 /* Unpin TX descriptor ring */
444 efx_fini_special_buffer(efx, &tx_queue->txd);
445}
446
447/* Free buffers backing TX queue */
448void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
449{
450 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
451}
452
453/**************************************************************************
454 *
455 * RX path
456 *
457 **************************************************************************/
458
459/* This creates an entry in the RX descriptor queue */
460static inline void
461efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
462{
463 struct efx_rx_buffer *rx_buf;
464 efx_qword_t *rxd;
465
466 rxd = efx_rx_desc(rx_queue, index);
467 rx_buf = efx_rx_buffer(rx_queue, index);
468 EFX_POPULATE_QWORD_3(*rxd,
469 FSF_AZ_RX_KER_BUF_SIZE,
470 rx_buf->len -
471 rx_queue->efx->type->rx_buffer_padding,
472 FSF_AZ_RX_KER_BUF_REGION, 0,
473 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
474}
475
476/* This writes to the RX_DESC_WPTR register for the specified receive
477 * descriptor ring.
478 */
479void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
480{
481 struct efx_nic *efx = rx_queue->efx;
482 efx_dword_t reg;
483 unsigned write_ptr;
484
485 while (rx_queue->notified_count != rx_queue->added_count) {
486 efx_farch_build_rx_desc(
487 rx_queue,
488 rx_queue->notified_count & rx_queue->ptr_mask);
489 ++rx_queue->notified_count;
490 }
491
492 wmb();
493 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
494 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
495 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
496 efx_rx_queue_index(rx_queue));
497}
498
499int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
500{
501 struct efx_nic *efx = rx_queue->efx;
502 unsigned entries;
503
504 entries = rx_queue->ptr_mask + 1;
505 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
506 entries * sizeof(efx_qword_t));
507}
508
509void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
510{
511 efx_oword_t rx_desc_ptr;
512 struct efx_nic *efx = rx_queue->efx;
513 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
514 bool iscsi_digest_en = is_b0;
515 bool jumbo_en;
516
517 /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
518 * DMA to continue after a PCIe page boundary (and scattering
519 * is not possible). In Falcon B0 and Siena, it enables
520 * scatter.
521 */
522 jumbo_en = !is_b0 || efx->rx_scatter;
523
524 netif_dbg(efx, hw, efx->net_dev,
525 "RX queue %d ring in special buffers %d-%d\n",
526 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
527 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
528
529 rx_queue->scatter_n = 0;
530
531 /* Pin RX descriptor ring */
532 efx_init_special_buffer(efx, &rx_queue->rxd);
533
534 /* Push RX descriptor ring to card */
535 EFX_POPULATE_OWORD_10(rx_desc_ptr,
536 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
537 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
538 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
539 FRF_AZ_RX_DESCQ_EVQ_ID,
540 efx_rx_queue_channel(rx_queue)->channel,
541 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
542 FRF_AZ_RX_DESCQ_LABEL,
543 efx_rx_queue_index(rx_queue),
544 FRF_AZ_RX_DESCQ_SIZE,
545 __ffs(rx_queue->rxd.entries),
546 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
547 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
548 FRF_AZ_RX_DESCQ_EN, 1);
549 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
550 efx_rx_queue_index(rx_queue));
551}
552
553static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
554{
555 struct efx_nic *efx = rx_queue->efx;
556 efx_oword_t rx_flush_descq;
557
558 EFX_POPULATE_OWORD_2(rx_flush_descq,
559 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
560 FRF_AZ_RX_FLUSH_DESCQ,
561 efx_rx_queue_index(rx_queue));
562 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
563}
564
565void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
566{
567 efx_oword_t rx_desc_ptr;
568 struct efx_nic *efx = rx_queue->efx;
569
570 /* Remove RX descriptor ring from card */
571 EFX_ZERO_OWORD(rx_desc_ptr);
572 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
573 efx_rx_queue_index(rx_queue));
574
575 /* Unpin RX descriptor ring */
576 efx_fini_special_buffer(efx, &rx_queue->rxd);
577}
578
579/* Free buffers backing RX queue */
580void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
581{
582 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
583}
584
585/**************************************************************************
586 *
587 * Flush handling
588 *
589 **************************************************************************/
590
591/* efx_farch_do_flush() must be woken up when all flushes are completed,
592 * or more RX flushes can be kicked off.
593 */
594static bool efx_farch_flush_wake(struct efx_nic *efx)
595{
596	/* Ensure that all updates are visible to efx_farch_do_flush() */
597 smp_mb();
598
599 return (atomic_read(&efx->active_queues) == 0 ||
600 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
601 && atomic_read(&efx->rxq_flush_pending) > 0));
602}
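/* Illustrative restatement of the wake condition: the waiter in
 * efx_farch_do_flush() should run either when everything has drained
 * (active_queues == 0), or when there is both spare flush capacity
 * (rxq_flush_outstanding < EFX_RX_FLUSH_COUNT, i.e. < 4) and at least
 * one RX queue still pending, so another flush can be kicked off.
 */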
603
604static bool efx_check_tx_flush_complete(struct efx_nic *efx)
605{
606 bool i = true;
607 efx_oword_t txd_ptr_tbl;
608 struct efx_channel *channel;
609 struct efx_tx_queue *tx_queue;
610
611 efx_for_each_channel(channel, efx) {
612 efx_for_each_channel_tx_queue(tx_queue, channel) {
613 efx_reado_table(efx, &txd_ptr_tbl,
614 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
615 if (EFX_OWORD_FIELD(txd_ptr_tbl,
616 FRF_AZ_TX_DESCQ_FLUSH) ||
617 EFX_OWORD_FIELD(txd_ptr_tbl,
618 FRF_AZ_TX_DESCQ_EN)) {
619 netif_dbg(efx, hw, efx->net_dev,
620 "flush did not complete on TXQ %d\n",
621 tx_queue->queue);
622 i = false;
623 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
624 1, 0)) {
625 /* The flush is complete, but we didn't
626 * receive a flush completion event
627 */
628 netif_dbg(efx, hw, efx->net_dev,
629 "flush complete on TXQ %d, so drain "
630 "the queue\n", tx_queue->queue);
631 /* Don't need to increment active_queues as it
632 * has already been incremented for the queues
633 * which did not drain
634 */
635 efx_farch_magic_event(channel,
636 EFX_CHANNEL_MAGIC_TX_DRAIN(
637 tx_queue));
638 }
639 }
640 }
641
642 return i;
643}
644
645/* Flush all the transmit queues, and continue flushing receive queues until
646 * they're all flushed. Wait for the DRAIN events to be received so that there
647 * are no more RX and TX events left on any channel. */
648static int efx_farch_do_flush(struct efx_nic *efx)
649{
650 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
651 struct efx_channel *channel;
652 struct efx_rx_queue *rx_queue;
653 struct efx_tx_queue *tx_queue;
654 int rc = 0;
655
656 efx_for_each_channel(channel, efx) {
657 efx_for_each_channel_tx_queue(tx_queue, channel) {
658 efx_farch_flush_tx_queue(tx_queue);
659 }
660 efx_for_each_channel_rx_queue(rx_queue, channel) {
661 rx_queue->flush_pending = true;
662 atomic_inc(&efx->rxq_flush_pending);
663 }
664 }
665
666 while (timeout && atomic_read(&efx->active_queues) > 0) {
667 /* If SRIOV is enabled, then offload receive queue flushing to
668 * the firmware (though we will still have to poll for
669 * completion). If that fails, fall back to the old scheme.
670 */
671 if (efx_sriov_enabled(efx)) {
672 rc = efx_mcdi_flush_rxqs(efx);
673 if (!rc)
674 goto wait;
675 }
676
677 /* The hardware supports four concurrent rx flushes, each of
678 * which may need to be retried if there is an outstanding
679 * descriptor fetch
680 */
681 efx_for_each_channel(channel, efx) {
682 efx_for_each_channel_rx_queue(rx_queue, channel) {
683 if (atomic_read(&efx->rxq_flush_outstanding) >=
684 EFX_RX_FLUSH_COUNT)
685 break;
686
687 if (rx_queue->flush_pending) {
688 rx_queue->flush_pending = false;
689 atomic_dec(&efx->rxq_flush_pending);
690 atomic_inc(&efx->rxq_flush_outstanding);
691 efx_farch_flush_rx_queue(rx_queue);
692 }
693 }
694 }
695
696 wait:
697 timeout = wait_event_timeout(efx->flush_wq,
698 efx_farch_flush_wake(efx),
699 timeout);
700 }
701
702 if (atomic_read(&efx->active_queues) &&
703 !efx_check_tx_flush_complete(efx)) {
704 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
705 "(rx %d+%d)\n", atomic_read(&efx->active_queues),
706 atomic_read(&efx->rxq_flush_outstanding),
707 atomic_read(&efx->rxq_flush_pending));
708 rc = -ETIMEDOUT;
709
710 atomic_set(&efx->active_queues, 0);
711 atomic_set(&efx->rxq_flush_pending, 0);
712 atomic_set(&efx->rxq_flush_outstanding, 0);
713 }
714
715 return rc;
716}
717
718int efx_farch_fini_dmaq(struct efx_nic *efx)
719{
720 struct efx_channel *channel;
721 struct efx_tx_queue *tx_queue;
722 struct efx_rx_queue *rx_queue;
723 int rc = 0;
724
725 /* Do not attempt to write to the NIC during EEH recovery */
726 if (efx->state != STATE_RECOVERY) {
727 /* Only perform flush if DMA is enabled */
728 if (efx->pci_dev->is_busmaster) {
729 efx->type->prepare_flush(efx);
730 rc = efx_farch_do_flush(efx);
731 efx->type->finish_flush(efx);
732 }
733
734 efx_for_each_channel(channel, efx) {
735 efx_for_each_channel_rx_queue(rx_queue, channel)
736 efx_farch_rx_fini(rx_queue);
737 efx_for_each_channel_tx_queue(tx_queue, channel)
738 efx_farch_tx_fini(tx_queue);
739 }
740 }
741
742 return rc;
743}
744
745/**************************************************************************
746 *
747 * Event queue processing
748 * Event queues are processed by per-channel tasklets.
749 *
750 **************************************************************************/
751
752/* Update a channel's event queue's read pointer (RPTR) register
753 *
754 * This writes the EVQ_RPTR_REG register for the specified channel's
755 * event queue.
756 */
757void efx_farch_ev_read_ack(struct efx_channel *channel)
758{
759 efx_dword_t reg;
760 struct efx_nic *efx = channel->efx;
761
762 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
763 channel->eventq_read_ptr & channel->eventq_mask);
764
765 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
766 * of 4 bytes, but it is really 16 bytes just like later revisions.
767 */
768 efx_writed(efx, &reg,
769 efx->type->evq_rptr_tbl_base +
770 FR_BZ_EVQ_RPTR_STEP * channel->channel);
771}
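/* Illustration (not in the source): with the 16-byte step noted above,
 * channel 5's read pointer register sits at
 *
 *	evq_rptr_tbl_base + FR_BZ_EVQ_RPTR_STEP * 5 = base + 0x50
 *
 * assuming FR_BZ_EVQ_RPTR_STEP is the 16-byte stride described in the
 * comment.
 */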
772
773/* Use HW to insert a SW defined event */
774void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
775 efx_qword_t *event)
776{
777 efx_oword_t drv_ev_reg;
778
779 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
780 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
781 drv_ev_reg.u32[0] = event->u32[0];
782 drv_ev_reg.u32[1] = event->u32[1];
783 drv_ev_reg.u32[2] = 0;
784 drv_ev_reg.u32[3] = 0;
785 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
786 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
787}
788
789static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
790{
791 efx_qword_t event;
792
793 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
794 FSE_AZ_EV_CODE_DRV_GEN_EV,
795 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
796 efx_farch_generate_event(channel->efx, channel->channel, &event);
797}
798
799/* Handle a transmit completion event
800 *
801 * The NIC batches TX completion events; the message we receive is of
802 * the form "complete all TX events up to this index".
803 */
804static int
805efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
806{
807 unsigned int tx_ev_desc_ptr;
808 unsigned int tx_ev_q_label;
809 struct efx_tx_queue *tx_queue;
810 struct efx_nic *efx = channel->efx;
811 int tx_packets = 0;
812
813 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
814 return 0;
815
816 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
817 /* Transmit completion */
818 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
819 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
820 tx_queue = efx_channel_get_tx_queue(
821 channel, tx_ev_q_label % EFX_TXQ_TYPES);
822 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
823 tx_queue->ptr_mask);
824 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
825 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
826 /* Rewrite the FIFO write pointer */
827 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
828 tx_queue = efx_channel_get_tx_queue(
829 channel, tx_ev_q_label % EFX_TXQ_TYPES);
830
831 netif_tx_lock(efx->net_dev);
832 efx_farch_notify_tx_desc(tx_queue);
833 netif_tx_unlock(efx->net_dev);
834 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
835 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
836 } else {
837 netif_err(efx, tx_err, efx->net_dev,
838 "channel %d unexpected TX event "
839 EFX_QWORD_FMT"\n", channel->channel,
840 EFX_QWORD_VAL(*event));
841 }
842
843 return tx_packets;
844}
845
846/* Detect errors included in the rx_evt_pkt_ok bit. */
847static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
848 const efx_qword_t *event)
849{
850 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
851 struct efx_nic *efx = rx_queue->efx;
852 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
853 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
854 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
855 bool rx_ev_other_err, rx_ev_pause_frm;
856 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
857 unsigned rx_ev_pkt_type;
858
859 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
860 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
861 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
862 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
863 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
864 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
865 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
866 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
867 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
868 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
869 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
870 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
871 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
872 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
873 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
874
875 /* Every error apart from tobe_disc and pause_frm */
876 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
877 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
878 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
879
880 /* Count errors that are not in MAC stats. Ignore expected
881 * checksum errors during self-test. */
882 if (rx_ev_frm_trunc)
883 ++channel->n_rx_frm_trunc;
884 else if (rx_ev_tobe_disc)
885 ++channel->n_rx_tobe_disc;
886 else if (!efx->loopback_selftest) {
887 if (rx_ev_ip_hdr_chksum_err)
888 ++channel->n_rx_ip_hdr_chksum_err;
889 else if (rx_ev_tcp_udp_chksum_err)
890 ++channel->n_rx_tcp_udp_chksum_err;
891 }
892
893 /* TOBE_DISC is expected on unicast mismatches; don't print out an
894 * error message. FRM_TRUNC indicates RXDP dropped the packet due
895 * to a FIFO overflow.
896 */
897#ifdef DEBUG
898 if (rx_ev_other_err && net_ratelimit()) {
899 netif_dbg(efx, rx_err, efx->net_dev,
900 " RX queue %d unexpected RX event "
901 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
902 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
903 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
904 rx_ev_ip_hdr_chksum_err ?
905 " [IP_HDR_CHKSUM_ERR]" : "",
906 rx_ev_tcp_udp_chksum_err ?
907 " [TCP_UDP_CHKSUM_ERR]" : "",
908 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
909 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
910 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
911 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
912 rx_ev_pause_frm ? " [PAUSE]" : "");
913 }
914#endif
915
916 /* The frame must be discarded if any of these are true. */
917 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
918 rx_ev_tobe_disc | rx_ev_pause_frm) ?
919 EFX_RX_PKT_DISCARD : 0;
920}
921
922/* Handle receive events that are not in-order. Return true if this
923 * can be handled as a partial packet discard, false if it's more
924 * serious.
925 */
926static bool
927efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
928{
929 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
930 struct efx_nic *efx = rx_queue->efx;
931 unsigned expected, dropped;
932
933 if (rx_queue->scatter_n &&
934 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
935 rx_queue->ptr_mask)) {
936 ++channel->n_rx_nodesc_trunc;
937 return true;
938 }
939
940 expected = rx_queue->removed_count & rx_queue->ptr_mask;
941 dropped = (index - expected) & rx_queue->ptr_mask;
942 netif_info(efx, rx_err, efx->net_dev,
943 "dropped %d events (index=%d expected=%d)\n",
944 dropped, index, expected);
945
946 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
947 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
948 return false;
949}
950
951/* Handle a packet received event
952 *
953 * The NIC gives a "discard" flag if it's a unicast packet with the
954 * wrong destination address
955 * Also "is multicast" and "matches multicast filter" flags can be used to
956 * discard non-matching multicast packets.
957 */
958static void
959efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
960{
961 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
962 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
963 unsigned expected_ptr;
964 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
965 u16 flags;
966 struct efx_rx_queue *rx_queue;
967 struct efx_nic *efx = channel->efx;
968
969 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
970 return;
971
972 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
973 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
974 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
975 channel->channel);
976
977 rx_queue = efx_channel_get_rx_queue(channel);
978
979 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
980 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
981 rx_queue->ptr_mask);
982
983 /* Check for partial drops and other errors */
984 if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
985 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
986 if (rx_ev_desc_ptr != expected_ptr &&
987 !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
988 return;
989
990 /* Discard all pending fragments */
991 if (rx_queue->scatter_n) {
992 efx_rx_packet(
993 rx_queue,
994 rx_queue->removed_count & rx_queue->ptr_mask,
995 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
996 rx_queue->removed_count += rx_queue->scatter_n;
997 rx_queue->scatter_n = 0;
998 }
999
1000 /* Return if there is no new fragment */
1001 if (rx_ev_desc_ptr != expected_ptr)
1002 return;
1003
1004 /* Discard new fragment if not SOP */
1005 if (!rx_ev_sop) {
1006 efx_rx_packet(
1007 rx_queue,
1008 rx_queue->removed_count & rx_queue->ptr_mask,
1009 1, 0, EFX_RX_PKT_DISCARD);
1010 ++rx_queue->removed_count;
1011 return;
1012 }
1013 }
1014
1015 ++rx_queue->scatter_n;
1016 if (rx_ev_cont)
1017 return;
1018
1019 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1020 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1021 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1022
1023 if (likely(rx_ev_pkt_ok)) {
1024 /* If packet is marked as OK then we can rely on the
1025 * hardware checksum and classification.
1026 */
1027 flags = 0;
1028 switch (rx_ev_hdr_type) {
1029 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1030 flags |= EFX_RX_PKT_TCP;
1031 /* fall through */
1032 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1033 flags |= EFX_RX_PKT_CSUMMED;
1034 /* fall through */
1035 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1036 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1037 break;
1038 }
1039 } else {
1040 flags = efx_farch_handle_rx_not_ok(rx_queue, event);
1041 }
1042
1043 /* Detect multicast packets that didn't match the filter */
1044 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1045 if (rx_ev_mcast_pkt) {
1046 unsigned int rx_ev_mcast_hash_match =
1047 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1048
1049 if (unlikely(!rx_ev_mcast_hash_match)) {
1050 ++channel->n_rx_mcast_mismatch;
1051 flags |= EFX_RX_PKT_DISCARD;
1052 }
1053 }
1054
1055 channel->irq_mod_score += 2;
1056
1057 /* Handle received packet */
1058 efx_rx_packet(rx_queue,
1059 rx_queue->removed_count & rx_queue->ptr_mask,
1060 rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1061 rx_queue->removed_count += rx_queue->scatter_n;
1062 rx_queue->scatter_n = 0;
1063}
1064
1065/* If this flush done event corresponds to a &struct efx_tx_queue, then
1066 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1067 * of all transmit completions.
1068 */
1069static void
1070efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1071{
1072 struct efx_tx_queue *tx_queue;
1073 int qid;
1074
1075 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1076 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1077 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1078 qid % EFX_TXQ_TYPES);
1079 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1080 efx_farch_magic_event(tx_queue->channel,
1081 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1082 }
1083 }
1084}
1085
1086/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1087 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1088 * the RX queue back to the mask of RX queues in need of flushing.
1089 */
1090static void
1091efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1092{
1093 struct efx_channel *channel;
1094 struct efx_rx_queue *rx_queue;
1095 int qid;
1096 bool failed;
1097
1098 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1099 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1100 if (qid >= efx->n_channels)
1101 return;
1102 channel = efx_get_channel(efx, qid);
1103 if (!efx_channel_has_rx_queue(channel))
1104 return;
1105 rx_queue = efx_channel_get_rx_queue(channel);
1106
1107 if (failed) {
1108 netif_info(efx, hw, efx->net_dev,
1109 "RXQ %d flush retry\n", qid);
1110 rx_queue->flush_pending = true;
1111 atomic_inc(&efx->rxq_flush_pending);
1112 } else {
1113 efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1114 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1115 }
1116 atomic_dec(&efx->rxq_flush_outstanding);
1117 if (efx_farch_flush_wake(efx))
1118 wake_up(&efx->flush_wq);
1119}
1120
1121static void
1122efx_farch_handle_drain_event(struct efx_channel *channel)
1123{
1124 struct efx_nic *efx = channel->efx;
1125
1126 WARN_ON(atomic_read(&efx->active_queues) == 0);
1127 atomic_dec(&efx->active_queues);
1128 if (efx_farch_flush_wake(efx))
1129 wake_up(&efx->flush_wq);
1130}
1131
1132static void efx_farch_handle_generated_event(struct efx_channel *channel,
1133 efx_qword_t *event)
1134{
1135 struct efx_nic *efx = channel->efx;
1136 struct efx_rx_queue *rx_queue =
1137 efx_channel_has_rx_queue(channel) ?
1138 efx_channel_get_rx_queue(channel) : NULL;
1139 unsigned magic, code;
1140
1141 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1142 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1143
1144 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1145 channel->event_test_cpu = raw_smp_processor_id();
1146 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1147 /* The queue must be empty, so we won't receive any rx
1148 * events, so efx_process_channel() won't refill the
1149 * queue. Refill it here */
1150 efx_fast_push_rx_descriptors(rx_queue);
1151 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1152 efx_farch_handle_drain_event(channel);
1153 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1154 efx_farch_handle_drain_event(channel);
1155 } else {
1156 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1157 "generated event "EFX_QWORD_FMT"\n",
1158 channel->channel, EFX_QWORD_VAL(*event));
1159 }
1160}
1161
1162static void
1163efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1164{
1165 struct efx_nic *efx = channel->efx;
1166 unsigned int ev_sub_code;
1167 unsigned int ev_sub_data;
1168
1169 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1170 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1171
1172 switch (ev_sub_code) {
1173 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1174 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1175 channel->channel, ev_sub_data);
1176 efx_farch_handle_tx_flush_done(efx, event);
1177 efx_sriov_tx_flush_done(efx, event);
1178 break;
1179 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1180 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1181 channel->channel, ev_sub_data);
1182 efx_farch_handle_rx_flush_done(efx, event);
1183 efx_sriov_rx_flush_done(efx, event);
1184 break;
1185 case FSE_AZ_EVQ_INIT_DONE_EV:
1186 netif_dbg(efx, hw, efx->net_dev,
1187 "channel %d EVQ %d initialised\n",
1188 channel->channel, ev_sub_data);
1189 break;
1190 case FSE_AZ_SRM_UPD_DONE_EV:
1191 netif_vdbg(efx, hw, efx->net_dev,
1192 "channel %d SRAM update done\n", channel->channel);
1193 break;
1194 case FSE_AZ_WAKE_UP_EV:
1195 netif_vdbg(efx, hw, efx->net_dev,
1196 "channel %d RXQ %d wakeup event\n",
1197 channel->channel, ev_sub_data);
1198 break;
1199 case FSE_AZ_TIMER_EV:
1200 netif_vdbg(efx, hw, efx->net_dev,
1201 "channel %d RX queue %d timer expired\n",
1202 channel->channel, ev_sub_data);
1203 break;
1204 case FSE_AA_RX_RECOVER_EV:
1205 netif_err(efx, rx_err, efx->net_dev,
1206 "channel %d seen DRIVER RX_RESET event. "
1207 "Resetting.\n", channel->channel);
1208 atomic_inc(&efx->rx_reset);
1209 efx_schedule_reset(efx,
1210 EFX_WORKAROUND_6555(efx) ?
1211 RESET_TYPE_RX_RECOVERY :
1212 RESET_TYPE_DISABLE);
1213 break;
1214 case FSE_BZ_RX_DSC_ERROR_EV:
1215 if (ev_sub_data < EFX_VI_BASE) {
1216 netif_err(efx, rx_err, efx->net_dev,
1217 "RX DMA Q %d reports descriptor fetch error."
1218 " RX Q %d is disabled.\n", ev_sub_data,
1219 ev_sub_data);
1220 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1221 } else
1222 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1223 break;
1224 case FSE_BZ_TX_DSC_ERROR_EV:
1225 if (ev_sub_data < EFX_VI_BASE) {
1226 netif_err(efx, tx_err, efx->net_dev,
1227 "TX DMA Q %d reports descriptor fetch error."
1228 " TX Q %d is disabled.\n", ev_sub_data,
1229 ev_sub_data);
1230 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1231 } else
1232 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1233 break;
1234 default:
1235 netif_vdbg(efx, hw, efx->net_dev,
1236 "channel %d unknown driver event code %d "
1237 "data %04x\n", channel->channel, ev_sub_code,
1238 ev_sub_data);
1239 break;
1240 }
1241}
1242
1243int efx_farch_ev_process(struct efx_channel *channel, int budget)
1244{
1245 struct efx_nic *efx = channel->efx;
1246 unsigned int read_ptr;
1247 efx_qword_t event, *p_event;
1248 int ev_code;
1249 int tx_packets = 0;
1250 int spent = 0;
1251
1252 read_ptr = channel->eventq_read_ptr;
1253
1254 for (;;) {
1255 p_event = efx_event(channel, read_ptr);
1256 event = *p_event;
1257
1258 if (!efx_event_present(&event))
1259 /* End of events */
1260 break;
1261
1262 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1263 "channel %d event is "EFX_QWORD_FMT"\n",
1264 channel->channel, EFX_QWORD_VAL(event));
1265
1266 /* Clear this event by marking it all ones */
1267 EFX_SET_QWORD(*p_event);
1268
1269 ++read_ptr;
1270
1271 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1272
1273 switch (ev_code) {
1274 case FSE_AZ_EV_CODE_RX_EV:
1275 efx_farch_handle_rx_event(channel, &event);
1276 if (++spent == budget)
1277 goto out;
1278 break;
1279 case FSE_AZ_EV_CODE_TX_EV:
1280 tx_packets += efx_farch_handle_tx_event(channel,
1281 &event);
1282 if (tx_packets > efx->txq_entries) {
1283 spent = budget;
1284 goto out;
1285 }
1286 break;
1287 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1288 efx_farch_handle_generated_event(channel, &event);
1289 break;
1290 case FSE_AZ_EV_CODE_DRIVER_EV:
1291 efx_farch_handle_driver_event(channel, &event);
1292 break;
1293 case FSE_CZ_EV_CODE_USER_EV:
1294 efx_sriov_event(channel, &event);
1295 break;
1296 case FSE_CZ_EV_CODE_MCDI_EV:
1297 efx_mcdi_process_event(channel, &event);
1298 break;
1299 case FSE_AZ_EV_CODE_GLOBAL_EV:
1300 if (efx->type->handle_global_event &&
1301 efx->type->handle_global_event(channel, &event))
1302 break;
1303 /* else fall through */
1304 default:
1305 netif_err(channel->efx, hw, channel->efx->net_dev,
1306 "channel %d unknown event type %d (data "
1307 EFX_QWORD_FMT ")\n", channel->channel,
1308 ev_code, EFX_QWORD_VAL(event));
1309 }
1310 }
1311
1312out:
1313 channel->eventq_read_ptr = read_ptr;
1314 return spent;
1315}
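/* Sketch of the caller contract (illustrative, not the driver's actual
 * poll loop):
 *
 *	spent = efx_farch_ev_process(channel, budget);
 *	if (spent < budget)
 *		ack the queue and allow the IRQ to be re-armed;
 *
 * Returning spent == budget signals "more work pending"; the TX branch
 * above forces that by setting spent = budget when a burst of
 * completions exceeds txq_entries.
 */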
1316
1317/* Allocate buffer table entries for event queue */
1318int efx_farch_ev_probe(struct efx_channel *channel)
1319{
1320 struct efx_nic *efx = channel->efx;
1321 unsigned entries;
1322
1323 entries = channel->eventq_mask + 1;
1324 return efx_alloc_special_buffer(efx, &channel->eventq,
1325 entries * sizeof(efx_qword_t));
1326}
1327
1328int efx_farch_ev_init(struct efx_channel *channel)
1329{
1330 efx_oword_t reg;
1331 struct efx_nic *efx = channel->efx;
1332
1333 netif_dbg(efx, hw, efx->net_dev,
1334 "channel %d event queue in special buffers %d-%d\n",
1335 channel->channel, channel->eventq.index,
1336 channel->eventq.index + channel->eventq.entries - 1);
1337
1338 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1339 EFX_POPULATE_OWORD_3(reg,
1340 FRF_CZ_TIMER_Q_EN, 1,
1341 FRF_CZ_HOST_NOTIFY_MODE, 0,
1342 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1343 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1344 }
1345
1346 /* Pin event queue buffer */
1347 efx_init_special_buffer(efx, &channel->eventq);
1348
1349 /* Fill event queue with all ones (i.e. empty events) */
1350 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1351
1352 /* Push event queue to card */
1353 EFX_POPULATE_OWORD_3(reg,
1354 FRF_AZ_EVQ_EN, 1,
1355 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1356 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1357 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1358 channel->channel);
1359
1360 return 0;
1361}
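/* The all-ones fill above is what makes event-presence detection work: an
 * entry is "present" iff the hardware has overwritten it, i.e. it is no
 * longer all ones.  A sketch of the test (the real efx_event_present()
 * lives in a header; this assumes the dword[2] view of efx_qword_t):
 *
 *	static inline bool example_event_present(const efx_qword_t *ev)
 *	{
 *		return !(EFX_DWORD_IS_ALL_ONES(ev->dword[0]) |
 *			 EFX_DWORD_IS_ALL_ONES(ev->dword[1]));
 *	}
 */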
1362
1363void efx_farch_ev_fini(struct efx_channel *channel)
1364{
1365 efx_oword_t reg;
1366 struct efx_nic *efx = channel->efx;
1367
1368 /* Remove event queue from card */
1369 EFX_ZERO_OWORD(reg);
1370 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1371 channel->channel);
1372 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1373 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1374
1375 /* Unpin event queue */
1376 efx_fini_special_buffer(efx, &channel->eventq);
1377}
1378
1379/* Free buffers backing event queue */
1380void efx_farch_ev_remove(struct efx_channel *channel)
1381{
1382 efx_free_special_buffer(channel->efx, &channel->eventq);
1383}
1384
1385
1386void efx_farch_ev_test_generate(struct efx_channel *channel)
1387{
1388 efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1389}
1390
1391void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
1392{
1393 efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1394 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1395}
1396
1397/**************************************************************************
1398 *
1399 * Hardware interrupts
1400 * The hardware interrupt handler does very little work; all the event
1401 * queue processing is carried out by per-channel tasklets.
1402 *
1403 **************************************************************************/
1404
1405/* Enable/disable/generate interrupts */
1406static inline void efx_farch_interrupts(struct efx_nic *efx,
1407 bool enabled, bool force)
1408{
1409 efx_oword_t int_en_reg_ker;
1410
1411 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1412 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1413 FRF_AZ_KER_INT_KER, force,
1414 FRF_AZ_DRV_INT_EN_KER, enabled);
1415 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1416}
1417
1418void efx_farch_irq_enable_master(struct efx_nic *efx)
1419{
1420 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1421 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1422
1423 efx_farch_interrupts(efx, true, false);
1424}
1425
1426void efx_farch_irq_disable_master(struct efx_nic *efx)
1427{
1428 /* Disable interrupts */
1429 efx_farch_interrupts(efx, false, false);
1430}
1431
1432/* Generate a test interrupt
1433 * Interrupts must already have been enabled, otherwise nasty things
1434 * may happen.
1435 */
1436void efx_farch_irq_test_generate(struct efx_nic *efx)
1437{
1438 efx_farch_interrupts(efx, true, true);
1439}
1440
1441/* Process a fatal interrupt
1442 * Disable bus mastering ASAP and schedule a reset
1443 */
1444irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
1445{
1446 struct falcon_nic_data *nic_data = efx->nic_data;
1447 efx_oword_t *int_ker = efx->irq_status.addr;
1448 efx_oword_t fatal_intr;
1449 int error, mem_perr;
1450
1451 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1452 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1453
1454 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1455 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1456 EFX_OWORD_VAL(fatal_intr),
1457 error ? "disabling bus mastering" : "no recognised error");
1458
1459 /* If this is a memory parity error dump which blocks are offending */
1460 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1461 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1462 if (mem_perr) {
1463 efx_oword_t reg;
1464 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1465 netif_err(efx, hw, efx->net_dev,
1466 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1467 EFX_OWORD_VAL(reg));
1468 }
1469
1470 /* Disable both devices */
1471 pci_clear_master(efx->pci_dev);
1472 if (efx_nic_is_dual_func(efx))
1473 pci_clear_master(nic_data->pci_dev2);
1474 efx_farch_irq_disable_master(efx);
1475
1476 /* Count errors and reset or disable the NIC accordingly */
1477 if (efx->int_error_count == 0 ||
1478 time_after(jiffies, efx->int_error_expire)) {
1479 efx->int_error_count = 0;
1480 efx->int_error_expire =
1481 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1482 }
1483 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1484 netif_err(efx, hw, efx->net_dev,
1485 "SYSTEM ERROR - reset scheduled\n");
1486 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1487 } else {
1488 netif_err(efx, hw, efx->net_dev,
1489 "SYSTEM ERROR - max number of errors seen."
1490 "NIC will be disabled\n");
1491 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1492 }
1493
1494 return IRQ_HANDLED;
1495}
1496
1497/* Handle a legacy interrupt
1498 * Acknowledges the interrupt and schedules event queue processing.
1499 */
1500irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
1501{
1502 struct efx_nic *efx = dev_id;
1503 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
1504 efx_oword_t *int_ker = efx->irq_status.addr;
1505 irqreturn_t result = IRQ_NONE;
1506 struct efx_channel *channel;
1507 efx_dword_t reg;
1508 u32 queues;
1509 int syserr;
1510
1511 /* Read the ISR which also ACKs the interrupts */
1512 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1513 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1514
1515 /* Legacy interrupts are disabled too late by the EEH kernel
1516 * code. Disable them earlier.
1517 * If an EEH error occurred, the read will have returned all ones.
1518 */
1519 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
1520 !efx->eeh_disabled_legacy_irq) {
1521 disable_irq_nosync(efx->legacy_irq);
1522 efx->eeh_disabled_legacy_irq = true;
1523 }
1524
1525 /* Handle non-event-queue sources */
1526 if (queues & (1U << efx->irq_level) && soft_enabled) {
1527 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1528 if (unlikely(syserr))
1529 return efx_farch_fatal_interrupt(efx);
1530 efx->last_irq_cpu = raw_smp_processor_id();
1531 }
1532
1533 if (queues != 0) {
1534 efx->irq_zero_count = 0;
1535
1536 /* Schedule processing of any interrupting queues */
1537 if (likely(soft_enabled)) {
1538 efx_for_each_channel(channel, efx) {
1539 if (queues & 1)
1540 efx_schedule_channel_irq(channel);
1541 queues >>= 1;
1542 }
1543 }
1544 result = IRQ_HANDLED;
1545
1546 } else {
1547 efx_qword_t *event;
1548
1549 /* Legacy ISR read can return zero once (SF bug 15783) */
1550
1551 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1552 * because this might be a shared interrupt. */
1553 if (efx->irq_zero_count++ == 0)
1554 result = IRQ_HANDLED;
1555
1556 /* Ensure we schedule or rearm all event queues */
1557 if (likely(soft_enabled)) {
1558 efx_for_each_channel(channel, efx) {
1559 event = efx_event(channel,
1560 channel->eventq_read_ptr);
1561 if (efx_event_present(event))
1562 efx_schedule_channel_irq(channel);
1563 else
1564 efx_farch_ev_read_ack(channel);
1565 }
1566 }
1567 }
1568
1569 if (result == IRQ_HANDLED)
1570 netif_vdbg(efx, intr, efx->net_dev,
1571 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1572 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1573
1574 return result;
1575}
1576
1577/* Handle an MSI interrupt
1578 *
1579 * Handle an MSI hardware interrupt. This routine schedules event
1580 * queue processing. No interrupt acknowledgement cycle is necessary.
1581 * Also, we never need to check that the interrupt is for us, since
1582 * MSI interrupts cannot be shared.
1583 */
1584irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
1585{
1586 struct efx_msi_context *context = dev_id;
1587 struct efx_nic *efx = context->efx;
1588 efx_oword_t *int_ker = efx->irq_status.addr;
1589 int syserr;
1590
1591 netif_vdbg(efx, intr, efx->net_dev,
1592 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1593 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1594
1595 if (unlikely(!ACCESS_ONCE(efx->irq_soft_enabled)))
1596 return IRQ_HANDLED;
1597
1598 /* Handle non-event-queue sources */
1599 if (context->index == efx->irq_level) {
1600 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1601 if (unlikely(syserr))
1602 return efx_farch_fatal_interrupt(efx);
1603 efx->last_irq_cpu = raw_smp_processor_id();
1604 }
1605
1606 /* Schedule processing of the channel */
1607 efx_schedule_channel_irq(efx->channel[context->index]);
1608
1609 return IRQ_HANDLED;
1610}
1611
1612
1613/* Setup RSS indirection table.
1614 * This maps from the hash value of the packet to RXQ
1615 */
1616void efx_farch_rx_push_indir_table(struct efx_nic *efx)
1617{
1618 size_t i = 0;
1619 efx_dword_t dword;
1620
1621 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1622 return;
1623
1624 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1625 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1626
1627 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1628 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1629 efx->rx_indir_table[i]);
1630 efx_writed(efx, &dword,
1631 FR_BZ_RX_INDIRECTION_TBL +
1632 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1633 }
1634}
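/* The indirection table has FR_BZ_RX_INDIRECTION_TBL_ROWS (128) entries,
 * each holding the RXQ number for one hash bucket.  A typical way to fill
 * it - a sketch only, the core driver does something equivalent - is a
 * round-robin spread across the RX channels:
 *
 *	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
 *		efx->rx_indir_table[i] =
 *			ethtool_rxfh_indir_default(i, efx->n_rx_channels);
 */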
1635
1636/* Looks at available SRAM resources and works out how many queues we
1637 * can support, and where things like descriptor caches should live.
1638 *
1639 * SRAM is split up as follows:
1640 * 0 buftbl entries for channels
1641 * efx->vf_buftbl_base buftbl entries for SR-IOV
1642 * efx->rx_dc_base RX descriptor caches
1643 * efx->tx_dc_base TX descriptor caches
1644 */
1645void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1646{
1647 unsigned vi_count, buftbl_min;
1648
1649 /* Account for the buffer table entries backing the datapath channels
1650 * and the descriptor caches for those channels.
1651 */
1652 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1653 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1654 efx->n_channels * EFX_MAX_EVQ_SIZE)
1655 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1656 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1657
1658#ifdef CONFIG_SFC_SRIOV
1659 if (efx_sriov_wanted(efx)) {
1660 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1661
1662 efx->vf_buftbl_base = buftbl_min;
1663
1664 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1665 vi_count = max(vi_count, EFX_VI_BASE);
1666 buftbl_free = (sram_lim_qw - buftbl_min -
1667 vi_count * vi_dc_entries);
1668
1669 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1670 efx_vf_size(efx));
1671 vf_limit = min(buftbl_free / entries_per_vf,
1672 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1673
1674 if (efx->vf_count > vf_limit) {
1675 netif_err(efx, probe, efx->net_dev,
1676 "Reducing VF count from from %d to %d\n",
1677 efx->vf_count, vf_limit);
1678 efx->vf_count = vf_limit;
1679 }
1680 vi_count += efx->vf_count * efx_vf_size(efx);
1681 }
1682#endif
1683
1684 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1685 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1686}
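/* Worked example of the accounting above, assuming the usual Falcon-arch
 * constants (EFX_MAX_DMAQ_SIZE = 4096 descriptors, EFX_MAX_EVQ_SIZE =
 * 32768 events, EFX_BUF_SIZE = 4096 bytes): a maximally-sized DMA queue
 * needs 4096 * 8 / 4096 = 8 buffer-table entries, and an event queue
 * needs 32768 * 8 / 4096 = 64.  So a NIC with 4 RX channels, 4 TX
 * channels and 4 event queues reserves
 * 4 * 8 + 4 * EFX_TXQ_TYPES * 8 + 4 * 64 entries as buftbl_min before
 * any VF or descriptor-cache space is carved out above it.
 */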
1687
1688u32 efx_farch_fpga_ver(struct efx_nic *efx)
1689{
1690 efx_oword_t altera_build;
1691 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1692 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1693}
1694
1695void efx_farch_init_common(struct efx_nic *efx)
1696{
1697 efx_oword_t temp;
1698
1699 /* Set positions of descriptor caches in SRAM. */
1700 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1701 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1702 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1703 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1704
1705 /* Set TX descriptor cache size. */
1706 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1707 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1708 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1709
1710 /* Set RX descriptor cache size. Set low watermark to size-8, as
1711 * this allows most efficient prefetching.
1712 */
1713 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1714 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1715 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1716 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1717 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1718
1719 /* Program INT_KER address */
1720 EFX_POPULATE_OWORD_2(temp,
1721 FRF_AZ_NORM_INT_VEC_DIS_KER,
1722 EFX_INT_MODE_USE_MSI(efx),
1723 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1724 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1725
1726 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1727 /* Use an interrupt level unused by event queues */
1728 efx->irq_level = 0x1f;
1729 else
1730 /* Use a valid MSI-X vector */
1731 efx->irq_level = 0;
1732
1733 /* Enable all the genuinely fatal interrupts. (They are still
1734 * masked by the overall interrupt mask, controlled by
1735 * efx_farch_interrupts()).
1736 *
1737 * Note: All other fatal interrupts are enabled
1738 */
1739 EFX_POPULATE_OWORD_3(temp,
1740 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1741 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1742 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1743 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1744 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1745 EFX_INVERT_OWORD(temp);
1746 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1747
1748 efx_farch_rx_push_indir_table(efx);
1749
1750 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1751 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1752 */
1753 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1754 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1755 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1756 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1757 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1758 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1759 /* Enable SW_EV to inherit in char driver - assume harmless here */
1760 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1761 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1762 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1763 /* Disable hardware watchdog which can misfire */
1764 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1765 /* Squash TX of packets of 16 bytes or less */
1766 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1767 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1768 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1769
1770 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1771 EFX_POPULATE_OWORD_4(temp,
1772 /* Default values */
1773 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1774 FRF_BZ_TX_PACE_SB_AF, 0xb,
1775 FRF_BZ_TX_PACE_FB_BASE, 0,
1776 /* Allow large pace values in the
1777 * fast bin. */
1778 FRF_BZ_TX_PACE_BIN_TH,
1779 FFE_BZ_TX_PACE_RESERVED);
1780 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1781 }
1782}
1783
1784/**************************************************************************
1785 *
1786 * Filter tables
1787 *
1788 **************************************************************************
1789 */
1790
1791/* "Fudge factors" - difference between programmed value and actual depth.
1792 * Due to pipelined implementation we need to program H/W with a value that
1793 * is larger than the hop limit we want.
1794 */
1795#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1796#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1797
1798/* Hard maximum search limit. Hardware will time-out beyond 200-something.
1799 * We also need to avoid infinite loops in efx_farch_filter_search() when the
1800 * table is full.
1801 */
1802#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
1803
1804/* Don't try very hard to find space for performance hints, as this is
1805 * counter-productive. */
1806#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1807
1808enum efx_farch_filter_type {
1809 EFX_FARCH_FILTER_TCP_FULL = 0,
1810 EFX_FARCH_FILTER_TCP_WILD,
1811 EFX_FARCH_FILTER_UDP_FULL,
1812 EFX_FARCH_FILTER_UDP_WILD,
1813 EFX_FARCH_FILTER_MAC_FULL = 4,
1814 EFX_FARCH_FILTER_MAC_WILD,
1815 EFX_FARCH_FILTER_UC_DEF = 8,
1816 EFX_FARCH_FILTER_MC_DEF,
1817 EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
1818};
1819
1820enum efx_farch_filter_table_id {
1821 EFX_FARCH_FILTER_TABLE_RX_IP = 0,
1822 EFX_FARCH_FILTER_TABLE_RX_MAC,
1823 EFX_FARCH_FILTER_TABLE_RX_DEF,
1824 EFX_FARCH_FILTER_TABLE_TX_MAC,
1825 EFX_FARCH_FILTER_TABLE_COUNT,
1826};
1827
1828enum efx_farch_filter_index {
1829 EFX_FARCH_FILTER_INDEX_UC_DEF,
1830 EFX_FARCH_FILTER_INDEX_MC_DEF,
1831 EFX_FARCH_FILTER_SIZE_RX_DEF,
1832};
1833
1834struct efx_farch_filter_spec {
1835 u8 type:4;
1836 u8 priority:4;
1837 u8 flags;
1838 u16 dmaq_id;
1839 u32 data[3];
1840};
1841
1842struct efx_farch_filter_table {
1843 enum efx_farch_filter_table_id id;
1844 u32 offset; /* address of table relative to BAR */
1845 unsigned size; /* number of entries */
1846 unsigned step; /* step between entries */
1847 unsigned used; /* number currently used */
1848 unsigned long *used_bitmap;
1849 struct efx_farch_filter_spec *spec;
1850 unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
1851};
1852
1853struct efx_farch_filter_state {
1854 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
1855};
1856
1857static void
1858efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1859 struct efx_farch_filter_table *table,
1860 unsigned int filter_idx);
1861
1862/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1863 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
1864static u16 efx_farch_filter_hash(u32 key)
1865{
1866 u16 tmp;
1867
1868 /* First 16 rounds */
1869 tmp = 0x1fff ^ key >> 16;
1870 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1871 tmp = tmp ^ tmp >> 9;
1872 /* Last 16 rounds */
1873 tmp = tmp ^ tmp << 13 ^ key;
1874 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1875 return tmp ^ tmp >> 9;
1876}
1877
1878/* To allow for hash collisions, filter search continues at these
1879 * increments from the first possible entry selected by the hash. */
1880static u16 efx_farch_filter_increment(u32 key)
1881{
1882 return key * 2 - 1;
1883}
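/* Since key * 2 - 1 is always odd and the table size is a power of two,
 * the probe stride is coprime with the table size and the sequence visits
 * every slot before repeating.  efx_farch_filter_insert() walks it like
 * this (a sketch of the loop only, not the full replace/insert logic):
 *
 *	unsigned int incr = efx_farch_filter_increment(key);
 *	unsigned int i = efx_farch_filter_hash(key) & (table->size - 1);
 *
 *	for (depth = 1; depth <= max_depth; depth++) {
 *		... examine table->spec[i] ...
 *		i = (i + incr) & (table->size - 1);
 *	}
 */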
1884
1885static enum efx_farch_filter_table_id
1886efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
1887{
1888 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1889 (EFX_FARCH_FILTER_TCP_FULL >> 2));
1890 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1891 (EFX_FARCH_FILTER_TCP_WILD >> 2));
1892 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1893 (EFX_FARCH_FILTER_UDP_FULL >> 2));
1894 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1895 (EFX_FARCH_FILTER_UDP_WILD >> 2));
1896 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1897 (EFX_FARCH_FILTER_MAC_FULL >> 2));
1898 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1899 (EFX_FARCH_FILTER_MAC_WILD >> 2));
1900 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
1901 EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
1902 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
1903}
1904
1905static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1906{
1907 struct efx_farch_filter_state *state = efx->filter_state;
1908 struct efx_farch_filter_table *table;
1909 efx_oword_t filter_ctl;
1910
1911 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1912
1913 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1914 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1915 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
1916 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1917 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1918 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
1919 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1920 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1921 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
1922 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1923 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1924 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
1925 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1926
1927 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1928 if (table->size) {
1929 EFX_SET_OWORD_FIELD(
1930 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1931 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1932 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1933 EFX_SET_OWORD_FIELD(
1934 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1935 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1936 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1937 }
1938
1939 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1940 if (table->size) {
1941 EFX_SET_OWORD_FIELD(
1942 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1943 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1944 EFX_SET_OWORD_FIELD(
1945 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1946 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1947 EFX_FILTER_FLAG_RX_RSS));
1948 EFX_SET_OWORD_FIELD(
1949 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1950 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1951 EFX_SET_OWORD_FIELD(
1952 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1953 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1954 EFX_FILTER_FLAG_RX_RSS));
1955
1956 /* There is a single bit to enable RX scatter for all
1957 * unmatched packets. Only set it if scatter is
1958 * enabled in both filter specs.
1959 */
1960 EFX_SET_OWORD_FIELD(
1961 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1962 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1963 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1964 EFX_FILTER_FLAG_RX_SCATTER));
1965 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1966 /* We don't expose 'default' filters because unmatched
1967 * packets always go to the queue number found in the
1968 * RSS table. But we still need to set the RX scatter
1969 * bit here.
1970 */
1971 EFX_SET_OWORD_FIELD(
1972 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1973 efx->rx_scatter);
1974 }
1975
1976 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1977}
1978
1979static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
1980{
1981 struct efx_farch_filter_state *state = efx->filter_state;
1982 struct efx_farch_filter_table *table;
1983 efx_oword_t tx_cfg;
1984
1985 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1986
1987 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
1988 if (table->size) {
1989 EFX_SET_OWORD_FIELD(
1990 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1991 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1992 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1993 EFX_SET_OWORD_FIELD(
1994 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1995 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1996 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1997 }
1998
1999 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
2000}
2001
2002static int
2003efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
2004 const struct efx_filter_spec *gen_spec)
2005{
2006 bool is_full = false;
2007
2008 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
2009 gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
2010 return -EINVAL;
2011
2012 spec->priority = gen_spec->priority;
2013 spec->flags = gen_spec->flags;
2014 spec->dmaq_id = gen_spec->dmaq_id;
2015
2016 switch (gen_spec->match_flags) {
2017 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2018 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
2019 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
2020 is_full = true;
2021 /* fall through */
2022 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2023 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
2024 __be32 rhost, host1, host2;
2025 __be16 rport, port1, port2;
2026
2027 EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
2028
2029 if (gen_spec->ether_type != htons(ETH_P_IP))
2030 return -EPROTONOSUPPORT;
2031 if (gen_spec->loc_port == 0 ||
2032 (is_full && gen_spec->rem_port == 0))
2033 return -EADDRNOTAVAIL;
2034 switch (gen_spec->ip_proto) {
2035 case IPPROTO_TCP:
2036 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
2037 EFX_FARCH_FILTER_TCP_WILD);
2038 break;
2039 case IPPROTO_UDP:
2040 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
2041 EFX_FARCH_FILTER_UDP_WILD);
2042 break;
2043 default:
2044 return -EPROTONOSUPPORT;
2045 }
2046
2047 /* Filter is constructed in terms of source and destination,
2048 * with the odd wrinkle that the ports are swapped in a UDP
2049 * wildcard filter. We need to convert from local and remote
2050 * (= zero for wildcard) addresses.
2051 */
2052 rhost = is_full ? gen_spec->rem_host[0] : 0;
2053 rport = is_full ? gen_spec->rem_port : 0;
2054 host1 = rhost;
2055 host2 = gen_spec->loc_host[0];
2056 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2057 port1 = gen_spec->loc_port;
2058 port2 = rport;
2059 } else {
2060 port1 = rport;
2061 port2 = gen_spec->loc_port;
2062 }
2063 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2064 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2065 spec->data[2] = ntohl(host2);
2066
2067 break;
2068 }
2069
2070 case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
2071 is_full = true;
2072 /* fall through */
2073 case EFX_FILTER_MATCH_LOC_MAC:
2074 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
2075 EFX_FARCH_FILTER_MAC_WILD);
2076 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2077 spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2078 gen_spec->loc_mac[3] << 16 |
2079 gen_spec->loc_mac[4] << 8 |
2080 gen_spec->loc_mac[5]);
2081 spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2082 gen_spec->loc_mac[1]);
2083 break;
2084
2085 case EFX_FILTER_MATCH_LOC_MAC_IG:
2086 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2087 EFX_FARCH_FILTER_MC_DEF :
2088 EFX_FARCH_FILTER_UC_DEF);
2089 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2090 break;
2091
2092 default:
2093 return -EPROTONOSUPPORT;
2094 }
2095
2096 return 0;
2097}
2098
2099static void
2100efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
2101 const struct efx_farch_filter_spec *spec)
2102{
2103 bool is_full = false;
2104
2105 /* *gen_spec should be completely initialised, to be consistent
2106 * with efx_filter_init_{rx,tx}() and in case we want to copy
2107 * it back to userland.
2108 */
2109 memset(gen_spec, 0, sizeof(*gen_spec));
2110
2111 gen_spec->priority = spec->priority;
2112 gen_spec->flags = spec->flags;
2113 gen_spec->dmaq_id = spec->dmaq_id;
2114
2115 switch (spec->type) {
2116 case EFX_FARCH_FILTER_TCP_FULL:
2117 case EFX_FARCH_FILTER_UDP_FULL:
2118 is_full = true;
2119 /* fall through */
2120 case EFX_FARCH_FILTER_TCP_WILD:
2121 case EFX_FARCH_FILTER_UDP_WILD: {
2122 __be32 host1, host2;
2123 __be16 port1, port2;
2124
2125 gen_spec->match_flags =
2126 EFX_FILTER_MATCH_ETHER_TYPE |
2127 EFX_FILTER_MATCH_IP_PROTO |
2128 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
2129 if (is_full)
2130 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
2131 EFX_FILTER_MATCH_REM_PORT);
2132 gen_spec->ether_type = htons(ETH_P_IP);
2133 gen_spec->ip_proto =
2134 (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
2135 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
2136 IPPROTO_TCP : IPPROTO_UDP;
2137
2138 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2139 port1 = htons(spec->data[0]);
2140 host2 = htonl(spec->data[2]);
2141 port2 = htons(spec->data[1] >> 16);
2142 if (spec->flags & EFX_FILTER_FLAG_TX) {
2143 gen_spec->loc_host[0] = host1;
2144 gen_spec->rem_host[0] = host2;
2145 } else {
2146 gen_spec->loc_host[0] = host2;
2147 gen_spec->rem_host[0] = host1;
2148 }
2149 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
2150 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2151 gen_spec->loc_port = port1;
2152 gen_spec->rem_port = port2;
2153 } else {
2154 gen_spec->loc_port = port2;
2155 gen_spec->rem_port = port1;
2156 }
2157
2158 break;
2159 }
2160
2161 case EFX_FARCH_FILTER_MAC_FULL:
2162 is_full = true;
2163 /* fall through */
2164 case EFX_FARCH_FILTER_MAC_WILD:
2165 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
2166 if (is_full)
2167 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
2168 gen_spec->loc_mac[0] = spec->data[2] >> 8;
2169 gen_spec->loc_mac[1] = spec->data[2];
2170 gen_spec->loc_mac[2] = spec->data[1] >> 24;
2171 gen_spec->loc_mac[3] = spec->data[1] >> 16;
2172 gen_spec->loc_mac[4] = spec->data[1] >> 8;
2173 gen_spec->loc_mac[5] = spec->data[1];
2174 gen_spec->outer_vid = htons(spec->data[0]);
2175 break;
2176
2177 case EFX_FARCH_FILTER_UC_DEF:
2178 case EFX_FARCH_FILTER_MC_DEF:
2179 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
2180 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
2181 break;
2182
2183 default:
2184 WARN_ON(1);
2185 break;
2186 }
2187}
2188
2189static void
2190efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
2191 struct efx_farch_filter_spec *spec)
2192{
2193 /* If there's only one channel then disable RSS for non VF
2194 * traffic, thereby allowing VFs to use RSS when the PF can't.
2195 */
2196 spec->priority = EFX_FILTER_PRI_REQUIRED;
2197 spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
2198 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
2199 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2200 spec->dmaq_id = 0;
2201}
2202
2203/* Build a filter entry and return its n-tuple key. */
2204static u32 efx_farch_filter_build(efx_oword_t *filter,
2205 struct efx_farch_filter_spec *spec)
2206{
2207 u32 data3;
2208
2209 switch (efx_farch_filter_spec_table_id(spec)) {
2210 case EFX_FARCH_FILTER_TABLE_RX_IP: {
2211 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
2212 spec->type == EFX_FARCH_FILTER_UDP_WILD);
2213 EFX_POPULATE_OWORD_7(
2214 *filter,
2215 FRF_BZ_RSS_EN,
2216 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2217 FRF_BZ_SCATTER_EN,
2218 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2219 FRF_BZ_TCP_UDP, is_udp,
2220 FRF_BZ_RXQ_ID, spec->dmaq_id,
2221 EFX_DWORD_2, spec->data[2],
2222 EFX_DWORD_1, spec->data[1],
2223 EFX_DWORD_0, spec->data[0]);
2224 data3 = is_udp;
2225 break;
2226 }
2227
2228 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
2229 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2230 EFX_POPULATE_OWORD_7(
2231 *filter,
2232 FRF_CZ_RMFT_RSS_EN,
2233 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2234 FRF_CZ_RMFT_SCATTER_EN,
2235 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2236 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2237 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2238 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2239 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2240 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2241 data3 = is_wild;
2242 break;
2243 }
2244
2245 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
2246 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2247 EFX_POPULATE_OWORD_5(*filter,
2248 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2249 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2250 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2251 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2252 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2253 data3 = is_wild | spec->dmaq_id << 1;
2254 break;
2255 }
2256
2257 default:
2258 BUG();
2259 }
2260
2261 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2262}
2263
2264static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
2265 const struct efx_farch_filter_spec *right)
2266{
2267 if (left->type != right->type ||
2268 memcmp(left->data, right->data, sizeof(left->data)))
2269 return false;
2270
2271 if (left->flags & EFX_FILTER_FLAG_TX &&
2272 left->dmaq_id != right->dmaq_id)
2273 return false;
2274
2275 return true;
2276}
2277
2278/*
2279 * Construct/deconstruct external filter IDs. At least the RX filter
2280 * IDs must be ordered by matching priority, for RX NFC semantics.
2281 *
2282 * Deconstruction needs to be robust against invalid IDs so that
2283 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
2284 * accept user-provided IDs.
2285 */
2286
2287#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
2288
2289static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
2290 [EFX_FARCH_FILTER_TCP_FULL] = 0,
2291 [EFX_FARCH_FILTER_UDP_FULL] = 0,
2292 [EFX_FARCH_FILTER_TCP_WILD] = 1,
2293 [EFX_FARCH_FILTER_UDP_WILD] = 1,
2294 [EFX_FARCH_FILTER_MAC_FULL] = 2,
2295 [EFX_FARCH_FILTER_MAC_WILD] = 3,
2296 [EFX_FARCH_FILTER_UC_DEF] = 4,
2297 [EFX_FARCH_FILTER_MC_DEF] = 4,
2298};
2299
2300static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
2301 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
2302 EFX_FARCH_FILTER_TABLE_RX_IP,
2303 EFX_FARCH_FILTER_TABLE_RX_MAC,
2304 EFX_FARCH_FILTER_TABLE_RX_MAC,
2305 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
2306 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
2307 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
2308};
2309
2310#define EFX_FARCH_FILTER_INDEX_WIDTH 13
2311#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
2312
2313static inline u32
2314efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
2315 unsigned int index)
2316{
2317 unsigned int range;
2318
2319 range = efx_farch_filter_type_match_pri[spec->type];
2320 if (!(spec->flags & EFX_FILTER_FLAG_RX))
2321 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
2322
2323 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
2324}
2325
2326static inline enum efx_farch_filter_table_id
2327efx_farch_filter_id_table_id(u32 id)
2328{
2329 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
2330
2331 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
2332 return efx_farch_filter_range_table[range];
2333 else
2334 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
2335}
2336
2337static inline unsigned int efx_farch_filter_id_index(u32 id)
2338{
2339 return id & EFX_FARCH_FILTER_INDEX_MASK;
2340}
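/* Worked example: an RX TCP wildcard filter at table index 5 has match
 * priority 1 in efx_farch_filter_type_match_pri[], so its external ID is
 * 1 << 13 | 5 = 8197.  Deconstruction reverses this: 8197 >> 13 gives
 * range 1, which efx_farch_filter_range_table[] maps back to the RX IP
 * table, and 8197 & 0x1fff recovers index 5.
 */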
2341
2342u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2343{
2344 struct efx_farch_filter_state *state = efx->filter_state;
2345 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2346 enum efx_farch_filter_table_id table_id;
2347
2348 do {
2349 table_id = efx_farch_filter_range_table[range];
2350 if (state->table[table_id].size != 0)
2351 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
2352 state->table[table_id].size;
2353 } while (range--);
2354
2355 return 0;
2356}
2357
2358s32 efx_farch_filter_insert(struct efx_nic *efx,
2359 struct efx_filter_spec *gen_spec,
2360 bool replace_equal)
2361{
2362 struct efx_farch_filter_state *state = efx->filter_state;
2363 struct efx_farch_filter_table *table;
2364 struct efx_farch_filter_spec spec;
2365 efx_oword_t filter;
2366 int rep_index, ins_index;
2367 unsigned int depth = 0;
2368 int rc;
2369
2370 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2371 if (rc)
2372 return rc;
2373
2374 table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2375 if (table->size == 0)
2376 return -EINVAL;
2377
2378 netif_vdbg(efx, hw, efx->net_dev,
2379 "%s: type %d search_limit=%d", __func__, spec.type,
2380 table->search_limit[spec.type]);
2381
2382 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2383 /* One filter spec per type */
2384 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
2385 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
2386 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
2387 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
2388 ins_index = rep_index;
2389
2390 spin_lock_bh(&efx->filter_lock);
2391 } else {
2392 /* Search concurrently for
2393 * (1) a filter to be replaced (rep_index): any filter
2394 * with the same match values, up to the current
2395 * search depth for this type, and
2396 * (2) the insertion point (ins_index): (1) or any
2397 * free slot before it or up to the maximum search
2398 * depth for this priority
2399 * We fail if we cannot find (2).
2400 *
2401 * We can stop once either
2402 * (a) we find (1), in which case we have definitely
2403 * found (2) as well; or
2404 * (b) we have searched exhaustively for (1), and have
2405 * either found (2) or searched exhaustively for it
2406 */
2407 u32 key = efx_farch_filter_build(&filter, &spec);
2408 unsigned int hash = efx_farch_filter_hash(key);
2409 unsigned int incr = efx_farch_filter_increment(key);
2410 unsigned int max_rep_depth = table->search_limit[spec.type];
2411 unsigned int max_ins_depth =
2412 spec.priority <= EFX_FILTER_PRI_HINT ?
2413 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2414 EFX_FARCH_FILTER_CTL_SRCH_MAX;
2415 unsigned int i = hash & (table->size - 1);
2416
2417 ins_index = -1;
2418 depth = 1;
2419
2420 spin_lock_bh(&efx->filter_lock);
2421
2422 for (;;) {
2423 if (!test_bit(i, table->used_bitmap)) {
2424 if (ins_index < 0)
2425 ins_index = i;
2426 } else if (efx_farch_filter_equal(&spec,
2427 &table->spec[i])) {
2428 /* Case (a) */
2429 if (ins_index < 0)
2430 ins_index = i;
2431 rep_index = i;
2432 break;
2433 }
2434
2435 if (depth >= max_rep_depth &&
2436 (ins_index >= 0 || depth >= max_ins_depth)) {
2437 /* Case (b) */
2438 if (ins_index < 0) {
2439 rc = -EBUSY;
2440 goto out;
2441 }
2442 rep_index = -1;
2443 break;
2444 }
2445
2446 i = (i + incr) & (table->size - 1);
2447 ++depth;
2448 }
2449 }
2450
2451 /* If we found a filter to be replaced, check whether we
2452 * should do so
2453 */
2454 if (rep_index >= 0) {
2455 struct efx_farch_filter_spec *saved_spec =
2456 &table->spec[rep_index];
2457
2458 if (spec.priority == saved_spec->priority && !replace_equal) {
2459 rc = -EEXIST;
2460 goto out;
2461 }
2462 if (spec.priority < saved_spec->priority &&
2463 !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
2464 saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
2465 rc = -EPERM;
2466 goto out;
2467 }
2468 if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
2469 /* Just make sure it won't be removed */
2470 saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
2471 rc = 0;
2472 goto out;
2473 }
2474 /* Retain the RX_STACK flag */
2475 spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
2476 }
2477
2478 /* Insert the filter */
2479 if (ins_index != rep_index) {
2480 __set_bit(ins_index, table->used_bitmap);
2481 ++table->used;
2482 }
2483 table->spec[ins_index] = spec;
2484
2485 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2486 efx_farch_filter_push_rx_config(efx);
2487 } else {
2488 if (table->search_limit[spec.type] < depth) {
2489 table->search_limit[spec.type] = depth;
2490 if (spec.flags & EFX_FILTER_FLAG_TX)
2491 efx_farch_filter_push_tx_limits(efx);
2492 else
2493 efx_farch_filter_push_rx_config(efx);
2494 }
2495
2496 efx_writeo(efx, &filter,
2497 table->offset + table->step * ins_index);
2498
2499 /* If we were able to replace a filter by inserting
2500 * at a lower depth, clear the replaced filter
2501 */
2502 if (ins_index != rep_index && rep_index >= 0)
2503 efx_farch_filter_table_clear_entry(efx, table,
2504 rep_index);
2505 }
2506
2507 netif_vdbg(efx, hw, efx->net_dev,
2508 "%s: filter type %d index %d rxq %u set",
2509 __func__, spec.type, ins_index, spec.dmaq_id);
2510 rc = efx_farch_filter_make_id(&spec, ins_index);
2511
2512out:
2513 spin_unlock_bh(&efx->filter_lock);
2514 return rc;
2515}
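/* A minimal sketch of a caller, using the generic helpers from filter.h
 * (the helper names are real; the address, port and queue are purely
 * illustrative): steer TCP 192.168.0.1:80 to RX queue 0, replacing an
 * equal existing filter if there is one:
 *
 *	struct efx_filter_spec spec;
 *	s32 rc;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
 *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
 *				  htonl(0xc0a80001), htons(80));
 *	rc = efx_farch_filter_insert(efx, &spec, true);
 *	if (rc >= 0)
 *		... rc is the external filter ID ...
 */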
2516
2517static void
2518efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2519 struct efx_farch_filter_table *table,
2520 unsigned int filter_idx)
2521{
2522 static efx_oword_t filter; /* static => all zeroes, used to clear the HW entry */
2523
2524 EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2525 BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2526
2527 __clear_bit(filter_idx, table->used_bitmap);
2528 --table->used;
2529 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2530
2531 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2532
2533 /* If this filter required a greater search depth than
2534 * any other, the search limit for its type can now be
2535 * decreased. However, it is hard to determine that
2536 * unless the table has become completely empty - in
2537 * which case, all its search limits can be set to 0.
2538 */
2539 if (unlikely(table->used == 0)) {
2540 memset(table->search_limit, 0, sizeof(table->search_limit));
2541 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
2542 efx_farch_filter_push_tx_limits(efx);
2543 else
2544 efx_farch_filter_push_rx_config(efx);
2545 }
2546}
2547
2548static int efx_farch_filter_remove(struct efx_nic *efx,
2549 struct efx_farch_filter_table *table,
2550 unsigned int filter_idx,
2551 enum efx_filter_priority priority)
2552{
2553 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
2554
2555 if (!test_bit(filter_idx, table->used_bitmap) ||
2556 spec->priority > priority)
2557 return -ENOENT;
2558
2559 if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
2560 efx_farch_filter_init_rx_for_stack(efx, spec);
2561 efx_farch_filter_push_rx_config(efx);
2562 } else {
2563 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2564 }
2565
2566 return 0;
2567}
2568
2569int efx_farch_filter_remove_safe(struct efx_nic *efx,
2570 enum efx_filter_priority priority,
2571 u32 filter_id)
2572{
2573 struct efx_farch_filter_state *state = efx->filter_state;
2574 enum efx_farch_filter_table_id table_id;
2575 struct efx_farch_filter_table *table;
2576 unsigned int filter_idx;
2577 struct efx_farch_filter_spec *spec;
2578 int rc;
2579
2580 table_id = efx_farch_filter_id_table_id(filter_id);
2581 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2582 return -ENOENT;
2583 table = &state->table[table_id];
2584
2585 filter_idx = efx_farch_filter_id_index(filter_id);
2586 if (filter_idx >= table->size)
2587 return -ENOENT;
2588 spec = &table->spec[filter_idx];
2589
2590 spin_lock_bh(&efx->filter_lock);
2591 rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
2592 spin_unlock_bh(&efx->filter_lock);
2593
2594 return rc;
2595}
2596
2597int efx_farch_filter_get_safe(struct efx_nic *efx,
2598 enum efx_filter_priority priority,
2599 u32 filter_id, struct efx_filter_spec *spec_buf)
2600{
2601 struct efx_farch_filter_state *state = efx->filter_state;
2602 enum efx_farch_filter_table_id table_id;
2603 struct efx_farch_filter_table *table;
2604 struct efx_farch_filter_spec *spec;
2605 unsigned int filter_idx;
2606 int rc;
2607
2608 table_id = efx_farch_filter_id_table_id(filter_id);
2609 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2610 return -ENOENT;
2611 table = &state->table[table_id];
2612
2613 filter_idx = efx_farch_filter_id_index(filter_id);
2614 if (filter_idx >= table->size)
2615 return -ENOENT;
2616 spec = &table->spec[filter_idx];
2617
2618 spin_lock_bh(&efx->filter_lock);
2619
2620 if (test_bit(filter_idx, table->used_bitmap) &&
2621 spec->priority == priority) {
2622 efx_farch_filter_to_gen_spec(spec_buf, spec);
2623 rc = 0;
2624 } else {
2625 rc = -ENOENT;
2626 }
2627
2628 spin_unlock_bh(&efx->filter_lock);
2629
2630 return rc;
2631}
2632
2633static void
2634efx_farch_filter_table_clear(struct efx_nic *efx,
2635 enum efx_farch_filter_table_id table_id,
2636 enum efx_filter_priority priority)
2637{
2638 struct efx_farch_filter_state *state = efx->filter_state;
2639 struct efx_farch_filter_table *table = &state->table[table_id];
2640 unsigned int filter_idx;
2641
2642 spin_lock_bh(&efx->filter_lock);
2643 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
2644 efx_farch_filter_remove(efx, table, filter_idx, priority);
2645 spin_unlock_bh(&efx->filter_lock);
2646}
2647
2648void efx_farch_filter_clear_rx(struct efx_nic *efx,
2649 enum efx_filter_priority priority)
2650{
2651 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2652 priority);
2653 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2654 priority);
2655 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
2656 priority);
2657}
2658
2659u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2660 enum efx_filter_priority priority)
2661{
2662 struct efx_farch_filter_state *state = efx->filter_state;
2663 enum efx_farch_filter_table_id table_id;
2664 struct efx_farch_filter_table *table;
2665 unsigned int filter_idx;
2666 u32 count = 0;
2667
2668 spin_lock_bh(&efx->filter_lock);
2669
2670 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2671 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2672 table_id++) {
2673 table = &state->table[table_id];
2674 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2675 if (test_bit(filter_idx, table->used_bitmap) &&
2676 table->spec[filter_idx].priority == priority)
2677 ++count;
2678 }
2679 }
2680
2681 spin_unlock_bh(&efx->filter_lock);
2682
2683 return count;
2684}
2685
2686s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2687 enum efx_filter_priority priority,
2688 u32 *buf, u32 size)
2689{
2690 struct efx_farch_filter_state *state = efx->filter_state;
2691 enum efx_farch_filter_table_id table_id;
2692 struct efx_farch_filter_table *table;
2693 unsigned int filter_idx;
2694 s32 count = 0;
2695
2696 spin_lock_bh(&efx->filter_lock);
2697
2698 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2699 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2700 table_id++) {
2701 table = &state->table[table_id];
2702 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2703 if (test_bit(filter_idx, table->used_bitmap) &&
2704 table->spec[filter_idx].priority == priority) {
2705 if (count == size) {
2706 count = -EMSGSIZE;
2707 goto out;
2708 }
2709 buf[count++] = efx_farch_filter_make_id(
2710 &table->spec[filter_idx], filter_idx);
2711 }
2712 }
2713 }
2714out:
2715 spin_unlock_bh(&efx->filter_lock);
2716
2717 return count;
2718}
2719
2720/* Restore filter state after reset */
2721void efx_farch_filter_table_restore(struct efx_nic *efx)
2722{
2723 struct efx_farch_filter_state *state = efx->filter_state;
2724 enum efx_farch_filter_table_id table_id;
2725 struct efx_farch_filter_table *table;
2726 efx_oword_t filter;
2727 unsigned int filter_idx;
2728
2729 spin_lock_bh(&efx->filter_lock);
2730
2731 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2732 table = &state->table[table_id];
2733
2734 /* Check whether this is a regular register table */
2735 if (table->step == 0)
2736 continue;
2737
2738 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2739 if (!test_bit(filter_idx, table->used_bitmap))
2740 continue;
2741 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2742 efx_writeo(efx, &filter,
2743 table->offset + table->step * filter_idx);
2744 }
2745 }
2746
2747 efx_farch_filter_push_rx_config(efx);
2748 efx_farch_filter_push_tx_limits(efx);
2749
2750 spin_unlock_bh(&efx->filter_lock);
2751}
2752
2753void efx_farch_filter_table_remove(struct efx_nic *efx)
2754{
2755 struct efx_farch_filter_state *state = efx->filter_state;
2756 enum efx_farch_filter_table_id table_id;
2757
2758 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2759 kfree(state->table[table_id].used_bitmap);
2760 vfree(state->table[table_id].spec);
2761 }
2762 kfree(state);
2763}
2764
2765int efx_farch_filter_table_probe(struct efx_nic *efx)
2766{
2767 struct efx_farch_filter_state *state;
2768 struct efx_farch_filter_table *table;
2769 unsigned table_id;
2770
2771 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
2772 if (!state)
2773 return -ENOMEM;
2774 efx->filter_state = state;
2775
2776 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2777 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2778 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
2779 table->offset = FR_BZ_RX_FILTER_TBL0;
2780 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2781 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2782 }
2783
2784 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
2785 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
2786 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
2787 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
2788 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
2789 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
2790
2791 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2792 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
2793 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
2794
2795 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2796 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
2797 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
2798 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
2799 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
2800 }
2801
2802 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2803 table = &state->table[table_id];
2804 if (table->size == 0)
2805 continue;
2806 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2807 sizeof(unsigned long),
2808 GFP_KERNEL);
2809 if (!table->used_bitmap)
2810 goto fail;
2811 table->spec = vzalloc(table->size * sizeof(*table->spec));
2812 if (!table->spec)
2813 goto fail;
2814 }
2815
2816 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2817 if (table->size) {
2818 /* RX default filters must always exist */
2819 struct efx_farch_filter_spec *spec;
2820 unsigned i;
2821
2822 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
2823 spec = &table->spec[i];
2824 spec->type = EFX_FARCH_FILTER_UC_DEF + i;
2825 efx_farch_filter_init_rx_for_stack(efx, spec);
2826 __set_bit(i, table->used_bitmap);
2827 }
2828 }
2829
2830 efx_farch_filter_push_rx_config(efx);
2831
2832 return 0;
2833
2834fail:
2835 efx_farch_filter_table_remove(efx);
2836 return -ENOMEM;
2837}
2838
2839/* Update scatter enable flags for filters pointing to our own RX queues */
2840void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2841{
2842 struct efx_farch_filter_state *state = efx->filter_state;
2843 enum efx_farch_filter_table_id table_id;
2844 struct efx_farch_filter_table *table;
2845 efx_oword_t filter;
2846 unsigned int filter_idx;
2847
2848 spin_lock_bh(&efx->filter_lock);
2849
2850 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2851 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2852 table_id++) {
2853 table = &state->table[table_id];
2854
2855 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2856 if (!test_bit(filter_idx, table->used_bitmap) ||
2857 table->spec[filter_idx].dmaq_id >=
2858 efx->n_rx_channels)
2859 continue;
2860
2861 if (efx->rx_scatter)
2862 table->spec[filter_idx].flags |=
2863 EFX_FILTER_FLAG_RX_SCATTER;
2864 else
2865 table->spec[filter_idx].flags &=
2866 ~EFX_FILTER_FLAG_RX_SCATTER;
2867
2868 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
2869 /* Pushed by efx_farch_filter_push_rx_config() */
2870 continue;
2871
2872 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2873 efx_writeo(efx, &filter,
2874 table->offset + table->step * filter_idx);
2875 }
2876 }
2877
2878 efx_farch_filter_push_rx_config(efx);
2879
2880 spin_unlock_bh(&efx->filter_lock);
2881}
2882
2883#ifdef CONFIG_RFS_ACCEL
2884
2885s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
2886 struct efx_filter_spec *gen_spec)
2887{
2888 return efx_farch_filter_insert(efx, gen_spec, true);
2889}
2890
2891bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2892 unsigned int index)
2893{
2894 struct efx_farch_filter_state *state = efx->filter_state;
2895 struct efx_farch_filter_table *table =
2896 &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2897
2898 if (test_bit(index, table->used_bitmap) &&
2899 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
2900 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2901 flow_id, index)) {
2902 efx_farch_filter_table_clear_entry(efx, table, index);
2903 return true;
2904 }
2905
2906 return false;
2907}
2908
2909#endif /* CONFIG_RFS_ACCEL */
2910
2911void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
2912{
2913 struct net_device *net_dev = efx->net_dev;
2914 struct netdev_hw_addr *ha;
2915 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2916 u32 crc;
2917 int bit;
2918
2919 netif_addr_lock_bh(net_dev);
2920
2921 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2922
2923 /* Build multicast hash table */
2924 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2925 memset(mc_hash, 0xff, sizeof(*mc_hash));
2926 } else {
2927 memset(mc_hash, 0x00, sizeof(*mc_hash));
2928 netdev_for_each_mc_addr(ha, net_dev) {
2929 crc = ether_crc_le(ETH_ALEN, ha->addr);
2930 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2931 __set_bit_le(bit, mc_hash);
2932 }
2933
2934 /* Broadcast packets go through the multicast hash filter.
2935 * ether_crc_le() of the broadcast address is 0xbe2612ff
2936 * so we always add bit 0xff to the mask.
2937 */
2938 __set_bit_le(0xff, mc_hash);
2939 }
2940
2941 netif_addr_unlock_bh(net_dev);
2942}
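/* Worked example of the bucket arithmetic above: the hash register has
 * EFX_MCAST_HASH_ENTRIES (256) bits, so the bucket is just the low eight
 * bits of the little-endian CRC.  For the broadcast address that CRC is
 * 0xbe2612ff (per the comment above), and 0xbe2612ff & 0xff = 0xff,
 * which is why that bit is always set.
 */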
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/farch_regs.h
index ade4c4dc56ca..7019a712e799 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/farch_regs.h
@@ -1,15 +1,15 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation, incorporated herein by reference.
  */
 
-#ifndef EFX_REGS_H
-#define EFX_REGS_H
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
 
 /*
  * Falcon hardware architecture definitions have a name prefix following
@@ -2925,264 +2925,8 @@
 #define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
 #define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
 
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- *
- */
-
-#define GRxGoodOct_offset 0x0
-#define GRxGoodOct_WIDTH 48
-#define GRxBadOct_offset 0x8
-#define GRxBadOct_WIDTH 48
-#define GRxMissPkt_offset 0x10
-#define GRxMissPkt_WIDTH 32
-#define GRxFalseCRS_offset 0x14
-#define GRxFalseCRS_WIDTH 32
-#define GRxPausePkt_offset 0x18
-#define GRxPausePkt_WIDTH 32
-#define GRxBadPkt_offset 0x1C
-#define GRxBadPkt_WIDTH 32
-#define GRxUcastPkt_offset 0x20
-#define GRxUcastPkt_WIDTH 32
-#define GRxMcastPkt_offset 0x24
-#define GRxMcastPkt_WIDTH 32
-#define GRxBcastPkt_offset 0x28
-#define GRxBcastPkt_WIDTH 32
-#define GRxGoodLt64Pkt_offset 0x2C
-#define GRxGoodLt64Pkt_WIDTH 32
-#define GRxBadLt64Pkt_offset 0x30
-#define GRxBadLt64Pkt_WIDTH 32
-#define GRx64Pkt_offset 0x34
-#define GRx64Pkt_WIDTH 32
-#define GRx65to127Pkt_offset 0x38
-#define GRx65to127Pkt_WIDTH 32
-#define GRx128to255Pkt_offset 0x3C
-#define GRx128to255Pkt_WIDTH 32
-#define GRx256to511Pkt_offset 0x40
-#define GRx256to511Pkt_WIDTH 32
-#define GRx512to1023Pkt_offset 0x44
-#define GRx512to1023Pkt_WIDTH 32
-#define GRx1024to15xxPkt_offset 0x48
-#define GRx1024to15xxPkt_WIDTH 32
-#define GRx15xxtoJumboPkt_offset 0x4C
-#define GRx15xxtoJumboPkt_WIDTH 32
-#define GRxGtJumboPkt_offset 0x50
-#define GRxGtJumboPkt_WIDTH 32
-#define GRxFcsErr64to15xxPkt_offset 0x54
-#define GRxFcsErr64to15xxPkt_WIDTH 32
-#define GRxFcsErr15xxtoJumboPkt_offset 0x58
-#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
-#define GRxFcsErrGtJumboPkt_offset 0x5C
-#define GRxFcsErrGtJumboPkt_WIDTH 32
-#define GTxGoodBadOct_offset 0x80
-#define GTxGoodBadOct_WIDTH 48
-#define GTxGoodOct_offset 0x88
-#define GTxGoodOct_WIDTH 48
-#define GTxSglColPkt_offset 0x90
-#define GTxSglColPkt_WIDTH 32
-#define GTxMultColPkt_offset 0x94
-#define GTxMultColPkt_WIDTH 32
-#define GTxExColPkt_offset 0x98
-#define GTxExColPkt_WIDTH 32
-#define GTxDefPkt_offset 0x9C
-#define GTxDefPkt_WIDTH 32
-#define GTxLateCol_offset 0xA0
-#define GTxLateCol_WIDTH 32
-#define GTxExDefPkt_offset 0xA4
-#define GTxExDefPkt_WIDTH 32
-#define GTxPausePkt_offset 0xA8
-#define GTxPausePkt_WIDTH 32
-#define GTxBadPkt_offset 0xAC
-#define GTxBadPkt_WIDTH 32
-#define GTxUcastPkt_offset 0xB0
-#define GTxUcastPkt_WIDTH 32
-#define GTxMcastPkt_offset 0xB4
-#define GTxMcastPkt_WIDTH 32
-#define GTxBcastPkt_offset 0xB8
-#define GTxBcastPkt_WIDTH 32
-#define GTxLt64Pkt_offset 0xBC
-#define GTxLt64Pkt_WIDTH 32
-#define GTx64Pkt_offset 0xC0
-#define GTx64Pkt_WIDTH 32
-#define GTx65to127Pkt_offset 0xC4
-#define GTx65to127Pkt_WIDTH 32
-#define GTx128to255Pkt_offset 0xC8
-#define GTx128to255Pkt_WIDTH 32
-#define GTx256to511Pkt_offset 0xCC
-#define GTx256to511Pkt_WIDTH 32
-#define GTx512to1023Pkt_offset 0xD0
-#define GTx512to1023Pkt_WIDTH 32
-#define GTx1024to15xxPkt_offset 0xD4
-#define GTx1024to15xxPkt_WIDTH 32
-#define GTx15xxtoJumboPkt_offset 0xD8
-#define GTx15xxtoJumboPkt_WIDTH 32
-#define GTxGtJumboPkt_offset 0xDC
-#define GTxGtJumboPkt_WIDTH 32
-#define GTxNonTcpUdpPkt_offset 0xE0
-#define GTxNonTcpUdpPkt_WIDTH 16
-#define GTxMacSrcErrPkt_offset 0xE4
-#define GTxMacSrcErrPkt_WIDTH 16
-#define GTxIpSrcErrPkt_offset 0xE8
-#define GTxIpSrcErrPkt_WIDTH 16
-#define GDmaDone_offset 0xEC
-#define GDmaDone_WIDTH 32
-
-#define XgRxOctets_offset 0x0
-#define XgRxOctets_WIDTH 48
-#define XgRxOctetsOK_offset 0x8
-#define XgRxOctetsOK_WIDTH 48
-#define XgRxPkts_offset 0x10
-#define XgRxPkts_WIDTH 32
-#define XgRxPktsOK_offset 0x14
-#define XgRxPktsOK_WIDTH 32
-#define XgRxBroadcastPkts_offset 0x18
-#define XgRxBroadcastPkts_WIDTH 32
-#define XgRxMulticastPkts_offset 0x1C
-#define XgRxMulticastPkts_WIDTH 32
-#define XgRxUnicastPkts_offset 0x20
-#define XgRxUnicastPkts_WIDTH 32
-#define XgRxUndersizePkts_offset 0x24
-#define XgRxUndersizePkts_WIDTH 32
-#define XgRxOversizePkts_offset 0x28
-#define XgRxOversizePkts_WIDTH 32
-#define XgRxJabberPkts_offset 0x2C
-#define XgRxJabberPkts_WIDTH 32
-#define XgRxUndersizeFCSerrorPkts_offset 0x30
-#define XgRxUndersizeFCSerrorPkts_WIDTH 32
-#define XgRxDropEvents_offset 0x34
-#define XgRxDropEvents_WIDTH 32
-#define XgRxFCSerrorPkts_offset 0x38
-#define XgRxFCSerrorPkts_WIDTH 32
-#define XgRxAlignError_offset 0x3C
-#define XgRxAlignError_WIDTH 32
-#define XgRxSymbolError_offset 0x40
-#define XgRxSymbolError_WIDTH 32
-#define XgRxInternalMACError_offset 0x44
-#define XgRxInternalMACError_WIDTH 32
-#define XgRxControlPkts_offset 0x48
-#define XgRxControlPkts_WIDTH 32
-#define XgRxPausePkts_offset 0x4C
-#define XgRxPausePkts_WIDTH 32
-#define XgRxPkts64Octets_offset 0x50
-#define XgRxPkts64Octets_WIDTH 32
-#define XgRxPkts65to127Octets_offset 0x54
-#define XgRxPkts65to127Octets_WIDTH 32
-#define XgRxPkts128to255Octets_offset 0x58
-#define XgRxPkts128to255Octets_WIDTH 32
-#define XgRxPkts256to511Octets_offset 0x5C
-#define XgRxPkts256to511Octets_WIDTH 32
-#define XgRxPkts512to1023Octets_offset 0x60
-#define XgRxPkts512to1023Octets_WIDTH 32
-#define XgRxPkts1024to15xxOctets_offset 0x64
-#define XgRxPkts1024to15xxOctets_WIDTH 32
-#define XgRxPkts15xxtoMaxOctets_offset 0x68
-#define XgRxPkts15xxtoMaxOctets_WIDTH 32
-#define XgRxLengthError_offset 0x6C
-#define XgRxLengthError_WIDTH 32
-#define XgTxPkts_offset 0x80
-#define XgTxPkts_WIDTH 32
-#define XgTxOctets_offset 0x88
-#define XgTxOctets_WIDTH 48
-#define XgTxMulticastPkts_offset 0x90
-#define XgTxMulticastPkts_WIDTH 32
-#define XgTxBroadcastPkts_offset 0x94
-#define XgTxBroadcastPkts_WIDTH 32
-#define XgTxUnicastPkts_offset 0x98
-#define XgTxUnicastPkts_WIDTH 32
-#define XgTxControlPkts_offset 0x9C
-#define XgTxControlPkts_WIDTH 32
-#define XgTxPausePkts_offset 0xA0
-#define XgTxPausePkts_WIDTH 32
-#define XgTxPkts64Octets_offset 0xA4
-#define XgTxPkts64Octets_WIDTH 32
-#define XgTxPkts65to127Octets_offset 0xA8
-#define XgTxPkts65to127Octets_WIDTH 32
-#define XgTxPkts128to255Octets_offset 0xAC
-#define XgTxPkts128to255Octets_WIDTH 32
-#define XgTxPkts256to511Octets_offset 0xB0
-#define XgTxPkts256to511Octets_WIDTH 32
3107#define XgTxPkts512to1023Octets_offset 0xB4
3108#define XgTxPkts512to1023Octets_WIDTH 32
3109#define XgTxPkts1024to15xxOctets_offset 0xB8
3110#define XgTxPkts1024to15xxOctets_WIDTH 32
3111#define XgTxPkts1519toMaxOctets_offset 0xBC
3112#define XgTxPkts1519toMaxOctets_WIDTH 32
3113#define XgTxUndersizePkts_offset 0xC0
3114#define XgTxUndersizePkts_WIDTH 32
3115#define XgTxOversizePkts_offset 0xC4
3116#define XgTxOversizePkts_WIDTH 32
3117#define XgTxNonTcpUdpPkt_offset 0xC8
3118#define XgTxNonTcpUdpPkt_WIDTH 16
3119#define XgTxMacSrcErrPkt_offset 0xCC
3120#define XgTxMacSrcErrPkt_WIDTH 16
3121#define XgTxIpSrcErrPkt_offset 0xD0
3122#define XgTxIpSrcErrPkt_WIDTH 16
3123#define XgDmaDone_offset 0xD4
3124#define XgDmaDone_WIDTH 32
3125
3126#define FALCON_STATS_NOT_DONE 0x00000000
3127#define FALCON_STATS_DONE 0xffffffff
3128
3129/**************************************************************************
3130 *
3131 * Falcon non-volatile configuration
3132 *
3133 **************************************************************************
3134 */
3135 2931
3136/* Board configuration v2 (v1 is obsolete; later versions are compatible) */ 2932#endif /* EFX_FARCH_REGS_H */
3137struct falcon_nvconfig_board_v2 {
3138 __le16 nports;
3139 u8 port0_phy_addr;
3140 u8 port0_phy_type;
3141 u8 port1_phy_addr;
3142 u8 port1_phy_type;
3143 __le16 asic_sub_revision;
3144 __le16 board_revision;
3145} __packed;
3146
3147/* Board configuration v3 extra information */
3148struct falcon_nvconfig_board_v3 {
3149 __le32 spi_device_type[2];
3150} __packed;
3151
3152/* Bit numbers for spi_device_type */
3153#define SPI_DEV_TYPE_SIZE_LBN 0
3154#define SPI_DEV_TYPE_SIZE_WIDTH 5
3155#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
3156#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
3157#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
3158#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
3159#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
3160#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
3161#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
3162#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
3163#define SPI_DEV_TYPE_FIELD(type, field) \
3164 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
3165
3166#define FALCON_NVCONFIG_OFFSET 0x300
3167
3168#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
3169struct falcon_nvconfig {
3170 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
3171 u8 mac_address[2][8]; /* 0x310 */
3172 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
3173 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
3174 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
3175 efx_oword_t hw_init_reg; /* 0x350 */
3176 efx_oword_t nic_stat_reg; /* 0x360 */
3177 efx_oword_t glb_ctl_reg; /* 0x370 */
3178 efx_oword_t srm_cfg_reg; /* 0x380 */
3179 efx_oword_t spare_reg; /* 0x390 */
3180 __le16 board_magic_num; /* 0x3A0 */
3181 __le16 board_struct_ver;
3182 __le16 board_checksum;
3183 struct falcon_nvconfig_board_v2 board_v2;
3184 efx_oword_t ee_base_page_reg; /* 0x3B0 */
3185 struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
3186} __packed;
3187
3188#endif /* EFX_REGS_H */
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
deleted file mode 100644
index 30d744235d27..000000000000
--- a/drivers/net/ethernet/sfc/filter.c
+++ /dev/null
@@ -1,1274 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/in.h>
11#include <net/ip.h>
12#include "efx.h"
13#include "filter.h"
14#include "io.h"
15#include "nic.h"
16#include "regs.h"
17
18/* "Fudge factors" - difference between programmed value and actual depth.
19 * Due to the pipelined implementation we need to program the H/W with a value that
20 * is larger than the hop limit we want.
21 */
22#define FILTER_CTL_SRCH_FUDGE_WILD 3
23#define FILTER_CTL_SRCH_FUDGE_FULL 1
24
25/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
26 * We also need to avoid infinite loops in efx_filter_search() when the
27 * table is full.
28 */
29#define FILTER_CTL_SRCH_MAX 200
30
31/* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33#define FILTER_CTL_SRCH_HINT_MAX 5
34
35enum efx_filter_table_id {
36 EFX_FILTER_TABLE_RX_IP = 0,
37 EFX_FILTER_TABLE_RX_MAC,
38 EFX_FILTER_TABLE_RX_DEF,
39 EFX_FILTER_TABLE_TX_MAC,
40 EFX_FILTER_TABLE_COUNT,
41};
42
43enum efx_filter_index {
44 EFX_FILTER_INDEX_UC_DEF,
45 EFX_FILTER_INDEX_MC_DEF,
46 EFX_FILTER_SIZE_RX_DEF,
47};
48
49struct efx_filter_table {
50 enum efx_filter_table_id id;
51 u32 offset; /* address of table relative to BAR */
52 unsigned size; /* number of entries */
53 unsigned step; /* step between entries */
54 unsigned used; /* number currently used */
55 unsigned long *used_bitmap;
56 struct efx_filter_spec *spec;
57 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
58};
59
60struct efx_filter_state {
61 spinlock_t lock;
62 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
63#ifdef CONFIG_RFS_ACCEL
64 u32 *rps_flow_id;
65 unsigned rps_expire_index;
66#endif
67};
68
69static void efx_filter_table_clear_entry(struct efx_nic *efx,
70 struct efx_filter_table *table,
71 unsigned int filter_idx);
72
73/* The filter hash function is an LFSR with polynomial x^16 + x^3 + 1, applied
74 * to a 32-bit key derived from the n-tuple. The initial LFSR state is 0xffff. */
75static u16 efx_filter_hash(u32 key)
76{
77 u16 tmp;
78
79 /* First 16 rounds */
80 tmp = 0x1fff ^ key >> 16;
81 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
82 tmp = tmp ^ tmp >> 9;
83 /* Last 16 rounds */
84 tmp = tmp ^ tmp << 13 ^ key;
85 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
86 return tmp ^ tmp >> 9;
87}
88
89/* To allow for hash collisions, filter search continues at these
90 * increments from the first possible entry selected by the hash. */
91static u16 efx_filter_increment(u32 key)
92{
93 return key * 2 - 1;
94}
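
Taken together, efx_filter_hash() and efx_filter_increment() implement double hashing over a power-of-two table: the hash picks the first candidate slot, and since key * 2 - 1 is always odd (hence coprime with the table size) the probe sequence is guaranteed to visit every slot before repeating. A standalone demonstration of that coverage property (the key is arbitrary; 8192 is the Falcon B0 RX filter table size):

	#include <stdio.h>

	int main(void)
	{
		static unsigned char seen[8192];
		unsigned int size = 8192;		/* power of two, as in the driver */
		unsigned int key = 0x2f693081;		/* arbitrary example key */
		unsigned int incr = key * 2 - 1;	/* efx_filter_increment(key) */
		unsigned int i = 0, n, visited = 0;

		for (n = 0; n < size; n++) {
			if (!seen[i]) {
				seen[i] = 1;
				visited++;
			}
			i = (i + incr) & (size - 1);
		}
		printf("visited %u of %u slots\n", visited, size);	/* prints 8192 of 8192 */
		return 0;
	}
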
95
96static enum efx_filter_table_id
97efx_filter_spec_table_id(const struct efx_filter_spec *spec)
98{
99 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
100 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
101 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
102 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
103 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
104 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
105 BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
106 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
107 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
108}
109
110static struct efx_filter_table *
111efx_filter_spec_table(struct efx_filter_state *state,
112 const struct efx_filter_spec *spec)
113{
114 if (spec->type == EFX_FILTER_UNSPEC)
115 return NULL;
116 else
117 return &state->table[efx_filter_spec_table_id(spec)];
118}
119
120static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
121{
122 memset(table->search_depth, 0, sizeof(table->search_depth));
123}
124
125static void efx_filter_push_rx_config(struct efx_nic *efx)
126{
127 struct efx_filter_state *state = efx->filter_state;
128 struct efx_filter_table *table;
129 efx_oword_t filter_ctl;
130
131 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
132
133 table = &state->table[EFX_FILTER_TABLE_RX_IP];
134 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
135 table->search_depth[EFX_FILTER_TCP_FULL] +
136 FILTER_CTL_SRCH_FUDGE_FULL);
137 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
138 table->search_depth[EFX_FILTER_TCP_WILD] +
139 FILTER_CTL_SRCH_FUDGE_WILD);
140 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
141 table->search_depth[EFX_FILTER_UDP_FULL] +
142 FILTER_CTL_SRCH_FUDGE_FULL);
143 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
144 table->search_depth[EFX_FILTER_UDP_WILD] +
145 FILTER_CTL_SRCH_FUDGE_WILD);
146
147 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
148 if (table->size) {
149 EFX_SET_OWORD_FIELD(
150 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
151 table->search_depth[EFX_FILTER_MAC_FULL] +
152 FILTER_CTL_SRCH_FUDGE_FULL);
153 EFX_SET_OWORD_FIELD(
154 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
155 table->search_depth[EFX_FILTER_MAC_WILD] +
156 FILTER_CTL_SRCH_FUDGE_WILD);
157 }
158
159 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
160 if (table->size) {
161 EFX_SET_OWORD_FIELD(
162 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
163 table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
164 EFX_SET_OWORD_FIELD(
165 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
166 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
167 EFX_FILTER_FLAG_RX_RSS));
168 EFX_SET_OWORD_FIELD(
169 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
170 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
171 EFX_SET_OWORD_FIELD(
172 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
173 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
174 EFX_FILTER_FLAG_RX_RSS));
175
176 /* There is a single bit to enable RX scatter for all
177 * unmatched packets. Only set it if scatter is
178 * enabled in both filter specs.
179 */
180 EFX_SET_OWORD_FIELD(
181 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
182 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
183 table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
184 EFX_FILTER_FLAG_RX_SCATTER));
185 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
186 /* We don't expose 'default' filters because unmatched
187 * packets always go to the queue number found in the
188 * RSS table. But we still need to set the RX scatter
189 * bit here.
190 */
191 EFX_SET_OWORD_FIELD(
192 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
193 efx->rx_scatter);
194 }
195
196 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
197}
198
199static void efx_filter_push_tx_limits(struct efx_nic *efx)
200{
201 struct efx_filter_state *state = efx->filter_state;
202 struct efx_filter_table *table;
203 efx_oword_t tx_cfg;
204
205 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
206
207 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
208 if (table->size) {
209 EFX_SET_OWORD_FIELD(
210 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
211 table->search_depth[EFX_FILTER_MAC_FULL] +
212 FILTER_CTL_SRCH_FUDGE_FULL);
213 EFX_SET_OWORD_FIELD(
214 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
215 table->search_depth[EFX_FILTER_MAC_WILD] +
216 FILTER_CTL_SRCH_FUDGE_WILD);
217 }
218
219 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
220}
221
222static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
223 __be32 host1, __be16 port1,
224 __be32 host2, __be16 port2)
225{
226 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
227 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
228 spec->data[2] = ntohl(host2);
229}
230
231static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
232 __be32 *host1, __be16 *port1,
233 __be32 *host2, __be16 *port2)
234{
235 *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
236 *port1 = htons(spec->data[0]);
237 *host2 = htonl(spec->data[2]);
238 *port2 = htons(spec->data[1] >> 16);
239}
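
__efx_filter_set_ipv4() splits the first 32-bit host across data[0] and data[1], with the second port in the top half of data[1], and __efx_filter_get_ipv4() inverts the packing exactly. A standalone round-trip check of the same arithmetic, with uint32_t buffers standing in for the spec (addresses and ports are arbitrary examples):

	#include <arpa/inet.h>
	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t data[3];
		uint32_t host1 = htonl(0xc0a80001), host2 = htonl(0x0a000002);
		uint16_t port1 = htons(80), port2 = htons(12345);

		/* pack, as in __efx_filter_set_ipv4() */
		data[0] = ntohl(host1) << 16 | ntohs(port1);
		data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		data[2] = ntohl(host2);

		/* unpack, as in __efx_filter_get_ipv4(): every field survives */
		assert(htonl(data[0] >> 16 | data[1] << 16) == host1);
		assert(htons((uint16_t)data[0]) == port1);
		assert(htonl(data[2]) == host2);
		assert(htons((uint16_t)(data[1] >> 16)) == port2);
		return 0;
	}
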
240
241/**
242 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
243 * @spec: Specification to initialise
244 * @proto: Transport layer protocol number
245 * @host: Local host address (network byte order)
246 * @port: Local port (network byte order)
247 */
248int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
249 __be32 host, __be16 port)
250{
251 __be32 host1;
252 __be16 port1;
253
254 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
255
256 /* This cannot currently be combined with other filtering */
257 if (spec->type != EFX_FILTER_UNSPEC)
258 return -EPROTONOSUPPORT;
259
260 if (port == 0)
261 return -EINVAL;
262
263 switch (proto) {
264 case IPPROTO_TCP:
265 spec->type = EFX_FILTER_TCP_WILD;
266 break;
267 case IPPROTO_UDP:
268 spec->type = EFX_FILTER_UDP_WILD;
269 break;
270 default:
271 return -EPROTONOSUPPORT;
272 }
273
274 /* Filter is constructed in terms of source and destination,
275 * with the odd wrinkle that the ports are swapped in a UDP
276 * wildcard filter. We need to convert from local and remote
277 * (= zero for wildcard) addresses.
278 */
279 host1 = 0;
280 if (proto != IPPROTO_UDP) {
281 port1 = 0;
282 } else {
283 port1 = port;
284 port = 0;
285 }
286
287 __efx_filter_set_ipv4(spec, host1, port1, host, port);
288 return 0;
289}
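
A typical caller sequence for this API looks roughly as follows (a sketch only: the queue number and address are hypothetical, and "efx" is an already-probed NIC):

	struct efx_filter_spec spec;
	int rc;

	/* Steer all TCP traffic addressed to 192.168.0.1:80 to RX queue 0 */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
				       htonl(0xc0a80001), htons(80));
	if (rc == 0)
		rc = efx_filter_insert_filter(efx, &spec, true);
	/* rc is now a filter ID (>= 0) or a negative error code */
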
290
291int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
292 u8 *proto, __be32 *host, __be16 *port)
293{
294 __be32 host1;
295 __be16 port1;
296
297 switch (spec->type) {
298 case EFX_FILTER_TCP_WILD:
299 *proto = IPPROTO_TCP;
300 __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
301 return 0;
302 case EFX_FILTER_UDP_WILD:
303 *proto = IPPROTO_UDP;
304 __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
305 return 0;
306 default:
307 return -EINVAL;
308 }
309}
310
311/**
312 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
313 * @spec: Specification to initialise
314 * @proto: Transport layer protocol number
315 * @host: Local host address (network byte order)
316 * @port: Local port (network byte order)
317 * @rhost: Remote host address (network byte order)
318 * @rport: Remote port (network byte order)
319 */
320int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
321 __be32 host, __be16 port,
322 __be32 rhost, __be16 rport)
323{
324 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
325
326 /* This cannot currently be combined with other filtering */
327 if (spec->type != EFX_FILTER_UNSPEC)
328 return -EPROTONOSUPPORT;
329
330 if (port == 0 || rport == 0)
331 return -EINVAL;
332
333 switch (proto) {
334 case IPPROTO_TCP:
335 spec->type = EFX_FILTER_TCP_FULL;
336 break;
337 case IPPROTO_UDP:
338 spec->type = EFX_FILTER_UDP_FULL;
339 break;
340 default:
341 return -EPROTONOSUPPORT;
342 }
343
344 __efx_filter_set_ipv4(spec, rhost, rport, host, port);
345 return 0;
346}
347
348int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
349 u8 *proto, __be32 *host, __be16 *port,
350 __be32 *rhost, __be16 *rport)
351{
352 switch (spec->type) {
353 case EFX_FILTER_TCP_FULL:
354 *proto = IPPROTO_TCP;
355 break;
356 case EFX_FILTER_UDP_FULL:
357 *proto = IPPROTO_UDP;
358 break;
359 default:
360 return -EINVAL;
361 }
362
363 __efx_filter_get_ipv4(spec, rhost, rport, host, port);
364 return 0;
365}
366
367/**
368 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
369 * @spec: Specification to initialise
370 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
371 * @addr: Local Ethernet MAC address
372 */
373int efx_filter_set_eth_local(struct efx_filter_spec *spec,
374 u16 vid, const u8 *addr)
375{
376 EFX_BUG_ON_PARANOID(!(spec->flags &
377 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
378
379 /* This cannot currently be combined with other filtering */
380 if (spec->type != EFX_FILTER_UNSPEC)
381 return -EPROTONOSUPPORT;
382
383 if (vid == EFX_FILTER_VID_UNSPEC) {
384 spec->type = EFX_FILTER_MAC_WILD;
385 spec->data[0] = 0;
386 } else {
387 spec->type = EFX_FILTER_MAC_FULL;
388 spec->data[0] = vid;
389 }
390
391 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
392 spec->data[2] = addr[0] << 8 | addr[1];
393 return 0;
394}
395
396/**
397 * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
398 * @spec: Specification to initialise
399 */
400int efx_filter_set_uc_def(struct efx_filter_spec *spec)
401{
402 EFX_BUG_ON_PARANOID(!(spec->flags &
403 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
404
405 if (spec->type != EFX_FILTER_UNSPEC)
406 return -EINVAL;
407
408 spec->type = EFX_FILTER_UC_DEF;
409 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
410 return 0;
411}
412
413/**
414 * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
415 * @spec: Specification to initialise
416 */
417int efx_filter_set_mc_def(struct efx_filter_spec *spec)
418{
419 EFX_BUG_ON_PARANOID(!(spec->flags &
420 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
421
422 if (spec->type != EFX_FILTER_UNSPEC)
423 return -EINVAL;
424
425 spec->type = EFX_FILTER_MC_DEF;
426 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
427 return 0;
428}
429
430static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
431{
432 struct efx_filter_state *state = efx->filter_state;
433 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
434 struct efx_filter_spec *spec = &table->spec[filter_idx];
435 enum efx_filter_flags flags = 0;
436
437 /* If there's only one channel then disable RSS for non-VF
438 * traffic, thereby allowing VFs to use RSS when the PF can't.
439 */
440 if (efx->n_rx_channels > 1)
441 flags |= EFX_FILTER_FLAG_RX_RSS;
442
443 if (efx->rx_scatter)
444 flags |= EFX_FILTER_FLAG_RX_SCATTER;
445
446 efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
447 spec->type = EFX_FILTER_UC_DEF + filter_idx;
448 table->used_bitmap[0] |= 1 << filter_idx;
449}
450
451int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
452 u16 *vid, u8 *addr)
453{
454 switch (spec->type) {
455 case EFX_FILTER_MAC_WILD:
456 *vid = EFX_FILTER_VID_UNSPEC;
457 break;
458 case EFX_FILTER_MAC_FULL:
459 *vid = spec->data[0];
460 break;
461 default:
462 return -EINVAL;
463 }
464
465 addr[0] = spec->data[2] >> 8;
466 addr[1] = spec->data[2];
467 addr[2] = spec->data[1] >> 24;
468 addr[3] = spec->data[1] >> 16;
469 addr[4] = spec->data[1] >> 8;
470 addr[5] = spec->data[1];
471 return 0;
472}
473
474/* Build a filter entry and return its n-tuple key. */
475static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
476{
477 u32 data3;
478
479 switch (efx_filter_spec_table_id(spec)) {
480 case EFX_FILTER_TABLE_RX_IP: {
481 bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
482 spec->type == EFX_FILTER_UDP_WILD);
483 EFX_POPULATE_OWORD_7(
484 *filter,
485 FRF_BZ_RSS_EN,
486 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
487 FRF_BZ_SCATTER_EN,
488 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
489 FRF_BZ_TCP_UDP, is_udp,
490 FRF_BZ_RXQ_ID, spec->dmaq_id,
491 EFX_DWORD_2, spec->data[2],
492 EFX_DWORD_1, spec->data[1],
493 EFX_DWORD_0, spec->data[0]);
494 data3 = is_udp;
495 break;
496 }
497
498 case EFX_FILTER_TABLE_RX_MAC: {
499 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
500 EFX_POPULATE_OWORD_7(
501 *filter,
502 FRF_CZ_RMFT_RSS_EN,
503 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
504 FRF_CZ_RMFT_SCATTER_EN,
505 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
506 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
507 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
508 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
509 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
510 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
511 data3 = is_wild;
512 break;
513 }
514
515 case EFX_FILTER_TABLE_TX_MAC: {
516 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
517 EFX_POPULATE_OWORD_5(*filter,
518 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
519 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
520 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
521 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
522 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
523 data3 = is_wild | spec->dmaq_id << 1;
524 break;
525 }
526
527 default:
528 BUG();
529 }
530
531 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
532}
533
534static bool efx_filter_equal(const struct efx_filter_spec *left,
535 const struct efx_filter_spec *right)
536{
537 if (left->type != right->type ||
538 memcmp(left->data, right->data, sizeof(left->data)))
539 return false;
540
541 if (left->flags & EFX_FILTER_FLAG_TX &&
542 left->dmaq_id != right->dmaq_id)
543 return false;
544
545 return true;
546}
547
548/*
549 * Construct/deconstruct external filter IDs. At least the RX filter
550 * IDs must be ordered by matching priority, for RX NFC semantics.
551 *
552 * Deconstruction needs to be robust against invalid IDs so that
553 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
554 * accept user-provided IDs.
555 */
556
557#define EFX_FILTER_MATCH_PRI_COUNT 5
558
559static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
560 [EFX_FILTER_TCP_FULL] = 0,
561 [EFX_FILTER_UDP_FULL] = 0,
562 [EFX_FILTER_TCP_WILD] = 1,
563 [EFX_FILTER_UDP_WILD] = 1,
564 [EFX_FILTER_MAC_FULL] = 2,
565 [EFX_FILTER_MAC_WILD] = 3,
566 [EFX_FILTER_UC_DEF] = 4,
567 [EFX_FILTER_MC_DEF] = 4,
568};
569
570static const enum efx_filter_table_id efx_filter_range_table[] = {
571 EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
572 EFX_FILTER_TABLE_RX_IP,
573 EFX_FILTER_TABLE_RX_MAC,
574 EFX_FILTER_TABLE_RX_MAC,
575 EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
576 EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
577 EFX_FILTER_TABLE_COUNT, /* invalid */
578 EFX_FILTER_TABLE_TX_MAC,
579 EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
580};
581
582#define EFX_FILTER_INDEX_WIDTH 13
583#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
584
585static inline u32
586efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
587{
588 unsigned int range;
589
590 range = efx_filter_type_match_pri[spec->type];
591 if (!(spec->flags & EFX_FILTER_FLAG_RX))
592 range += EFX_FILTER_MATCH_PRI_COUNT;
593
594 return range << EFX_FILTER_INDEX_WIDTH | index;
595}
596
597static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
598{
599 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
600
601 if (range < ARRAY_SIZE(efx_filter_range_table))
602 return efx_filter_range_table[range];
603 else
604 return EFX_FILTER_TABLE_COUNT; /* invalid */
605}
606
607static inline unsigned int efx_filter_id_index(u32 id)
608{
609 return id & EFX_FILTER_INDEX_MASK;
610}
611
612static inline u8 efx_filter_id_flags(u32 id)
613{
614 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
615
616 if (range < EFX_FILTER_MATCH_PRI_COUNT)
617 return EFX_FILTER_FLAG_RX;
618 else
619 return EFX_FILTER_FLAG_TX;
620}
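
A worked example of the ID layout: an RX TCP_WILD filter has match priority 1, so at table index 5 it is assigned ID (1 << 13) | 5 = 8197, and the three decoding helpers recover everything without touching the table contents; that is what makes the *_safe() lookups below robust against user-supplied IDs:

	u32 id = 1 << EFX_FILTER_INDEX_WIDTH | 5;	/* 8197 */

	/* efx_filter_id_table_id(id) == EFX_FILTER_TABLE_RX_IP */
	/* efx_filter_id_index(id)    == 5 */
	/* efx_filter_id_flags(id)    == EFX_FILTER_FLAG_RX */
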
621
622u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
623{
624 struct efx_filter_state *state = efx->filter_state;
625 unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
626 enum efx_filter_table_id table_id;
627
628 do {
629 table_id = efx_filter_range_table[range];
630 if (state->table[table_id].size != 0)
631 return range << EFX_FILTER_INDEX_WIDTH |
632 state->table[table_id].size;
633 } while (range--);
634
635 return 0;
636}
637
638/**
639 * efx_filter_insert_filter - add or replace a filter
640 * @efx: NIC in which to insert the filter
641 * @spec: Specification for the filter
642 * @replace_equal: Flag for whether the specified filter may replace an
643 * existing filter with equal priority
644 *
645 * On success, return the filter ID.
646 * On failure, return a negative error code.
647 *
648 * If an existing filter has equal match values to the new filter
649 * spec, then the new filter might replace it, depending on the
650 * relative priorities. If the existing filter has lower priority, or
651 * if @replace_equal is set and it has equal priority, then it is
652 * replaced. Otherwise the function fails, returning -%EPERM if
653 * the existing filter has higher priority or -%EEXIST if it has
654 * equal priority.
655 */
656s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
657 bool replace_equal)
658{
659 struct efx_filter_state *state = efx->filter_state;
660 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
661 efx_oword_t filter;
662 int rep_index, ins_index;
663 unsigned int depth = 0;
664 int rc;
665
666 if (!table || table->size == 0)
667 return -EINVAL;
668
669 netif_vdbg(efx, hw, efx->net_dev,
670 "%s: type %d search_depth=%d", __func__, spec->type,
671 table->search_depth[spec->type]);
672
673 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
674 /* One filter spec per type */
675 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
676 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
677 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
678 rep_index = spec->type - EFX_FILTER_UC_DEF;
679 ins_index = rep_index;
680
681 spin_lock_bh(&state->lock);
682 } else {
683 /* Search concurrently for
684 * (1) a filter to be replaced (rep_index): any filter
685 * with the same match values, up to the current
686 * search depth for this type, and
687 * (2) the insertion point (ins_index): (1) or any
688 * free slot before it or up to the maximum search
689 * depth for this priority
690 * We fail if we cannot find (2).
691 *
692 * We can stop once either
693 * (a) we find (1), in which case we have definitely
694 * found (2) as well; or
695 * (b) we have searched exhaustively for (1), and have
696 * either found (2) or searched exhaustively for it
697 */
698 u32 key = efx_filter_build(&filter, spec);
699 unsigned int hash = efx_filter_hash(key);
700 unsigned int incr = efx_filter_increment(key);
701 unsigned int max_rep_depth = table->search_depth[spec->type];
702 unsigned int max_ins_depth =
703 spec->priority <= EFX_FILTER_PRI_HINT ?
704 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
705 unsigned int i = hash & (table->size - 1);
706
707 ins_index = -1;
708 depth = 1;
709
710 spin_lock_bh(&state->lock);
711
712 for (;;) {
713 if (!test_bit(i, table->used_bitmap)) {
714 if (ins_index < 0)
715 ins_index = i;
716 } else if (efx_filter_equal(spec, &table->spec[i])) {
717 /* Case (a) */
718 if (ins_index < 0)
719 ins_index = i;
720 rep_index = i;
721 break;
722 }
723
724 if (depth >= max_rep_depth &&
725 (ins_index >= 0 || depth >= max_ins_depth)) {
726 /* Case (b) */
727 if (ins_index < 0) {
728 rc = -EBUSY;
729 goto out;
730 }
731 rep_index = -1;
732 break;
733 }
734
735 i = (i + incr) & (table->size - 1);
736 ++depth;
737 }
738 }
739
740 /* If we found a filter to be replaced, check whether we
741 * should do so
742 */
743 if (rep_index >= 0) {
744 struct efx_filter_spec *saved_spec = &table->spec[rep_index];
745
746 if (spec->priority == saved_spec->priority && !replace_equal) {
747 rc = -EEXIST;
748 goto out;
749 }
750 if (spec->priority < saved_spec->priority) {
751 rc = -EPERM;
752 goto out;
753 }
754 }
755
756 /* Insert the filter */
757 if (ins_index != rep_index) {
758 __set_bit(ins_index, table->used_bitmap);
759 ++table->used;
760 }
761 table->spec[ins_index] = *spec;
762
763 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
764 efx_filter_push_rx_config(efx);
765 } else {
766 if (table->search_depth[spec->type] < depth) {
767 table->search_depth[spec->type] = depth;
768 if (spec->flags & EFX_FILTER_FLAG_TX)
769 efx_filter_push_tx_limits(efx);
770 else
771 efx_filter_push_rx_config(efx);
772 }
773
774 efx_writeo(efx, &filter,
775 table->offset + table->step * ins_index);
776
777 /* If we were able to replace a filter by inserting
778 * at a lower depth, clear the replaced filter
779 */
780 if (ins_index != rep_index && rep_index >= 0)
781 efx_filter_table_clear_entry(efx, table, rep_index);
782 }
783
784 netif_vdbg(efx, hw, efx->net_dev,
785 "%s: filter type %d index %d rxq %u set",
786 __func__, spec->type, ins_index, spec->dmaq_id);
787 rc = efx_filter_make_id(spec, ins_index);
788
789out:
790 spin_unlock_bh(&state->lock);
791 return rc;
792}
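
To make the replacement rules concrete, a hypothetical sequence in which every spec carries identical match values:

	/* first insert succeeds and returns an ID */
	id = efx_filter_insert_filter(efx, &spec_hint, false);    /* PRI_HINT */

	/* higher priority always replaces, regardless of replace_equal */
	id = efx_filter_insert_filter(efx, &spec_manual, false);  /* PRI_MANUAL */

	/* equal priority fails with -EEXIST unless replace_equal is set */
	rc = efx_filter_insert_filter(efx, &spec_manual2, false); /* -EEXIST */

	/* lower priority never replaces the existing filter */
	rc = efx_filter_insert_filter(efx, &spec_hint2, false);   /* -EPERM */
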
793
794static void efx_filter_table_clear_entry(struct efx_nic *efx,
795 struct efx_filter_table *table,
796 unsigned int filter_idx)
797{
798 static efx_oword_t filter; /* static => zero-initialised; writing it clears the HW entry */
799
800 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
801 /* RX default filters must always exist */
802 efx_filter_reset_rx_def(efx, filter_idx);
803 efx_filter_push_rx_config(efx);
804 } else if (test_bit(filter_idx, table->used_bitmap)) {
805 __clear_bit(filter_idx, table->used_bitmap);
806 --table->used;
807 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
808
809 efx_writeo(efx, &filter,
810 table->offset + table->step * filter_idx);
811 }
812}
813
814/**
815 * efx_filter_remove_id_safe - remove a filter by ID, carefully
816 * @efx: NIC from which to remove the filter
817 * @priority: Priority of filter, as passed to efx_filter_insert_filter()
818 * @filter_id: ID of filter, as returned by efx_filter_insert_filter()
819 *
820 * This function will range-check @filter_id, so it is safe to call
821 * with a value passed from userland.
822 */
823int efx_filter_remove_id_safe(struct efx_nic *efx,
824 enum efx_filter_priority priority,
825 u32 filter_id)
826{
827 struct efx_filter_state *state = efx->filter_state;
828 enum efx_filter_table_id table_id;
829 struct efx_filter_table *table;
830 unsigned int filter_idx;
831 struct efx_filter_spec *spec;
832 u8 filter_flags;
833 int rc;
834
835 table_id = efx_filter_id_table_id(filter_id);
836 if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
837 return -ENOENT;
838 table = &state->table[table_id];
839
840 filter_idx = efx_filter_id_index(filter_id);
841 if (filter_idx >= table->size)
842 return -ENOENT;
843 spec = &table->spec[filter_idx];
844
845 filter_flags = efx_filter_id_flags(filter_id);
846
847 spin_lock_bh(&state->lock);
848
849 if (test_bit(filter_idx, table->used_bitmap) &&
850 spec->priority == priority) {
851 efx_filter_table_clear_entry(efx, table, filter_idx);
852 if (table->used == 0)
853 efx_filter_table_reset_search_depth(table);
854 rc = 0;
855 } else {
856 rc = -ENOENT;
857 }
858
859 spin_unlock_bh(&state->lock);
860
861 return rc;
862}
863
864/**
865 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
866 * @efx: NIC from which to retrieve the filter
867 * @priority: Priority of filter, as passed to efx_filter_insert_filter()
868 * @filter_id: ID of filter, as returned by efx_filter_insert_filter()
869 * @spec: Buffer in which to store filter specification
870 *
871 * This function will range-check @filter_id, so it is safe to call
872 * with a value passed from userland.
873 */
874int efx_filter_get_filter_safe(struct efx_nic *efx,
875 enum efx_filter_priority priority,
876 u32 filter_id, struct efx_filter_spec *spec_buf)
877{
878 struct efx_filter_state *state = efx->filter_state;
879 enum efx_filter_table_id table_id;
880 struct efx_filter_table *table;
881 struct efx_filter_spec *spec;
882 unsigned int filter_idx;
883 u8 filter_flags;
884 int rc;
885
886 table_id = efx_filter_id_table_id(filter_id);
887 if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
888 return -ENOENT;
889 table = &state->table[table_id];
890
891 filter_idx = efx_filter_id_index(filter_id);
892 if (filter_idx >= table->size)
893 return -ENOENT;
894 spec = &table->spec[filter_idx];
895
896 filter_flags = efx_filter_id_flags(filter_id);
897
898 spin_lock_bh(&state->lock);
899
900 if (test_bit(filter_idx, table->used_bitmap) &&
901 spec->priority == priority) {
902 *spec_buf = *spec;
903 rc = 0;
904 } else {
905 rc = -ENOENT;
906 }
907
908 spin_unlock_bh(&state->lock);
909
910 return rc;
911}
912
913static void efx_filter_table_clear(struct efx_nic *efx,
914 enum efx_filter_table_id table_id,
915 enum efx_filter_priority priority)
916{
917 struct efx_filter_state *state = efx->filter_state;
918 struct efx_filter_table *table = &state->table[table_id];
919 unsigned int filter_idx;
920
921 spin_lock_bh(&state->lock);
922
923 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
924 if (table->spec[filter_idx].priority <= priority)
925 efx_filter_table_clear_entry(efx, table, filter_idx);
926 if (table->used == 0)
927 efx_filter_table_reset_search_depth(table);
928
929 spin_unlock_bh(&state->lock);
930}
931
932/**
933 * efx_filter_clear_rx - remove RX filters by priority
934 * @efx: NIC from which to remove the filters
935 * @priority: Maximum priority to remove
936 */
937void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
938{
939 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
940 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
941}
942
943u32 efx_filter_count_rx_used(struct efx_nic *efx,
944 enum efx_filter_priority priority)
945{
946 struct efx_filter_state *state = efx->filter_state;
947 enum efx_filter_table_id table_id;
948 struct efx_filter_table *table;
949 unsigned int filter_idx;
950 u32 count = 0;
951
952 spin_lock_bh(&state->lock);
953
954 for (table_id = EFX_FILTER_TABLE_RX_IP;
955 table_id <= EFX_FILTER_TABLE_RX_DEF;
956 table_id++) {
957 table = &state->table[table_id];
958 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
959 if (test_bit(filter_idx, table->used_bitmap) &&
960 table->spec[filter_idx].priority == priority)
961 ++count;
962 }
963 }
964
965 spin_unlock_bh(&state->lock);
966
967 return count;
968}
969
970s32 efx_filter_get_rx_ids(struct efx_nic *efx,
971 enum efx_filter_priority priority,
972 u32 *buf, u32 size)
973{
974 struct efx_filter_state *state = efx->filter_state;
975 enum efx_filter_table_id table_id;
976 struct efx_filter_table *table;
977 unsigned int filter_idx;
978 s32 count = 0;
979
980 spin_lock_bh(&state->lock);
981
982 for (table_id = EFX_FILTER_TABLE_RX_IP;
983 table_id <= EFX_FILTER_TABLE_RX_DEF;
984 table_id++) {
985 table = &state->table[table_id];
986 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
987 if (test_bit(filter_idx, table->used_bitmap) &&
988 table->spec[filter_idx].priority == priority) {
989 if (count == size) {
990 count = -EMSGSIZE;
991 goto out;
992 }
993 buf[count++] = efx_filter_make_id(
994 &table->spec[filter_idx], filter_idx);
995 }
996 }
997 }
998out:
999 spin_unlock_bh(&state->lock);
1000
1001 return count;
1002}
1003
1004/* Restore filter state after reset */
1005void efx_restore_filters(struct efx_nic *efx)
1006{
1007 struct efx_filter_state *state = efx->filter_state;
1008 enum efx_filter_table_id table_id;
1009 struct efx_filter_table *table;
1010 efx_oword_t filter;
1011 unsigned int filter_idx;
1012
1013 spin_lock_bh(&state->lock);
1014
1015 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1016 table = &state->table[table_id];
1017
1018 /* Check whether this is a regular register table */
1019 if (table->step == 0)
1020 continue;
1021
1022 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1023 if (!test_bit(filter_idx, table->used_bitmap))
1024 continue;
1025 efx_filter_build(&filter, &table->spec[filter_idx]);
1026 efx_writeo(efx, &filter,
1027 table->offset + table->step * filter_idx);
1028 }
1029 }
1030
1031 efx_filter_push_rx_config(efx);
1032 efx_filter_push_tx_limits(efx);
1033
1034 spin_unlock_bh(&state->lock);
1035}
1036
1037int efx_probe_filters(struct efx_nic *efx)
1038{
1039 struct efx_filter_state *state;
1040 struct efx_filter_table *table;
1041 unsigned table_id;
1042
1043 state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
1044 if (!state)
1045 return -ENOMEM;
1046 efx->filter_state = state;
1047
1048 spin_lock_init(&state->lock);
1049
1050 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1051#ifdef CONFIG_RFS_ACCEL
1052 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
1053 sizeof(*state->rps_flow_id),
1054 GFP_KERNEL);
1055 if (!state->rps_flow_id)
1056 goto fail;
1057#endif
1058 table = &state->table[EFX_FILTER_TABLE_RX_IP];
1059 table->id = EFX_FILTER_TABLE_RX_IP;
1060 table->offset = FR_BZ_RX_FILTER_TBL0;
1061 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
1062 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
1063 }
1064
1065 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1066 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
1067 table->id = EFX_FILTER_TABLE_RX_MAC;
1068 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
1069 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
1070 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
1071
1072 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
1073 table->id = EFX_FILTER_TABLE_RX_DEF;
1074 table->size = EFX_FILTER_SIZE_RX_DEF;
1075
1076 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
1077 table->id = EFX_FILTER_TABLE_TX_MAC;
1078 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
1079 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
1080 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
1081 }
1082
1083 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1084 table = &state->table[table_id];
1085 if (table->size == 0)
1086 continue;
1087 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
1088 sizeof(unsigned long),
1089 GFP_KERNEL);
1090 if (!table->used_bitmap)
1091 goto fail;
1092 table->spec = vzalloc(table->size * sizeof(*table->spec));
1093 if (!table->spec)
1094 goto fail;
1095 }
1096
1097 if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
1098 /* RX default filters must always exist */
1099 unsigned i;
1100 for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
1101 efx_filter_reset_rx_def(efx, i);
1102 }
1103
1104 efx_filter_push_rx_config(efx);
1105
1106 return 0;
1107
1108fail:
1109 efx_remove_filters(efx);
1110 return -ENOMEM;
1111}
1112
1113void efx_remove_filters(struct efx_nic *efx)
1114{
1115 struct efx_filter_state *state = efx->filter_state;
1116 enum efx_filter_table_id table_id;
1117
1118 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1119 kfree(state->table[table_id].used_bitmap);
1120 vfree(state->table[table_id].spec);
1121 }
1122#ifdef CONFIG_RFS_ACCEL
1123 kfree(state->rps_flow_id);
1124#endif
1125 kfree(state);
1126}
1127
1128/* Update scatter enable flags for filters pointing to our own RX queues */
1129void efx_filter_update_rx_scatter(struct efx_nic *efx)
1130{
1131 struct efx_filter_state *state = efx->filter_state;
1132 enum efx_filter_table_id table_id;
1133 struct efx_filter_table *table;
1134 efx_oword_t filter;
1135 unsigned int filter_idx;
1136
1137 spin_lock_bh(&state->lock);
1138
1139 for (table_id = EFX_FILTER_TABLE_RX_IP;
1140 table_id <= EFX_FILTER_TABLE_RX_DEF;
1141 table_id++) {
1142 table = &state->table[table_id];
1143
1144 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1145 if (!test_bit(filter_idx, table->used_bitmap) ||
1146 table->spec[filter_idx].dmaq_id >=
1147 efx->n_rx_channels)
1148 continue;
1149
1150 if (efx->rx_scatter)
1151 table->spec[filter_idx].flags |=
1152 EFX_FILTER_FLAG_RX_SCATTER;
1153 else
1154 table->spec[filter_idx].flags &=
1155 ~EFX_FILTER_FLAG_RX_SCATTER;
1156
1157 if (table_id == EFX_FILTER_TABLE_RX_DEF)
1158 /* Pushed by efx_filter_push_rx_config() */
1159 continue;
1160
1161 efx_filter_build(&filter, &table->spec[filter_idx]);
1162 efx_writeo(efx, &filter,
1163 table->offset + table->step * filter_idx);
1164 }
1165 }
1166
1167 efx_filter_push_rx_config(efx);
1168
1169 spin_unlock_bh(&state->lock);
1170}
1171
1172#ifdef CONFIG_RFS_ACCEL
1173
1174int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1175 u16 rxq_index, u32 flow_id)
1176{
1177 struct efx_nic *efx = netdev_priv(net_dev);
1178 struct efx_channel *channel;
1179 struct efx_filter_state *state = efx->filter_state;
1180 struct efx_filter_spec spec;
1181 const struct iphdr *ip;
1182 const __be16 *ports;
1183 int nhoff;
1184 int rc;
1185
1186 nhoff = skb_network_offset(skb);
1187
1188 if (skb->protocol == htons(ETH_P_8021Q)) {
1189 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
1190 nhoff + sizeof(struct vlan_hdr));
1191 if (((const struct vlan_hdr *)(skb->data + nhoff))->
1192 h_vlan_encapsulated_proto != htons(ETH_P_IP))
1193 return -EPROTONOSUPPORT;
1194
1195 /* This is IP over 802.1q VLAN. We can't filter on the
1196 * IP 5-tuple and the vlan together, so just strip the
1197 * vlan header and filter on the IP part.
1198 */
1199 nhoff += sizeof(struct vlan_hdr);
1200 } else if (skb->protocol != htons(ETH_P_IP)) {
1201 return -EPROTONOSUPPORT;
1202 }
1203
1204 /* RFS must validate the IP header length before calling us */
1205 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
1206 ip = (const struct iphdr *)(skb->data + nhoff);
1207 if (ip_is_fragment(ip))
1208 return -EPROTONOSUPPORT;
1209 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
1210 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
1211
1212 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
1213 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
1214 rxq_index);
1215 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
1216 ip->daddr, ports[1], ip->saddr, ports[0]);
1217 if (rc)
1218 return rc;
1219
1220 rc = efx_filter_insert_filter(efx, &spec, true);
1221 if (rc < 0)
1222 return rc;
1223
1224 /* Remember this so we can check whether to expire the filter later */
1225 state->rps_flow_id[rc] = flow_id;
1226 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
1227 ++channel->rfs_filters_added;
1228
1229 netif_info(efx, rx_status, efx->net_dev,
1230 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
1231 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
1232 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
1233 rxq_index, flow_id, rc);
1234
1235 return rc;
1236}
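
This function is not called directly from within the driver: the core accelerated-RFS code reaches it through the ndo_rx_flow_steer netdev operation. A sketch of the hookup (the real net_device_ops table lives in efx.c, outside this diff):

	static const struct net_device_ops efx_netdev_ops = {
		/* ... */
	#ifdef CONFIG_RFS_ACCEL
		.ndo_rx_flow_steer	= efx_filter_rfs,
	#endif
	};
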
1237
1238bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
1239{
1240 struct efx_filter_state *state = efx->filter_state;
1241 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
1242 unsigned mask = table->size - 1;
1243 unsigned index;
1244 unsigned stop;
1245
1246 if (!spin_trylock_bh(&state->lock))
1247 return false;
1248
1249 index = state->rps_expire_index;
1250 stop = (index + quota) & mask;
1251
1252 while (index != stop) {
1253 if (test_bit(index, table->used_bitmap) &&
1254 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
1255 rps_may_expire_flow(efx->net_dev,
1256 table->spec[index].dmaq_id,
1257 state->rps_flow_id[index], index)) {
1258 netif_info(efx, rx_status, efx->net_dev,
1259 "expiring filter %d [flow %u]\n",
1260 index, state->rps_flow_id[index]);
1261 efx_filter_table_clear_entry(efx, table, index);
1262 }
1263 index = (index + 1) & mask;
1264 }
1265
1266 state->rps_expire_index = stop;
1267 if (table->used == 0)
1268 efx_filter_table_reset_search_depth(table);
1269
1270 spin_unlock_bh(&state->lock);
1271 return true;
1272}
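
The caller's side of the quota contract, as a plausible reconstruction of the NAPI poll path (the exact threshold and quota are illustrative and live in efx.c, outside this diff):

	#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		if (channel->rfs_filters_added >= 60 &&
		    __efx_filter_rfs_expire(channel->efx, 100))
			channel->rfs_filters_added -= 60;
	#endif

Because the function only trylocks the state, a poll that races with an insertion simply skips one sweep instead of stalling the data path.
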
1273
1274#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 5cb54723b824..63c77a557178 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc. 3 * Copyright 2005-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -11,32 +11,49 @@
11#define EFX_FILTER_H 11#define EFX_FILTER_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/if_ether.h>
15#include <asm/byteorder.h>
14 16
15/** 17/**
16 * enum efx_filter_type - type of hardware filter 18 * enum efx_filter_match_flags - Flags for hardware filter match type
17 * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple 19 * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
18 * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port) 20 * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
19 * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple 21 * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
20 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port) 22 * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
21 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID 23 * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
22 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address 24 * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
23 * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast 25 * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
24 * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast 26 * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
25 * @EFX_FILTER_UNSPEC: Match type is unspecified 27 * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
28 * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
29 * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
30 * Used for RX default unicast and multicast/broadcast filters.
26 * 31 *
27 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types. 32 * Only some combinations are supported, depending on NIC type:
33 *
34 * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
35 * local 2-tuple (only implemented for Falcon B0)
36 *
37 * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
38 * or local 2-tuple, or local MAC with or without outer VID, and RX
39 * default filters
40 *
41 * - Huntington supports filter matching controlled by firmware, potentially
42 * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
43 * with or without outer and inner VID
28 */ 44 */
29enum efx_filter_type { 45enum efx_filter_match_flags {
30 EFX_FILTER_TCP_FULL = 0, 46 EFX_FILTER_MATCH_REM_HOST = 0x0001,
31 EFX_FILTER_TCP_WILD, 47 EFX_FILTER_MATCH_LOC_HOST = 0x0002,
32 EFX_FILTER_UDP_FULL, 48 EFX_FILTER_MATCH_REM_MAC = 0x0004,
33 EFX_FILTER_UDP_WILD, 49 EFX_FILTER_MATCH_REM_PORT = 0x0008,
34 EFX_FILTER_MAC_FULL = 4, 50 EFX_FILTER_MATCH_LOC_MAC = 0x0010,
35 EFX_FILTER_MAC_WILD, 51 EFX_FILTER_MATCH_LOC_PORT = 0x0020,
36 EFX_FILTER_UC_DEF = 8, 52 EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
37 EFX_FILTER_MC_DEF, 53 EFX_FILTER_MATCH_INNER_VID = 0x0080,
38 EFX_FILTER_TYPE_COUNT, /* number of specific types */ 54 EFX_FILTER_MATCH_OUTER_VID = 0x0100,
39 EFX_FILTER_UNSPEC = 0xf, 55 EFX_FILTER_MATCH_IP_PROTO = 0x0200,
56 EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
40}; 57};
41 58
42/** 59/**
@@ -61,37 +78,75 @@ enum efx_filter_priority {
61 * according to the indirection table. 78 * according to the indirection table.
62 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving 79 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
63 * queue. 80 * queue.
81 * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
82 * network stack. The filter must have a priority of
83 * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
84 * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
85 * request with priority %EFX_FILTER_PRI_MANUAL will reset the
86 * steering (but not remove the filter).
64 * @EFX_FILTER_FLAG_RX: Filter is for RX 87 * @EFX_FILTER_FLAG_RX: Filter is for RX
65 * @EFX_FILTER_FLAG_TX: Filter is for TX 88 * @EFX_FILTER_FLAG_TX: Filter is for TX
66 */ 89 */
67enum efx_filter_flags { 90enum efx_filter_flags {
68 EFX_FILTER_FLAG_RX_RSS = 0x01, 91 EFX_FILTER_FLAG_RX_RSS = 0x01,
69 EFX_FILTER_FLAG_RX_SCATTER = 0x02, 92 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
93 EFX_FILTER_FLAG_RX_STACK = 0x04,
70 EFX_FILTER_FLAG_RX = 0x08, 94 EFX_FILTER_FLAG_RX = 0x08,
71 EFX_FILTER_FLAG_TX = 0x10, 95 EFX_FILTER_FLAG_TX = 0x10,
72}; 96};
73 97
74/** 98/**
75 * struct efx_filter_spec - specification for a hardware filter 99 * struct efx_filter_spec - specification for a hardware filter
76 * @type: Type of match to be performed, from &enum efx_filter_type 100 * @match_flags: Match type flags, from &enum efx_filter_match_flags
77 * @priority: Priority of the filter, from &enum efx_filter_priority 101 * @priority: Priority of the filter, from &enum efx_filter_priority
78 * @flags: Miscellaneous flags, from &enum efx_filter_flags 102 * @flags: Miscellaneous flags, from &enum efx_filter_flags
79 * @dmaq_id: Source/target queue index 103 * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
80 * @data: Match data (type-dependent) 104 * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
105 * an RX drop filter
106 * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
107 * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
108 * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
109 * %EFX_FILTER_MATCH_LOC_MAC_IG is set
110 * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
111 * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
112 * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
113 * is set
114 * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
115 * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
116 * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
117 * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
81 * 118 *
82 * Use the efx_filter_set_*() functions to initialise the @type and 119 * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
83 * @data fields. 120 * used to initialise the structure. The efx_filter_set_*() functions
121 * may then be used to set @rss_context, @match_flags and related
122 * fields.
84 * 123 *
85 * The @priority field is used by software to determine whether a new 124 * The @priority field is used by software to determine whether a new
86 * filter may replace an old one. The hardware priority of a filter 125 * filter may replace an old one. The hardware priority of a filter
87 * depends on the filter type. 126 * depends on which fields are matched.
88 */ 127 */
89struct efx_filter_spec { 128struct efx_filter_spec {
90 u8 type:4; 129 u32 match_flags:12;
91 u8 priority:4; 130 u32 priority:2;
92 u8 flags; 131 u32 flags:6;
93 u16 dmaq_id; 132 u32 dmaq_id:12;
94 u32 data[3]; 133 u32 rss_context;
134 __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
135 __be16 inner_vid;
136 u8 loc_mac[ETH_ALEN];
137 u8 rem_mac[ETH_ALEN];
138 __be16 ether_type;
139 u8 ip_proto;
140 __be32 loc_host[4];
141 __be32 rem_host[4];
142 __be16 loc_port;
143 __be16 rem_port;
144 /* total 64 bytes */
145};
146
147enum {
148 EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
149 EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
95}; 150};
96 151
97static inline void efx_filter_init_rx(struct efx_filter_spec *spec, 152static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
@@ -99,39 +154,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
99 enum efx_filter_flags flags, 154 enum efx_filter_flags flags,
100 unsigned rxq_id) 155 unsigned rxq_id)
101{ 156{
102 spec->type = EFX_FILTER_UNSPEC; 157 memset(spec, 0, sizeof(*spec));
103 spec->priority = priority; 158 spec->priority = priority;
104 spec->flags = EFX_FILTER_FLAG_RX | flags; 159 spec->flags = EFX_FILTER_FLAG_RX | flags;
160 spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
105 spec->dmaq_id = rxq_id; 161 spec->dmaq_id = rxq_id;
106} 162}
107 163
108static inline void efx_filter_init_tx(struct efx_filter_spec *spec, 164static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
109 unsigned txq_id) 165 unsigned txq_id)
110{ 166{
111 spec->type = EFX_FILTER_UNSPEC; 167 memset(spec, 0, sizeof(*spec));
112 spec->priority = EFX_FILTER_PRI_REQUIRED; 168 spec->priority = EFX_FILTER_PRI_REQUIRED;
113 spec->flags = EFX_FILTER_FLAG_TX; 169 spec->flags = EFX_FILTER_FLAG_TX;
114 spec->dmaq_id = txq_id; 170 spec->dmaq_id = txq_id;
115} 171}
116 172
117extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, 173/**
118 __be32 host, __be16 port); 174 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
119extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, 175 * @spec: Specification to initialise
120 u8 *proto, __be32 *host, __be16 *port); 176 * @proto: Transport layer protocol number
121extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, 177 * @host: Local host address (network byte order)
122 __be32 host, __be16 port, 178 * @port: Local port (network byte order)
123 __be32 rhost, __be16 rport); 179 */
124extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec, 180static inline int
125 u8 *proto, __be32 *host, __be16 *port, 181efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
126 __be32 *rhost, __be16 *rport); 182 __be32 host, __be16 port)
127extern int efx_filter_set_eth_local(struct efx_filter_spec *spec, 183{
128 u16 vid, const u8 *addr); 184 spec->match_flags |=
129extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec, 185 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
130 u16 *vid, u8 *addr); 186 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
131extern int efx_filter_set_uc_def(struct efx_filter_spec *spec); 187 spec->ether_type = htons(ETH_P_IP);
132extern int efx_filter_set_mc_def(struct efx_filter_spec *spec); 188 spec->ip_proto = proto;
189 spec->loc_host[0] = host;
190 spec->loc_port = port;
191 return 0;
192}
193
194/**
195 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
196 * @spec: Specification to initialise
197 * @proto: Transport layer protocol number
198 * @lhost: Local host address (network byte order)
199 * @lport: Local port (network byte order)
200 * @rhost: Remote host address (network byte order)
201 * @rport: Remote port (network byte order)
202 */
203static inline int
204efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
205 __be32 lhost, __be16 lport,
206 __be32 rhost, __be16 rport)
207{
208 spec->match_flags |=
209 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
210 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
211 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
212 spec->ether_type = htons(ETH_P_IP);
213 spec->ip_proto = proto;
214 spec->loc_host[0] = lhost;
215 spec->loc_port = lport;
216 spec->rem_host[0] = rhost;
217 spec->rem_port = rport;
218 return 0;
219}
220
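
A hedged usage sketch, combining efx_filter_init_rx() above with the IPv4
setter: steer TCP traffic for 192.168.0.1:80 to RX queue 0. The address and
queue are illustrative, and the final insertion call
(efx_filter_insert_filter() elsewhere in the driver) is assumed, not shown
in this hunk.

    struct efx_filter_spec spec;

    efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, 0, 0);
    efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
                              htonl(0xc0a80001),  /* 192.168.0.1 */
                              htons(80));         /* local port 80 */
    /* rc = efx_filter_insert_filter(efx, &spec, false);  (assumed API) */
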
133enum { 221enum {
134 EFX_FILTER_VID_UNSPEC = 0xffff, 222 EFX_FILTER_VID_UNSPEC = 0xffff,
135}; 223};
136 224
225/**
226 * efx_filter_set_eth_local - specify local Ethernet address and/or VID
227 * @spec: Specification to initialise
228 * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
229 * @addr: Local Ethernet MAC address, or %NULL
230 */
231static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
232 u16 vid, const u8 *addr)
233{
234 if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
235 return -EINVAL;
236
237 if (vid != EFX_FILTER_VID_UNSPEC) {
238 spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
239 spec->outer_vid = htons(vid);
240 }
241 if (addr != NULL) {
242 spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
243 memcpy(spec->loc_mac, addr, ETH_ALEN);
244 }
245 return 0;
246}
247
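
A similar sketch for the Ethernet setter, matching VLAN 10 plus the port's
own station address; rxq and net_dev stand in for caller context that this
hunk does not show. Note the -EINVAL guard above: passing
EFX_FILTER_VID_UNSPEC together with a NULL addr would match nothing.

    struct efx_filter_spec spec;

    efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, 0, rxq);
    efx_filter_set_eth_local(&spec, 10, net_dev->dev_addr);
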
248/**
249 * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
250 * @spec: Specification to initialise
251 */
252static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
253{
254 spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
255 return 0;
256}
257
258/**
259 * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
260 * @spec: Specification to initialise
261 */
262static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
263{
264 spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
265 spec->loc_mac[0] = 1;
266 return 0;
267}
268
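
Both default-match helpers key on EFX_FILTER_MATCH_LOC_MAC_IG, the
individual/group bit of the destination MAC (the least significant bit of
the first octet: 0 for unicast, 1 for multicast), which is why
efx_filter_set_mc_def() writes loc_mac[0] = 1. A sketch installing both
catch-all filters on an assumed queue rxq:

    struct efx_filter_spec uc, mc;

    efx_filter_init_rx(&uc, EFX_FILTER_PRI_REQUIRED, 0, rxq);
    efx_filter_set_uc_def(&uc);  /* I/G bit clear: unicast */

    efx_filter_init_rx(&mc, EFX_FILTER_PRI_REQUIRED, 0, rxq);
    efx_filter_set_mc_def(&mc);  /* I/G bit set: multicast */
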
137#endif /* EFX_FILTER_H */ 269#endif /* EFX_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96759aee1c6c..96ce507d8602 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -20,7 +20,7 @@
20 * 20 *
21 ************************************************************************** 21 **************************************************************************
22 * 22 *
23 * Notes on locking strategy: 23 * Notes on locking strategy for the Falcon architecture:
24 * 24 *
25 * Many CSRs are very wide and cannot be read or written atomically. 25 * Many CSRs are very wide and cannot be read or written atomically.
26 * Writes from the host are buffered by the Bus Interface Unit (BIU) 26 * Writes from the host are buffered by the Bus Interface Unit (BIU)
@@ -54,6 +54,12 @@
54 * register while the collector already holds values for some other 54 * register while the collector already holds values for some other
55 * register, the write is discarded and the collector maintains its 55 * register, the write is discarded and the collector maintains its
56 * current state. 56 * current state.
57 *
58 * The EF10 architecture exposes very few registers to the host and
59 * most of them are only 32 bits wide. The only exceptions are the MC
60 * doorbell register pair, which has its own latching, and
61 * TX_DESC_UPD, which works in a similar way to the Falcon
62 * architecture.
57 */ 63 */
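
To make the collector hazard concrete, here is a sketch of the failure mode
(not driver code; the locking detail is taken on trust from the full
header, which this hunk does not show):

    /* CPU A: writed(reg+0)  writed(reg+4)  writed(reg+8) ...
     * CPU B:                writed(reg2+0)
     *                       ^ the collector still holds A's partial
     *                         128-bit write, so B's write is discarded
     *
     * Hence efx_writeo() below brackets its four dword writes with a
     * spinlock on builds that cannot issue a single atomic 64-bit write.
     */
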
58 64
59#if BITS_PER_LONG == 64 65#if BITS_PER_LONG == 64
@@ -83,7 +89,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
83} 89}
84 90
85/* Write a normal 128-bit CSR, locking as appropriate. */ 91/* Write a normal 128-bit CSR, locking as appropriate. */
86static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, 92static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
87 unsigned int reg) 93 unsigned int reg)
88{ 94{
89 unsigned long flags __attribute__ ((unused)); 95 unsigned long flags __attribute__ ((unused));
@@ -108,7 +114,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
108 114
109/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */ 115/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
110static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, 116static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
111 efx_qword_t *value, unsigned int index) 117 const efx_qword_t *value, unsigned int index)
112{ 118{
113 unsigned int addr = index * sizeof(*value); 119 unsigned int addr = index * sizeof(*value);
114 unsigned long flags __attribute__ ((unused)); 120 unsigned long flags __attribute__ ((unused));
@@ -129,7 +135,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
129} 135}
130 136
131/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ 137/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
132static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, 138static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
133 unsigned int reg) 139 unsigned int reg)
134{ 140{
135 netif_vdbg(efx, hw, efx->net_dev, 141 netif_vdbg(efx, hw, efx->net_dev,
@@ -190,8 +196,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
190} 196}
191 197
192/* Write a 128-bit CSR forming part of a table */ 198/* Write a 128-bit CSR forming part of a table */
193static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, 199static inline void
194 unsigned int reg, unsigned int index) 200efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
201 unsigned int reg, unsigned int index)
195{ 202{
196 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); 203 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
197} 204}
@@ -203,12 +210,12 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
203 efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); 210 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
204} 211}
205 212
206/* Page-mapped register block size */ 213/* Page size used as step between per-VI registers */
207#define EFX_PAGE_BLOCK_SIZE 0x2000 214#define EFX_VI_PAGE_SIZE 0x2000
208 215
209/* Calculate offset to page-mapped register block */ 216/* Calculate offset to page-mapped register */
210#define EFX_PAGED_REG(page, reg) \ 217#define EFX_PAGED_REG(page, reg) \
211 ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) 218 ((page) * EFX_VI_PAGE_SIZE + (reg))
212 219
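
The stride arithmetic is worth spelling out; per-VI copies of a register
sit one 0x2000-byte page apart:

    /* e.g. VI 3's copy of the register at offset 0x830:
     * EFX_PAGED_REG(3, 0x830) == 3 * 0x2000 + 0x830 == 0x6830
     */
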
213/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ 220/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
214static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, 221static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
@@ -236,19 +243,24 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
236 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ 243 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
237 page) 244 page)
238 245
239/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of 246/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
240 * RX_DESC_UPD or TX_DESC_UPD) 247 * high bits of RX_DESC_UPD or TX_DESC_UPD)
241 */ 248 */
242static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value, 249static inline void
243 unsigned int reg, unsigned int page) 250_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
251 unsigned int reg, unsigned int page)
244{ 252{
245 efx_writed(efx, value, EFX_PAGED_REG(page, reg)); 253 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
246} 254}
247#define efx_writed_page(efx, value, reg, page) \ 255#define efx_writed_page(efx, value, reg, page) \
248 _efx_writed_page(efx, value, \ 256 _efx_writed_page(efx, value, \
249 reg + \ 257 reg + \
250 BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \ 258 BUILD_BUG_ON_ZERO((reg) != 0x400 && \
251 && (reg) != 0xa1c), \ 259 (reg) != 0x420 && \
260 (reg) != 0x830 && \
261 (reg) != 0x83c && \
262 (reg) != 0xa18 && \
263 (reg) != 0xa1c), \
252 page) 264 page)
253 265
254/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug 266/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
@@ -256,7 +268,7 @@ static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
256 * collector register. 268 * collector register.
257 */ 269 */
258static inline void _efx_writed_page_locked(struct efx_nic *efx, 270static inline void _efx_writed_page_locked(struct efx_nic *efx,
259 efx_dword_t *value, 271 const efx_dword_t *value,
260 unsigned int reg, 272 unsigned int reg,
261 unsigned int page) 273 unsigned int page)
262{ 274{
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 97dd8f18c001..128d7cdf9eb2 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2008-2011 Solarflare Communications Inc. 3 * Copyright 2008-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -8,10 +8,11 @@
8 */ 8 */
9 9
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <asm/cmpxchg.h>
11#include "net_driver.h" 12#include "net_driver.h"
12#include "nic.h" 13#include "nic.h"
13#include "io.h" 14#include "io.h"
14#include "regs.h" 15#include "farch_regs.h"
15#include "mcdi_pcol.h" 16#include "mcdi_pcol.h"
16#include "phy.h" 17#include "phy.h"
17 18
@@ -24,112 +25,235 @@
24 25
25#define MCDI_RPC_TIMEOUT (10 * HZ) 26#define MCDI_RPC_TIMEOUT (10 * HZ)
26 27
27#define MCDI_PDU(efx) \
28 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
29#define MCDI_DOORBELL(efx) \
30 (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
31#define MCDI_STATUS(efx) \
32 (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
33
34/* A reboot/assertion causes the MCDI status word to be set after the 28/* A reboot/assertion causes the MCDI status word to be set after the
35 * command word is set or a REBOOT event is sent. If we notice a reboot 29 * command word is set or a REBOOT event is sent. If we notice a reboot
36 * via these mechanisms then wait 10ms for the status word to be set. */ 30 * via these mechanisms then wait 20ms for the status word to be set.
31 */
37#define MCDI_STATUS_DELAY_US 100 32#define MCDI_STATUS_DELAY_US 100
38#define MCDI_STATUS_DELAY_COUNT 100 33#define MCDI_STATUS_DELAY_COUNT 200
39#define MCDI_STATUS_SLEEP_MS \ 34#define MCDI_STATUS_SLEEP_MS \
40 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
41 36
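
Spelling out the updated macro arithmetic:

    /* MCDI_STATUS_SLEEP_MS = 100 us * 200 / 1000 = 20 ms, matching the
     * "wait 20ms" comment above (the old values gave 100 us * 100 / 1000
     * = 10 ms).
     */
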
42#define SEQ_MASK \ 37#define SEQ_MASK \
43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 38 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
44 39
40struct efx_mcdi_async_param {
41 struct list_head list;
42 unsigned int cmd;
43 size_t inlen;
44 size_t outlen;
45 efx_mcdi_async_completer *complete;
46 unsigned long cookie;
47 /* followed by request/response buffer */
48};
49
50static void efx_mcdi_timeout_async(unsigned long context);
51static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
52 bool *was_attached_out);
53
45static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 54static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
46{ 55{
47 struct siena_nic_data *nic_data; 56 EFX_BUG_ON_PARANOID(!efx->mcdi);
48 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 57 return &efx->mcdi->iface;
49 nic_data = efx->nic_data;
50 return &nic_data->mcdi;
51} 58}
52 59
53void efx_mcdi_init(struct efx_nic *efx) 60int efx_mcdi_init(struct efx_nic *efx)
54{ 61{
55 struct efx_mcdi_iface *mcdi; 62 struct efx_mcdi_iface *mcdi;
63 bool already_attached;
64 int rc;
56 65
57 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 66 efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
58 return; 67 if (!efx->mcdi)
68 return -ENOMEM;
59 69
60 mcdi = efx_mcdi(efx); 70 mcdi = efx_mcdi(efx);
71 mcdi->efx = efx;
61 init_waitqueue_head(&mcdi->wq); 72 init_waitqueue_head(&mcdi->wq);
62 spin_lock_init(&mcdi->iface_lock); 73 spin_lock_init(&mcdi->iface_lock);
63 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); 74 mcdi->state = MCDI_STATE_QUIESCENT;
64 mcdi->mode = MCDI_MODE_POLL; 75 mcdi->mode = MCDI_MODE_POLL;
76 spin_lock_init(&mcdi->async_lock);
77 INIT_LIST_HEAD(&mcdi->async_list);
78 setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
79 (unsigned long)mcdi);
65 80
66 (void) efx_mcdi_poll_reboot(efx); 81 (void) efx_mcdi_poll_reboot(efx);
82 mcdi->new_epoch = true;
83
84 /* Recover from a failed assertion before probing */
85 rc = efx_mcdi_handle_assertion(efx);
86 if (rc)
87 return rc;
88
89 /* Let the MC (and BMC, if this is a LOM) know that the driver
90 * is loaded. We should do this before we reset the NIC.
91 */
92 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
93 if (rc) {
94 netif_err(efx, probe, efx->net_dev,
95 "Unable to register driver with MCPU\n");
96 return rc;
97 }
98 if (already_attached)
99 /* Not a fatal error */
100 netif_err(efx, probe, efx->net_dev,
101 "Host already registered with MCPU\n");
102
103 return 0;
67} 104}
68 105
69static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, 106void efx_mcdi_fini(struct efx_nic *efx)
70 const u8 *inbuf, size_t inlen) 107{
108 if (!efx->mcdi)
109 return;
110
111 BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
112
113 /* Relinquish the device (back to the BMC, if this is a LOM) */
114 efx_mcdi_drv_attach(efx, false, NULL);
115
116 kfree(efx->mcdi);
117}
118
119static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
120 const efx_dword_t *inbuf, size_t inlen)
71{ 121{
72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 123 efx_dword_t hdr[2];
74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 124 size_t hdr_len;
75 unsigned int i;
76 efx_dword_t hdr;
77 u32 xflags, seqno; 125 u32 xflags, seqno;
78 126
79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 127 BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
80 BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN); 128
129 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
130 spin_lock_bh(&mcdi->iface_lock);
131 ++mcdi->seqno;
132 spin_unlock_bh(&mcdi->iface_lock);
81 133
82 seqno = mcdi->seqno & SEQ_MASK; 134 seqno = mcdi->seqno & SEQ_MASK;
83 xflags = 0; 135 xflags = 0;
84 if (mcdi->mode == MCDI_MODE_EVENTS) 136 if (mcdi->mode == MCDI_MODE_EVENTS)
85 xflags |= MCDI_HEADER_XFLAGS_EVREQ; 137 xflags |= MCDI_HEADER_XFLAGS_EVREQ;
86 138
87 EFX_POPULATE_DWORD_6(hdr, 139 if (efx->type->mcdi_max_ver == 1) {
88 MCDI_HEADER_RESPONSE, 0, 140 /* MCDI v1 */
89 MCDI_HEADER_RESYNC, 1, 141 EFX_POPULATE_DWORD_7(hdr[0],
90 MCDI_HEADER_CODE, cmd, 142 MCDI_HEADER_RESPONSE, 0,
91 MCDI_HEADER_DATALEN, inlen, 143 MCDI_HEADER_RESYNC, 1,
92 MCDI_HEADER_SEQ, seqno, 144 MCDI_HEADER_CODE, cmd,
93 MCDI_HEADER_XFLAGS, xflags); 145 MCDI_HEADER_DATALEN, inlen,
94 146 MCDI_HEADER_SEQ, seqno,
95 efx_writed(efx, &hdr, pdu); 147 MCDI_HEADER_XFLAGS, xflags,
148 MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
149 hdr_len = 4;
150 } else {
151 /* MCDI v2 */
152 BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
153 EFX_POPULATE_DWORD_7(hdr[0],
154 MCDI_HEADER_RESPONSE, 0,
155 MCDI_HEADER_RESYNC, 1,
156 MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
157 MCDI_HEADER_DATALEN, 0,
158 MCDI_HEADER_SEQ, seqno,
159 MCDI_HEADER_XFLAGS, xflags,
160 MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
161 EFX_POPULATE_DWORD_2(hdr[1],
162 MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
163 MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
164 hdr_len = 8;
165 }
96 166
97 for (i = 0; i < inlen; i += 4) 167 efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
99 168
100 /* Ensure the payload is written out before the header */ 169 mcdi->new_epoch = false;
101 wmb(); 170}
102 171
103 /* ring the doorbell with a distinctive value */ 172static int efx_mcdi_errno(unsigned int mcdi_err)
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 173{
174 switch (mcdi_err) {
175 case 0:
176 return 0;
177#define TRANSLATE_ERROR(name) \
178 case MC_CMD_ERR_ ## name: \
179 return -name;
180 TRANSLATE_ERROR(EPERM);
181 TRANSLATE_ERROR(ENOENT);
182 TRANSLATE_ERROR(EINTR);
183 TRANSLATE_ERROR(EAGAIN);
184 TRANSLATE_ERROR(EACCES);
185 TRANSLATE_ERROR(EBUSY);
186 TRANSLATE_ERROR(EINVAL);
187 TRANSLATE_ERROR(EDEADLK);
188 TRANSLATE_ERROR(ENOSYS);
189 TRANSLATE_ERROR(ETIME);
190 TRANSLATE_ERROR(EALREADY);
191 TRANSLATE_ERROR(ENOSPC);
192#undef TRANSLATE_ERROR
193 case MC_CMD_ERR_ALLOC_FAIL:
194 return -ENOBUFS;
195 case MC_CMD_ERR_MAC_EXIST:
196 return -EADDRINUSE;
197 default:
198 return -EPROTO;
199 }
105} 200}
106 201
107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 202static void efx_mcdi_read_response_header(struct efx_nic *efx)
108{ 203{
109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 204 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 205 unsigned int respseq, respcmd, error;
111 int i; 206 efx_dword_t hdr;
207
208 efx->type->mcdi_read_response(efx, &hdr, 0, 4);
209 respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
210 respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
211 error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
112 212
113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 213 if (respcmd != MC_CMD_V2_EXTN) {
114 BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN); 214 mcdi->resp_hdr_len = 4;
215 mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
216 } else {
217 efx->type->mcdi_read_response(efx, &hdr, 4, 4);
218 mcdi->resp_hdr_len = 8;
219 mcdi->resp_data_len =
220 EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
221 }
115 222
116 for (i = 0; i < outlen; i += 4) 223 if (error && mcdi->resp_data_len == 0) {
117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 224 netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
225 mcdi->resprc = -EIO;
226 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
227 netif_err(efx, hw, efx->net_dev,
228 "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
229 respseq, mcdi->seqno);
230 mcdi->resprc = -EIO;
231 } else if (error) {
232 efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
233 mcdi->resprc =
234 efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
235 } else {
236 mcdi->resprc = 0;
237 }
118} 238}
119 239
120static int efx_mcdi_poll(struct efx_nic *efx) 240static int efx_mcdi_poll(struct efx_nic *efx)
121{ 241{
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 242 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned long time, finish; 243 unsigned long time, finish;
124 unsigned int respseq, respcmd, error; 244 unsigned int spins;
125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 245 int rc;
126 unsigned int rc, spins;
127 efx_dword_t reg;
128 246
129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ 247 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
130 rc = -efx_mcdi_poll_reboot(efx); 248 rc = efx_mcdi_poll_reboot(efx);
131 if (rc) 249 if (rc) {
132 goto out; 250 spin_lock_bh(&mcdi->iface_lock);
251 mcdi->resprc = rc;
252 mcdi->resp_hdr_len = 0;
253 mcdi->resp_data_len = 0;
254 spin_unlock_bh(&mcdi->iface_lock);
255 return 0;
256 }
133 257
134 /* Poll for completion. Poll quickly (once per microsecond) for the 1st jiffy, 258
135 * because generally mcdi responses are fast. After that, back off 259 * because generally mcdi responses are fast. After that, back off
@@ -149,59 +273,16 @@ static int efx_mcdi_poll(struct efx_nic *efx)
149 time = jiffies; 273 time = jiffies;
150 274
151 rmb(); 275 rmb();
152 efx_readd(efx, &reg, pdu); 276 if (efx->type->mcdi_poll_response(efx))
153
154 /* All 1's indicates that shared memory is in reset (and is
155 * not a valid header). Wait for it to come out of reset before
156 * completing the command */
157 if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
158 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
159 break; 277 break;
160 278
161 if (time_after(time, finish)) 279 if (time_after(time, finish))
162 return -ETIMEDOUT; 280 return -ETIMEDOUT;
163 } 281 }
164 282
165 mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); 283 spin_lock_bh(&mcdi->iface_lock);
166 respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); 284 efx_mcdi_read_response_header(efx);
167 respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); 285 spin_unlock_bh(&mcdi->iface_lock);
168 error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
169
170 if (error && mcdi->resplen == 0) {
171 netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
172 rc = EIO;
173 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
174 netif_err(efx, hw, efx->net_dev,
175 "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
176 respseq, mcdi->seqno);
177 rc = EIO;
178 } else if (error) {
179 efx_readd(efx, &reg, pdu + 4);
180 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
181#define TRANSLATE_ERROR(name) \
182 case MC_CMD_ERR_ ## name: \
183 rc = name; \
184 break
185 TRANSLATE_ERROR(ENOENT);
186 TRANSLATE_ERROR(EINTR);
187 TRANSLATE_ERROR(EACCES);
188 TRANSLATE_ERROR(EBUSY);
189 TRANSLATE_ERROR(EINVAL);
190 TRANSLATE_ERROR(EDEADLK);
191 TRANSLATE_ERROR(ENOSYS);
192 TRANSLATE_ERROR(ETIME);
193#undef TRANSLATE_ERROR
194 default:
195 rc = EIO;
196 break;
197 }
198 } else
199 rc = 0;
200
201out:
202 mcdi->resprc = rc;
203 if (rc)
204 mcdi->resplen = 0;
205 286
206 /* Return rc=0 like wait_event_timeout() */ 287 /* Return rc=0 like wait_event_timeout() */
207 return 0; 288 return 0;
@@ -212,52 +293,36 @@ out:
212 */ 293 */
213int efx_mcdi_poll_reboot(struct efx_nic *efx) 294int efx_mcdi_poll_reboot(struct efx_nic *efx)
214{ 295{
215 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); 296 if (!efx->mcdi)
216 efx_dword_t reg;
217 uint32_t value;
218
219 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
220 return false;
221
222 efx_readd(efx, &reg, addr);
223 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
224
225 if (value == 0)
226 return 0; 297 return 0;
227 298
228 /* MAC statistics have been cleared on the NIC; clear our copy 299 return efx->type->mcdi_poll_reboot(efx);
229 * so that efx_update_diff_stat() can continue to work. 300}
230 */
231 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
232
233 EFX_ZERO_DWORD(reg);
234 efx_writed(efx, &reg, addr);
235 301
236 if (value == MC_STATUS_DWORD_ASSERT) 302static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
237 return -EINTR; 303{
238 else 304 return cmpxchg(&mcdi->state,
239 return -EIO; 305 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
306 MCDI_STATE_QUIESCENT;
240} 307}
241 308
242static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) 309static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
243{ 310{
244 /* Wait until the interface becomes QUIESCENT and we win the race 311 /* Wait until the interface becomes QUIESCENT and we win the race
245 * to mark it RUNNING. */ 312 * to mark it RUNNING_SYNC.
313 */
246 wait_event(mcdi->wq, 314 wait_event(mcdi->wq,
247 atomic_cmpxchg(&mcdi->state, 315 cmpxchg(&mcdi->state,
248 MCDI_STATE_QUIESCENT, 316 MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
249 MCDI_STATE_RUNNING) 317 MCDI_STATE_QUIESCENT);
250 == MCDI_STATE_QUIESCENT);
251} 318}
252 319
253static int efx_mcdi_await_completion(struct efx_nic *efx) 320static int efx_mcdi_await_completion(struct efx_nic *efx)
254{ 321{
255 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 322 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
256 323
257 if (wait_event_timeout( 324 if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
258 mcdi->wq, 325 MCDI_RPC_TIMEOUT) == 0)
259 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
260 MCDI_RPC_TIMEOUT) == 0)
261 return -ETIMEDOUT; 326 return -ETIMEDOUT;
262 327
263 /* Check if efx_mcdi_set_mode() switched us back to polled completions. 328 /* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -274,17 +339,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
274 return 0; 339 return 0;
275} 340}
276 341
277static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) 342/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
343 * requester. Return whether this was done. Does not take any locks.
344 */
345static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
278{ 346{
279 /* If the interface is RUNNING, then move to COMPLETED and wake any 347 if (cmpxchg(&mcdi->state,
280 * waiters. If the interface isn't in RUNNING then we've received a 348 MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
281 * duplicate completion after we've already transitioned back to 349 MCDI_STATE_RUNNING_SYNC) {
282 * QUIESCENT. [A subsequent invocation would increment seqno, so would
283 * have failed the seqno check].
284 */
285 if (atomic_cmpxchg(&mcdi->state,
286 MCDI_STATE_RUNNING,
287 MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
288 wake_up(&mcdi->wq); 350 wake_up(&mcdi->wq);
289 return true; 351 return true;
290 } 352 }
@@ -294,12 +356,93 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
294 356
295static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) 357static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
296{ 358{
297 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); 359 if (mcdi->mode == MCDI_MODE_EVENTS) {
360 struct efx_mcdi_async_param *async;
361 struct efx_nic *efx = mcdi->efx;
362
363 /* Process the asynchronous request queue */
364 spin_lock_bh(&mcdi->async_lock);
365 async = list_first_entry_or_null(
366 &mcdi->async_list, struct efx_mcdi_async_param, list);
367 if (async) {
368 mcdi->state = MCDI_STATE_RUNNING_ASYNC;
369 efx_mcdi_send_request(efx, async->cmd,
370 (const efx_dword_t *)(async + 1),
371 async->inlen);
372 mod_timer(&mcdi->async_timer,
373 jiffies + MCDI_RPC_TIMEOUT);
374 }
375 spin_unlock_bh(&mcdi->async_lock);
376
377 if (async)
378 return;
379 }
380
381 mcdi->state = MCDI_STATE_QUIESCENT;
298 wake_up(&mcdi->wq); 382 wake_up(&mcdi->wq);
299} 383}
300 384
385/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
386 * asynchronous completion function, and release the interface.
387 * Return whether this was done. Must be called in bh-disabled
388 * context. Will take iface_lock and async_lock.
389 */
390static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
391{
392 struct efx_nic *efx = mcdi->efx;
393 struct efx_mcdi_async_param *async;
394 size_t hdr_len, data_len;
395 efx_dword_t *outbuf;
396 int rc;
397
398 if (cmpxchg(&mcdi->state,
399 MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
400 MCDI_STATE_RUNNING_ASYNC)
401 return false;
402
403 spin_lock(&mcdi->iface_lock);
404 if (timeout) {
405 /* Ensure that if the completion event arrives later,
406 * the seqno check in efx_mcdi_ev_cpl() will fail
407 */
408 ++mcdi->seqno;
409 ++mcdi->credits;
410 rc = -ETIMEDOUT;
411 hdr_len = 0;
412 data_len = 0;
413 } else {
414 rc = mcdi->resprc;
415 hdr_len = mcdi->resp_hdr_len;
416 data_len = mcdi->resp_data_len;
417 }
418 spin_unlock(&mcdi->iface_lock);
419
420 /* Stop the timer. In case the timer function is running, we
421 * must wait for it to return so that there is no possibility
422 * of it aborting the next request.
423 */
424 if (!timeout)
425 del_timer_sync(&mcdi->async_timer);
426
427 spin_lock(&mcdi->async_lock);
428 async = list_first_entry(&mcdi->async_list,
429 struct efx_mcdi_async_param, list);
430 list_del(&async->list);
431 spin_unlock(&mcdi->async_lock);
432
433 outbuf = (efx_dword_t *)(async + 1);
434 efx->type->mcdi_read_response(efx, outbuf, hdr_len,
435 min(async->outlen, data_len));
436 async->complete(efx, async->cookie, rc, outbuf, data_len);
437 kfree(async);
438
439 efx_mcdi_release(mcdi);
440
441 return true;
442}
443
301static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, 444static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
302 unsigned int datalen, unsigned int errno) 445 unsigned int datalen, unsigned int mcdi_err)
303{ 446{
304 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 447 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
305 bool wake = false; 448 bool wake = false;
@@ -315,52 +458,161 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
315 "MC response mismatch tx seq 0x%x rx " 458 "MC response mismatch tx seq 0x%x rx "
316 "seq 0x%x\n", seqno, mcdi->seqno); 459 "seq 0x%x\n", seqno, mcdi->seqno);
317 } else { 460 } else {
318 mcdi->resprc = errno; 461 if (efx->type->mcdi_max_ver >= 2) {
319 mcdi->resplen = datalen; 462 /* MCDI v2 responses don't fit in an event */
463 efx_mcdi_read_response_header(efx);
464 } else {
465 mcdi->resprc = efx_mcdi_errno(mcdi_err);
466 mcdi->resp_hdr_len = 4;
467 mcdi->resp_data_len = datalen;
468 }
320 469
321 wake = true; 470 wake = true;
322 } 471 }
323 472
324 spin_unlock(&mcdi->iface_lock); 473 spin_unlock(&mcdi->iface_lock);
325 474
326 if (wake) 475 if (wake) {
327 efx_mcdi_complete(mcdi); 476 if (!efx_mcdi_complete_async(mcdi, false))
477 (void) efx_mcdi_complete_sync(mcdi);
478
479 /* If the interface isn't RUNNING_ASYNC or
480 * RUNNING_SYNC then we've received a duplicate
481 * completion after we've already transitioned back to
482 * QUIESCENT. [A subsequent invocation would increment
483 * seqno, so would have failed the seqno check].
484 */
485 }
486}
487
488static void efx_mcdi_timeout_async(unsigned long context)
489{
490 struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
491
492 efx_mcdi_complete_async(mcdi, true);
493}
494
495static int
496efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
497{
498 if (efx->type->mcdi_max_ver < 0 ||
499 (efx->type->mcdi_max_ver < 2 &&
500 cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
501 return -EINVAL;
502
503 if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
504 (efx->type->mcdi_max_ver < 2 &&
505 inlen > MCDI_CTL_SDU_LEN_MAX_V1))
506 return -EMSGSIZE;
507
508 return 0;
328} 509}
329 510
330int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, 511int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
331 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, 512 const efx_dword_t *inbuf, size_t inlen,
513 efx_dword_t *outbuf, size_t outlen,
332 size_t *outlen_actual) 514 size_t *outlen_actual)
333{ 515{
334 efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); 516 int rc;
517
518 rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
519 if (rc)
520 return rc;
335 return efx_mcdi_rpc_finish(efx, cmd, inlen, 521 return efx_mcdi_rpc_finish(efx, cmd, inlen,
336 outbuf, outlen, outlen_actual); 522 outbuf, outlen, outlen_actual);
337} 523}
338 524
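
A minimal synchronous caller, modelled on efx_mcdi_print_fwver() later in
this file (MC_CMD_GET_VERSION takes no input parameters):

    MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
    size_t outlen;
    int rc;

    rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
                      outbuf, sizeof(outbuf), &outlen);
    if (rc == 0 && outlen >= MC_CMD_GET_VERSION_OUT_LEN)
        ; /* parse the version words out of outbuf */
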
339void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, 525int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
340 size_t inlen) 526 const efx_dword_t *inbuf, size_t inlen)
341{ 527{
342 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 528 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
529 int rc;
343 530
344 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 531 rc = efx_mcdi_check_supported(efx, cmd, inlen);
532 if (rc)
533 return rc;
345 534
346 efx_mcdi_acquire(mcdi); 535 efx_mcdi_acquire_sync(mcdi);
536 efx_mcdi_send_request(efx, cmd, inbuf, inlen);
537 return 0;
538}
347 539
348 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ 540/**
349 spin_lock_bh(&mcdi->iface_lock); 541 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
350 ++mcdi->seqno; 542 * @efx: NIC through which to issue the command
351 spin_unlock_bh(&mcdi->iface_lock); 543 * @cmd: Command type number
544 * @inbuf: Command parameters
545 * @inlen: Length of command parameters, in bytes
546 * @outlen: Length to allocate for response buffer, in bytes
547 * @complete: Function to be called on completion or cancellation.
548 * @cookie: Arbitrary value to be passed to @complete.
549 *
550 * This function does not sleep and therefore may be called in atomic
551 * context. It will fail if event queues are disabled or if MCDI
552 * event completions have been disabled due to an error.
553 *
554 * If it succeeds, the @complete function will be called exactly once
555 * in atomic context, when one of the following occurs:
556 * (a) the completion event is received (in NAPI context)
557 * (b) event queues are disabled (in the process that disables them)
 558 * (c) the request times out (in timer context)
559 */
560int
561efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
562 const efx_dword_t *inbuf, size_t inlen, size_t outlen,
563 efx_mcdi_async_completer *complete, unsigned long cookie)
564{
565 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
566 struct efx_mcdi_async_param *async;
567 int rc;
568
569 rc = efx_mcdi_check_supported(efx, cmd, inlen);
570 if (rc)
571 return rc;
572
573 async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
574 GFP_ATOMIC);
575 if (!async)
576 return -ENOMEM;
577
578 async->cmd = cmd;
579 async->inlen = inlen;
580 async->outlen = outlen;
581 async->complete = complete;
582 async->cookie = cookie;
583 memcpy(async + 1, inbuf, inlen);
584
585 spin_lock_bh(&mcdi->async_lock);
586
587 if (mcdi->mode == MCDI_MODE_EVENTS) {
588 list_add_tail(&async->list, &mcdi->async_list);
589
590 /* If this is at the front of the queue, try to start it
591 * immediately
592 */
593 if (mcdi->async_list.next == &async->list &&
594 efx_mcdi_acquire_async(mcdi)) {
595 efx_mcdi_send_request(efx, cmd, inbuf, inlen);
596 mod_timer(&mcdi->async_timer,
597 jiffies + MCDI_RPC_TIMEOUT);
598 }
599 } else {
600 kfree(async);
601 rc = -ENETDOWN;
602 }
603
604 spin_unlock_bh(&mcdi->async_lock);
352 605
353 efx_mcdi_copyin(efx, cmd, inbuf, inlen); 606 return rc;
354} 607}
355 608
356int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 609int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
357 u8 *outbuf, size_t outlen, size_t *outlen_actual) 610 efx_dword_t *outbuf, size_t outlen,
611 size_t *outlen_actual)
358{ 612{
359 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 613 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
360 int rc; 614 int rc;
361 615
362 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
363
364 if (mcdi->mode == MCDI_MODE_POLL) 616 if (mcdi->mode == MCDI_MODE_POLL)
365 rc = efx_mcdi_poll(efx); 617 rc = efx_mcdi_poll(efx);
366 else 618 else
@@ -380,22 +632,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
380 "MC command 0x%x inlen %d mode %d timed out\n", 632 "MC command 0x%x inlen %d mode %d timed out\n",
381 cmd, (int)inlen, mcdi->mode); 633 cmd, (int)inlen, mcdi->mode);
382 } else { 634 } else {
383 size_t resplen; 635 size_t hdr_len, data_len;
384 636
385 /* At the very least we need a memory barrier here to ensure 637 /* At the very least we need a memory barrier here to ensure
386 * we pick up changes from efx_mcdi_ev_cpl(). Protect against 638 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
387 * a spurious efx_mcdi_ev_cpl() running concurrently by 639 * a spurious efx_mcdi_ev_cpl() running concurrently by
388 * acquiring the iface_lock. */ 640 * acquiring the iface_lock. */
389 spin_lock_bh(&mcdi->iface_lock); 641 spin_lock_bh(&mcdi->iface_lock);
390 rc = -mcdi->resprc; 642 rc = mcdi->resprc;
391 resplen = mcdi->resplen; 643 hdr_len = mcdi->resp_hdr_len;
644 data_len = mcdi->resp_data_len;
392 spin_unlock_bh(&mcdi->iface_lock); 645 spin_unlock_bh(&mcdi->iface_lock);
393 646
647 BUG_ON(rc > 0);
648
394 if (rc == 0) { 649 if (rc == 0) {
395 efx_mcdi_copyout(efx, outbuf, 650 efx->type->mcdi_read_response(efx, outbuf, hdr_len,
396 min(outlen, mcdi->resplen + 3) & ~0x3); 651 min(outlen, data_len));
397 if (outlen_actual != NULL) 652 if (outlen_actual != NULL)
398 *outlen_actual = resplen; 653 *outlen_actual = data_len;
399 } else if (cmd == MC_CMD_REBOOT && rc == -EIO) 654 } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
400 ; /* Don't reset if MC_CMD_REBOOT returns EIO */ 655 ; /* Don't reset if MC_CMD_REBOOT returns EIO */
401 else if (rc == -EIO || rc == -EINTR) { 656 else if (rc == -EIO || rc == -EINTR) {
@@ -410,6 +665,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
410 if (rc == -EIO || rc == -EINTR) { 665 if (rc == -EIO || rc == -EINTR) {
411 msleep(MCDI_STATUS_SLEEP_MS); 666 msleep(MCDI_STATUS_SLEEP_MS);
412 efx_mcdi_poll_reboot(efx); 667 efx_mcdi_poll_reboot(efx);
668 mcdi->new_epoch = true;
413 } 669 }
414 } 670 }
415 671
@@ -417,11 +673,15 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
417 return rc; 673 return rc;
418} 674}
419 675
676/* Switch to polled MCDI completions. This can be called in various
677 * error conditions with various locks held, so it must be lockless.
678 * Caller is responsible for flushing asynchronous requests later.
679 */
420void efx_mcdi_mode_poll(struct efx_nic *efx) 680void efx_mcdi_mode_poll(struct efx_nic *efx)
421{ 681{
422 struct efx_mcdi_iface *mcdi; 682 struct efx_mcdi_iface *mcdi;
423 683
424 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 684 if (!efx->mcdi)
425 return; 685 return;
426 686
427 mcdi = efx_mcdi(efx); 687 mcdi = efx_mcdi(efx);
@@ -434,18 +694,57 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
434 * efx_mcdi_await_completion() will then call efx_mcdi_poll(). 694 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
435 * 695 *
436 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), 696 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
437 * which efx_mcdi_complete() provides for us. 697 * which efx_mcdi_complete_sync() provides for us.
438 */ 698 */
439 mcdi->mode = MCDI_MODE_POLL; 699 mcdi->mode = MCDI_MODE_POLL;
440 700
441 efx_mcdi_complete(mcdi); 701 efx_mcdi_complete_sync(mcdi);
702}
703
704/* Flush any running or queued asynchronous requests, after event processing
705 * is stopped
706 */
707void efx_mcdi_flush_async(struct efx_nic *efx)
708{
709 struct efx_mcdi_async_param *async, *next;
710 struct efx_mcdi_iface *mcdi;
711
712 if (!efx->mcdi)
713 return;
714
715 mcdi = efx_mcdi(efx);
716
717 /* We must be in polling mode so no more requests can be queued */
718 BUG_ON(mcdi->mode != MCDI_MODE_POLL);
719
720 del_timer_sync(&mcdi->async_timer);
721
722 /* If a request is still running, make sure we give the MC
723 * time to complete it so that the response won't overwrite our
724 * next request.
725 */
726 if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
727 efx_mcdi_poll(efx);
728 mcdi->state = MCDI_STATE_QUIESCENT;
729 }
730
731 /* Nothing else will access the async list now, so it is safe
732 * to walk it without holding async_lock. If we hold it while
733 * calling a completer then lockdep may warn that we have
734 * acquired locks in the wrong order.
735 */
736 list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
737 async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
738 list_del(&async->list);
739 kfree(async);
740 }
442} 741}
443 742
444void efx_mcdi_mode_event(struct efx_nic *efx) 743void efx_mcdi_mode_event(struct efx_nic *efx)
445{ 744{
446 struct efx_mcdi_iface *mcdi; 745 struct efx_mcdi_iface *mcdi;
447 746
448 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 747 if (!efx->mcdi)
449 return; 748 return;
450 749
451 mcdi = efx_mcdi(efx); 750 mcdi = efx_mcdi(efx);
@@ -460,7 +759,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
460 * write memory barrier ensures that efx_mcdi_rpc() sees it, which 759
461 * efx_mcdi_acquire() provides. 760 * efx_mcdi_acquire() provides.
462 */ 761 */
463 efx_mcdi_acquire(mcdi); 762 efx_mcdi_acquire_sync(mcdi);
464 mcdi->mode = MCDI_MODE_EVENTS; 763 mcdi->mode = MCDI_MODE_EVENTS;
465 efx_mcdi_release(mcdi); 764 efx_mcdi_release(mcdi);
466} 765}
@@ -477,19 +776,25 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
477 * are sent to the same queue, we can't be racing with 776 * are sent to the same queue, we can't be racing with
478 * efx_mcdi_ev_cpl()] 777 * efx_mcdi_ev_cpl()]
479 * 778 *
480 * There's a race here with efx_mcdi_rpc(), because we might receive 779 * If there is an outstanding asynchronous request, we can't
481 * a REBOOT event *before* the request has been copied out. In polled 780 * complete it now (efx_mcdi_complete() would deadlock). The
482 * mode (during startup) this is irrelevant, because efx_mcdi_complete() 781 * reset process will take care of this.
483 * is ignored. In event mode, this condition is just an edge-case of 782 *
484 * receiving a REBOOT event after posting the MCDI request. Did the mc 783 * There's a race here with efx_mcdi_send_request(), because
485 * reboot before or after the copyout? The best we can do always is 784 * we might receive a REBOOT event *before* the request has
486 * just return failure. 785 * been copied out. In polled mode (during startup) this is
786 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
787 * event mode, this condition is just an edge-case of
788 * receiving a REBOOT event after posting the MCDI
789 * request. Did the mc reboot before or after the copyout? The
790 * best we can do always is just return failure.
487 */ 791 */
488 spin_lock(&mcdi->iface_lock); 792 spin_lock(&mcdi->iface_lock);
489 if (efx_mcdi_complete(mcdi)) { 793 if (efx_mcdi_complete_sync(mcdi)) {
490 if (mcdi->mode == MCDI_MODE_EVENTS) { 794 if (mcdi->mode == MCDI_MODE_EVENTS) {
491 mcdi->resprc = rc; 795 mcdi->resprc = rc;
492 mcdi->resplen = 0; 796 mcdi->resp_hdr_len = 0;
797 mcdi->resp_data_len = 0;
493 ++mcdi->credits; 798 ++mcdi->credits;
494 } 799 }
495 } else { 800 } else {
@@ -504,41 +809,12 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
504 break; 809 break;
505 udelay(MCDI_STATUS_DELAY_US); 810 udelay(MCDI_STATUS_DELAY_US);
506 } 811 }
812 mcdi->new_epoch = true;
507 } 813 }
508 814
509 spin_unlock(&mcdi->iface_lock); 815 spin_unlock(&mcdi->iface_lock);
510} 816}
511 817
512static unsigned int efx_mcdi_event_link_speed[] = {
513 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
514 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
515 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
516};
517
518
519static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
520{
521 u32 flags, fcntl, speed, lpa;
522
523 speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
524 EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
525 speed = efx_mcdi_event_link_speed[speed];
526
527 flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
528 fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
529 lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
530
531 /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
532 * which is only run after flushing the event queues. Therefore, it
533 * is safe to modify the link state outside of the mac_lock here.
534 */
535 efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
536
537 efx_mcdi_phy_check_fcntl(efx, lpa);
538
539 efx_link_status_changed(efx);
540}
541
542/* Called from falcon_process_eventq for MCDI events */ 818/* Called from falcon_process_eventq for MCDI events */
543void efx_mcdi_process_event(struct efx_channel *channel, 819void efx_mcdi_process_event(struct efx_channel *channel,
544 efx_qword_t *event) 820 efx_qword_t *event)
@@ -551,7 +827,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
551 case MCDI_EVENT_CODE_BADSSERT: 827 case MCDI_EVENT_CODE_BADSSERT:
552 netif_err(efx, hw, efx->net_dev, 828 netif_err(efx, hw, efx->net_dev,
553 "MC watchdog or assertion failure at 0x%x\n", data); 829 "MC watchdog or assertion failure at 0x%x\n", data);
554 efx_mcdi_ev_death(efx, EINTR); 830 efx_mcdi_ev_death(efx, -EINTR);
555 break; 831 break;
556 832
557 case MCDI_EVENT_CODE_PMNOTICE: 833 case MCDI_EVENT_CODE_PMNOTICE:
@@ -576,8 +852,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
576 "MC Scheduler error address=0x%x\n", data); 852 "MC Scheduler error address=0x%x\n", data);
577 break; 853 break;
578 case MCDI_EVENT_CODE_REBOOT: 854 case MCDI_EVENT_CODE_REBOOT:
855 case MCDI_EVENT_CODE_MC_REBOOT:
579 netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); 856 netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
580 efx_mcdi_ev_death(efx, EIO); 857 efx_mcdi_ev_death(efx, -EIO);
581 break; 858 break;
582 case MCDI_EVENT_CODE_MAC_STATS_DMA: 859 case MCDI_EVENT_CODE_MAC_STATS_DMA:
583 /* MAC stats are gathered lazily. We can ignore this. */ 860
@@ -590,7 +867,27 @@ void efx_mcdi_process_event(struct efx_channel *channel,
590 case MCDI_EVENT_CODE_PTP_PPS: 867 case MCDI_EVENT_CODE_PTP_PPS:
591 efx_ptp_event(efx, event); 868 efx_ptp_event(efx, event);
592 break; 869 break;
593 870 case MCDI_EVENT_CODE_TX_FLUSH:
871 case MCDI_EVENT_CODE_RX_FLUSH:
872 /* Two flush events will be sent: one to the same event
873 * queue as completions, and one to event queue 0.
874 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
875 * flag will be set, and we should ignore the event
876 * because we want to wait for all completions.
877 */
878 BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
879 MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
880 if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
881 efx_ef10_handle_drain_event(efx);
882 break;
883 case MCDI_EVENT_CODE_TX_ERR:
884 case MCDI_EVENT_CODE_RX_ERR:
885 netif_err(efx, hw, efx->net_dev,
886 "%s DMA error (event: "EFX_QWORD_FMT")\n",
887 code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
888 EFX_QWORD_VAL(*event));
889 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
890 break;
594 default: 891 default:
595 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 892 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
596 code); 893 code);
@@ -606,27 +903,55 @@ void efx_mcdi_process_event(struct efx_channel *channel,
606 903
607void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 904void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
608{ 905{
609 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)]; 906 MCDI_DECLARE_BUF(outbuf,
907 max(MC_CMD_GET_VERSION_OUT_LEN,
908 MC_CMD_GET_CAPABILITIES_OUT_LEN));
610 size_t outlength; 909 size_t outlength;
611 const __le16 *ver_words; 910 const __le16 *ver_words;
911 size_t offset;
612 int rc; 912 int rc;
613 913
614 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); 914 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
615
616 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, 915 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
617 outbuf, sizeof(outbuf), &outlength); 916 outbuf, sizeof(outbuf), &outlength);
618 if (rc) 917 if (rc)
619 goto fail; 918 goto fail;
620
621 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { 919 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
622 rc = -EIO; 920 rc = -EIO;
623 goto fail; 921 goto fail;
624 } 922 }
625 923
626 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 924 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
627 snprintf(buf, len, "%u.%u.%u.%u", 925 offset = snprintf(buf, len, "%u.%u.%u.%u",
628 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), 926 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
629 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); 927 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
928
929 /* EF10 may have multiple datapath firmware variants within a
930 * single version. Report which variants are running.
931 */
932 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
933 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
934 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
935 outbuf, sizeof(outbuf), &outlength);
936 if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
937 offset += snprintf(
938 buf + offset, len - offset, " rx? tx?");
939 else
940 offset += snprintf(
941 buf + offset, len - offset, " rx%x tx%x",
942 MCDI_WORD(outbuf,
943 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
944 MCDI_WORD(outbuf,
945 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
946
947 /* It's theoretically possible for the string to exceed 31
948 * characters, though in practice the first three version
949 * components are short enough that this doesn't happen.
950 */
951 if (WARN_ON(offset >= len))
952 buf[0] = 0;
953 }
954
630 return; 955 return;
631 956
632fail: 957fail:
@@ -634,17 +959,18 @@ fail:
634 buf[0] = 0; 959 buf[0] = 0;
635} 960}
636 961
637int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 962static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
638 bool *was_attached) 963 bool *was_attached)
639{ 964{
640 u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
641 u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; 966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
642 size_t outlen; 967 size_t outlen;
643 int rc; 968 int rc;
644 969
645 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, 970 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
646 driver_operating ? 1 : 0); 971 driver_operating ? 1 : 0);
647 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); 972 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
973 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
648 974
649 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), 975 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
650 outbuf, sizeof(outbuf), &outlen); 976 outbuf, sizeof(outbuf), &outlen);
@@ -667,8 +993,8 @@ fail:
667int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 993int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
668 u16 *fw_subtype_list, u32 *capabilities) 994 u16 *fw_subtype_list, u32 *capabilities)
669{ 995{
670 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX]; 996 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
671 size_t outlen, offset, i; 997 size_t outlen, i;
672 int port_num = efx_port_num(efx); 998 int port_num = efx_port_num(efx);
673 int rc; 999 int rc;
674 1000
@@ -684,22 +1010,21 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
684 goto fail; 1010 goto fail;
685 } 1011 }
686 1012
687 offset = (port_num)
688 ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
689 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
690 if (mac_address) 1013 if (mac_address)
691 memcpy(mac_address, outbuf + offset, ETH_ALEN); 1014 memcpy(mac_address,
1015 port_num ?
1016 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
1017 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
1018 ETH_ALEN);
692 if (fw_subtype_list) { 1019 if (fw_subtype_list) {
693 /* Byte-swap and truncate or zero-pad as necessary */
694 offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
695 for (i = 0; 1020 for (i = 0;
696 i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; 1021 i < MCDI_VAR_ARRAY_LEN(outlen,
697 i++) { 1022 GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
698 fw_subtype_list[i] = 1023 i++)
699 (offset + 2 <= outlen) ? 1024 fw_subtype_list[i] = MCDI_ARRAY_WORD(
700 le16_to_cpup((__le16 *)(outbuf + offset)) : 0; 1025 outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
701 offset += 2; 1026 for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
702 } 1027 fw_subtype_list[i] = 0;
703 } 1028 }
704 if (capabilities) { 1029 if (capabilities) {
705 if (port_num) 1030 if (port_num)
@@ -721,7 +1046,7 @@ fail:
721 1046
722int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) 1047int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
723{ 1048{
724 u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; 1049 MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
725 u32 dest = 0; 1050 u32 dest = 0;
726 int rc; 1051 int rc;
727 1052
@@ -749,7 +1074,7 @@ fail:
749 1074
750int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) 1075int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
751{ 1076{
752 u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; 1077 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
753 size_t outlen; 1078 size_t outlen;
754 int rc; 1079 int rc;
755 1080
@@ -777,8 +1102,8 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
777 size_t *size_out, size_t *erase_size_out, 1102 size_t *size_out, size_t *erase_size_out,
778 bool *protected_out) 1103 bool *protected_out)
779{ 1104{
780 u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; 1105 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
781 u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; 1106 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
782 size_t outlen; 1107 size_t outlen;
783 int rc; 1108 int rc;
784 1109
@@ -804,127 +1129,10 @@ fail:
804 return rc; 1129 return rc;
805} 1130}
806 1131
807int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
808{
809 u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
810 int rc;
811
812 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
813
814 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
815
816 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
817 NULL, 0, NULL);
818 if (rc)
819 goto fail;
820
821 return 0;
822
823fail:
824 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
825 return rc;
826}
827
828int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
829 loff_t offset, u8 *buffer, size_t length)
830{
831 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
832 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
833 size_t outlen;
834 int rc;
835
836 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
837 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
838 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
839
840 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
841 outbuf, sizeof(outbuf), &outlen);
842 if (rc)
843 goto fail;
844
845 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
846 return 0;
847
848fail:
849 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
850 return rc;
851}
852
853int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
854 loff_t offset, const u8 *buffer, size_t length)
855{
856 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
857 int rc;
858
859 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
860 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
861 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
862 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
863
864 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
865
866 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
867 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
868 NULL, 0, NULL);
869 if (rc)
870 goto fail;
871
872 return 0;
873
874fail:
875 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
876 return rc;
877}
878
879int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
880 loff_t offset, size_t length)
881{
882 u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
883 int rc;
884
885 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
886 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
887 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
888
889 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
890
891 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
892 NULL, 0, NULL);
893 if (rc)
894 goto fail;
895
896 return 0;
897
898fail:
899 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
900 return rc;
901}
902
903int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
904{
905 u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
906 int rc;
907
908 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
909
910 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
911
912 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
913 NULL, 0, NULL);
914 if (rc)
915 goto fail;
916
917 return 0;
918
919fail:
920 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
921 return rc;
922}
923
924static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) 1132static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
925{ 1133{
926 u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; 1134 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
927 u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; 1135 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
928 int rc; 1136 int rc;
929 1137
930 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); 1138 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
@@ -976,9 +1184,9 @@ fail1:
976 1184
977static int efx_mcdi_read_assertion(struct efx_nic *efx) 1185static int efx_mcdi_read_assertion(struct efx_nic *efx)
978{ 1186{
979 u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; 1187 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
980 u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; 1188 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
981 unsigned int flags, index, ofst; 1189 unsigned int flags, index;
982 const char *reason; 1190 const char *reason;
983 size_t outlen; 1191 size_t outlen;
984 int retry; 1192 int retry;
@@ -1020,19 +1228,20 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
1020 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); 1228 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
1021 1229
1022 /* Print out the registers */ 1230 /* Print out the registers */
1023 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; 1231 for (index = 0;
1024 for (index = 1; index < 32; index++) { 1232 index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
1025 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, 1233 index++)
1026 MCDI_DWORD2(outbuf, ofst)); 1234 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
1027 ofst += sizeof(efx_dword_t); 1235 1 + index,
1028 } 1236 MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
1237 index));
1029 1238
1030 return 0; 1239 return 0;
1031} 1240}
1032 1241
1033static void efx_mcdi_exit_assertion(struct efx_nic *efx) 1242static void efx_mcdi_exit_assertion(struct efx_nic *efx)
1034{ 1243{
1035 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1244 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
1036 1245
1037 /* If the MC is running debug firmware, it might now be 1246 /* If the MC is running debug firmware, it might now be
1038 * waiting for a debugger to attach, but we just want it to 1247 * waiting for a debugger to attach, but we just want it to
@@ -1062,7 +1271,7 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
1062 1271
1063void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1272void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1064{ 1273{
1065 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; 1274 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
1066 int rc; 1275 int rc;
1067 1276
1068 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); 1277 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
@@ -1080,7 +1289,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1080 __func__, rc); 1289 __func__, rc);
1081} 1290}
1082 1291
1083int efx_mcdi_reset_port(struct efx_nic *efx) 1292static int efx_mcdi_reset_port(struct efx_nic *efx)
1084{ 1293{
1085 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); 1294 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
1086 if (rc) 1295 if (rc)
@@ -1089,9 +1298,9 @@ int efx_mcdi_reset_port(struct efx_nic *efx)
1089 return rc; 1298 return rc;
1090} 1299}
1091 1300
1092int efx_mcdi_reset_mc(struct efx_nic *efx) 1301static int efx_mcdi_reset_mc(struct efx_nic *efx)
1093{ 1302{
1094 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1303 MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
1095 int rc; 1304 int rc;
1096 1305
1097 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1306 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
@@ -1107,11 +1316,31 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
1107 return rc; 1316 return rc;
1108} 1317}
1109 1318
1319enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
1320{
1321 return RESET_TYPE_RECOVER_OR_ALL;
1322}
1323
1324int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1325{
1326 int rc;
1327
1328 /* Recover from a failed assertion pre-reset */
1329 rc = efx_mcdi_handle_assertion(efx);
1330 if (rc)
1331 return rc;
1332
1333 if (method == RESET_TYPE_WORLD)
1334 return efx_mcdi_reset_mc(efx);
1335 else
1336 return efx_mcdi_reset_port(efx);
1337}
1338
1110static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, 1339static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1111 const u8 *mac, int *id_out) 1340 const u8 *mac, int *id_out)
1112{ 1341{
1113 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; 1342 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
1114 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; 1343 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
1115 size_t outlen; 1344 size_t outlen;
1116 int rc; 1345 int rc;
1117 1346
@@ -1151,7 +1380,7 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
1151 1380
1152int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) 1381int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1153{ 1382{
1154 u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; 1383 MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
1155 size_t outlen; 1384 size_t outlen;
1156 int rc; 1385 int rc;
1157 1386
@@ -1178,7 +1407,7 @@ fail:
1178 1407
1179int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) 1408int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1180{ 1409{
1181 u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; 1410 MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
1182 int rc; 1411 int rc;
1183 1412
1184 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); 1413 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
@@ -1199,34 +1428,31 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
1199{ 1428{
1200 struct efx_channel *channel; 1429 struct efx_channel *channel;
1201 struct efx_rx_queue *rx_queue; 1430 struct efx_rx_queue *rx_queue;
1202 __le32 *qid; 1431 MCDI_DECLARE_BUF(inbuf,
1432 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
1203 int rc, count; 1433 int rc, count;
1204 1434
1205 BUILD_BUG_ON(EFX_MAX_CHANNELS > 1435 BUILD_BUG_ON(EFX_MAX_CHANNELS >
1206 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); 1436 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
1207 1437
1208 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
1209 if (qid == NULL)
1210 return -ENOMEM;
1211
1212 count = 0; 1438 count = 0;
1213 efx_for_each_channel(channel, efx) { 1439 efx_for_each_channel(channel, efx) {
1214 efx_for_each_channel_rx_queue(rx_queue, channel) { 1440 efx_for_each_channel_rx_queue(rx_queue, channel) {
1215 if (rx_queue->flush_pending) { 1441 if (rx_queue->flush_pending) {
1216 rx_queue->flush_pending = false; 1442 rx_queue->flush_pending = false;
1217 atomic_dec(&efx->rxq_flush_pending); 1443 atomic_dec(&efx->rxq_flush_pending);
1218 qid[count++] = cpu_to_le32( 1444 MCDI_SET_ARRAY_DWORD(
1219 efx_rx_queue_index(rx_queue)); 1445 inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
1446 count, efx_rx_queue_index(rx_queue));
1447 count++;
1220 } 1448 }
1221 } 1449 }
1222 } 1450 }
1223 1451
1224 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid, 1452 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
1225 count * sizeof(*qid), NULL, 0, NULL); 1453 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
1226 WARN_ON(rc < 0); 1454 WARN_ON(rc < 0);
1227 1455
1228 kfree(qid);
1229
1230 return rc; 1456 return rc;
1231} 1457}
1232 1458
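
The rewrite of efx_mcdi_flush_rxqs() above drops the kmalloc'd __le32 array: the QID list is now built directly in an on-stack MCDI buffer and only the populated prefix is sent, which also removes the -ENOMEM failure path. A minimal sketch of that pattern, assuming kernel context plus the MCDI_DECLARE_BUF/MCDI_SET_ARRAY_DWORD macros added later in this diff; MC_CMD_FOO and its QID field are hypothetical names, not a real MCDI command:

/* Sketch only: MC_CMD_FOO, FOO_IN_QID and MAX_QIDS are hypothetical. */
static int example_queue_flush(struct efx_nic *efx,
			       const u32 *qids, unsigned int n)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FOO_IN_LEN(MAX_QIDS));
	unsigned int i;

	/* Write each dword array element in place; no temporary array. */
	for (i = 0; i < n; i++)
		MCDI_SET_ARRAY_DWORD(inbuf, FOO_IN_QID, i, qids[i]);

	/* Send only the populated prefix of the request buffer. */
	return efx_mcdi_rpc(efx, MC_CMD_FOO, inbuf, MC_CMD_FOO_IN_LEN(n),
			    NULL, 0, NULL);
}
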
@@ -1245,3 +1471,247 @@ fail:
1245 return rc; 1471 return rc;
1246} 1472}
1247 1473
1474int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
1475{
1476 MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
1477
1478 BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
1479 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
1480 MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
1481 return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
1482 NULL, 0, NULL);
1483}
1484
1485#ifdef CONFIG_SFC_MTD
1486
1487#define EFX_MCDI_NVRAM_LEN_MAX 128
1488
1489static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
1490{
1491 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
1492 int rc;
1493
1494 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
1495
1496 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
1497
1498 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
1499 NULL, 0, NULL);
1500 if (rc)
1501 goto fail;
1502
1503 return 0;
1504
1505fail:
1506 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1507 return rc;
1508}
1509
1510static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
1511 loff_t offset, u8 *buffer, size_t length)
1512{
1513 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
1514 MCDI_DECLARE_BUF(outbuf,
1515 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
1516 size_t outlen;
1517 int rc;
1518
1519 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
1520 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
1521 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
1522
1523 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
1524 outbuf, sizeof(outbuf), &outlen);
1525 if (rc)
1526 goto fail;
1527
1528 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
1529 return 0;
1530
1531fail:
1532 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1533 return rc;
1534}
1535
1536static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
1537 loff_t offset, const u8 *buffer, size_t length)
1538{
1539 MCDI_DECLARE_BUF(inbuf,
1540 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
1541 int rc;
1542
1543 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
1544 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
1545 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
1546 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
1547
1548 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
1549
1550 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
1551 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
1552 NULL, 0, NULL);
1553 if (rc)
1554 goto fail;
1555
1556 return 0;
1557
1558fail:
1559 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1560 return rc;
1561}
1562
1563static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
1564 loff_t offset, size_t length)
1565{
1566 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
1567 int rc;
1568
1569 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
1570 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
1571 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
1572
1573 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
1574
1575 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
1576 NULL, 0, NULL);
1577 if (rc)
1578 goto fail;
1579
1580 return 0;
1581
1582fail:
1583 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1584 return rc;
1585}
1586
1587static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
1588{
1589 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
1590 int rc;
1591
1592 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
1593
1594 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
1595
1596 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
1597 NULL, 0, NULL);
1598 if (rc)
1599 goto fail;
1600
1601 return 0;
1602
1603fail:
1604 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1605 return rc;
1606}
1607
1608int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
1609 size_t len, size_t *retlen, u8 *buffer)
1610{
1611 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1612 struct efx_nic *efx = mtd->priv;
1613 loff_t offset = start;
1614 loff_t end = min_t(loff_t, start + len, mtd->size);
1615 size_t chunk;
1616 int rc = 0;
1617
1618 while (offset < end) {
1619 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
1620 rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
1621 buffer, chunk);
1622 if (rc)
1623 goto out;
1624 offset += chunk;
1625 buffer += chunk;
1626 }
1627out:
1628 *retlen = offset - start;
1629 return rc;
1630}
1631
1632int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
1633{
1634 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1635 struct efx_nic *efx = mtd->priv;
1636 loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
1637 loff_t end = min_t(loff_t, start + len, mtd->size);
1638 size_t chunk = part->common.mtd.erasesize;
1639 int rc = 0;
1640
1641 if (!part->updating) {
1642 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
1643 if (rc)
1644 goto out;
1645 part->updating = true;
1646 }
1647
1648 /* The MCDI interface can in fact do multiple erase blocks at once;
1649 * but erasing may be slow, so we make multiple calls here to avoid
1650 * tripping the MCDI RPC timeout. */
1651 while (offset < end) {
1652 rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
1653 chunk);
1654 if (rc)
1655 goto out;
1656 offset += chunk;
1657 }
1658out:
1659 return rc;
1660}
1661
1662int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
1663 size_t len, size_t *retlen, const u8 *buffer)
1664{
1665 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1666 struct efx_nic *efx = mtd->priv;
1667 loff_t offset = start;
1668 loff_t end = min_t(loff_t, start + len, mtd->size);
1669 size_t chunk;
1670 int rc = 0;
1671
1672 if (!part->updating) {
1673 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
1674 if (rc)
1675 goto out;
1676 part->updating = true;
1677 }
1678
1679 while (offset < end) {
1680 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
1681 rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
1682 buffer, chunk);
1683 if (rc)
1684 goto out;
1685 offset += chunk;
1686 buffer += chunk;
1687 }
1688out:
1689 *retlen = offset - start;
1690 return rc;
1691}
1692
1693int efx_mcdi_mtd_sync(struct mtd_info *mtd)
1694{
1695 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
1696 struct efx_nic *efx = mtd->priv;
1697 int rc = 0;
1698
1699 if (part->updating) {
1700 part->updating = false;
1701 rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
1702 }
1703
1704 return rc;
1705}
1706
1707void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
1708{
1709 struct efx_mcdi_mtd_partition *mcdi_part =
1710 container_of(part, struct efx_mcdi_mtd_partition, common);
1711 struct efx_nic *efx = part->mtd.priv;
1712
1713 snprintf(part->name, sizeof(part->name), "%s %s:%02x",
1714 efx->name, part->type_name, mcdi_part->fw_subtype);
1715}
1716
1717#endif /* CONFIG_SFC_MTD */
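
All three MTD paths above (read, erase, write) share one shape: clamp the range to the partition size, then walk it in chunks bounded by EFX_MCDI_NVRAM_LEN_MAX (or the erase block size) so that no single MCDI RPC exceeds the payload limit or, as the erase comment notes, trips the RPC timeout. A stripped-down sketch of that loop, assuming kernel context; do_chunk() is a hypothetical stand-in for the per-chunk NVRAM call:

/* Sketch: process [start, start + len), clamped to dev_size, in bounded
 * chunks. do_chunk() is a hypothetical per-chunk callback. */
static int example_chunked_op(loff_t start, size_t len, loff_t dev_size,
			      int (*do_chunk)(loff_t offset, size_t chunk))
{
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, dev_size);
	int rc = 0;

	while (offset < end) {
		size_t chunk = min_t(size_t, end - offset,
				     EFX_MCDI_NVRAM_LEN_MAX);
		rc = do_chunk(offset, chunk);
		if (rc)
			break;
		offset += chunk;
	}
	return rc;
}
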
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 3ba2e5b5a9cc..c34d0d4e10ee 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2008-2010 Solarflare Communications Inc. 3 * Copyright 2008-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -11,18 +11,20 @@
11#define EFX_MCDI_H 11#define EFX_MCDI_H
12 12
13/** 13/**
14 * enum efx_mcdi_state 14 * enum efx_mcdi_state - MCDI request handling state
15 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the 15 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
16 * mcdi_lock then they are able to move to MCDI_STATE_RUNNING 16 * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
17 * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that 17 * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
18 * moved into this state is allowed to move out of it. 18 * Only the thread that moved into this state is allowed to move out of it.
19 * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
19 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread 20 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
20 * has not yet consumed the result. For all other threads, equivalent to 21 * has not yet consumed the result. For all other threads, equivalent to
21 * MCDI_STATE_RUNNING. 22 * %MCDI_STATE_RUNNING.
22 */ 23 */
23enum efx_mcdi_state { 24enum efx_mcdi_state {
24 MCDI_STATE_QUIESCENT, 25 MCDI_STATE_QUIESCENT,
25 MCDI_STATE_RUNNING, 26 MCDI_STATE_RUNNING_SYNC,
27 MCDI_STATE_RUNNING_ASYNC,
26 MCDI_STATE_COMPLETED, 28 MCDI_STATE_COMPLETED,
27}; 29};
28 30
@@ -32,28 +34,39 @@ enum efx_mcdi_mode {
32}; 34};
33 35
34/** 36/**
35 * struct efx_mcdi_iface 37 * struct efx_mcdi_iface - MCDI protocol context
36 * @state: Interface state. Waited for by mcdi_wq. 38 * @efx: The associated NIC.
37 * @wq: Wait queue for threads waiting for state != STATE_RUNNING 39 * @state: Request handling state. Waited for by @wq.
38 * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
39 * @mode: Poll for mcdi completion, or wait for an mcdi_event. 40 * @mode: Poll for mcdi completion, or wait for an mcdi_event.
40 * Serialised by @lock 41 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
42 * @new_epoch: Indicates start of day or start of MC reboot recovery
43 * @iface_lock: Serialises access to @seqno, @credits and response metadata
41 * @seqno: The next sequence number to use for mcdi requests. 44 * @seqno: The next sequence number to use for mcdi requests.
42 * Serialised by @lock
43 * @credits: Number of spurious MCDI completion events allowed before we 45 * @credits: Number of spurious MCDI completion events allowed before we
44 * trigger a fatal error. Protected by @lock 46 * trigger a fatal error
45 * @resprc: Returned MCDI completion 47 * @resprc: Response error/success code (Linux numbering)
46 * @resplen: Returned payload length 48 * @resp_hdr_len: Response header length
49 * @resp_data_len: Response data (SDU or error) length
50 * @async_lock: Serialises access to @async_list while event processing is
51 * enabled
52 * @async_list: Queue of asynchronous requests
53 * @async_timer: Timer for asynchronous request timeout
47 */ 54 */
48struct efx_mcdi_iface { 55struct efx_mcdi_iface {
49 atomic_t state; 56 struct efx_nic *efx;
57 enum efx_mcdi_state state;
58 enum efx_mcdi_mode mode;
50 wait_queue_head_t wq; 59 wait_queue_head_t wq;
51 spinlock_t iface_lock; 60 spinlock_t iface_lock;
52 enum efx_mcdi_mode mode; 61 bool new_epoch;
53 unsigned int credits; 62 unsigned int credits;
54 unsigned int seqno; 63 unsigned int seqno;
55 unsigned int resprc; 64 int resprc;
56 size_t resplen; 65 size_t resp_hdr_len;
66 size_t resp_data_len;
67 spinlock_t async_lock;
68 struct list_head async_list;
69 struct timer_list async_timer;
57}; 70};
58 71
59struct efx_mcdi_mon { 72struct efx_mcdi_mon {
@@ -65,65 +78,204 @@ struct efx_mcdi_mon {
65 unsigned int n_attrs; 78 unsigned int n_attrs;
66}; 79};
67 80
68extern void efx_mcdi_init(struct efx_nic *efx); 81struct efx_mcdi_mtd_partition {
82 struct efx_mtd_partition common;
83 bool updating;
84 u16 nvram_type;
85 u16 fw_subtype;
86};
87
88#define to_efx_mcdi_mtd_partition(mtd) \
89 container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
90
91/**
92 * struct efx_mcdi_data - extra state for NICs that implement MCDI
93 * @iface: Interface/protocol state
94 * @hwmon: Hardware monitor state
95 */
96struct efx_mcdi_data {
97 struct efx_mcdi_iface iface;
98#ifdef CONFIG_SFC_MCDI_MON
99 struct efx_mcdi_mon hwmon;
100#endif
101};
102
103#ifdef CONFIG_SFC_MCDI_MON
104static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
105{
106 EFX_BUG_ON_PARANOID(!efx->mcdi);
107 return &efx->mcdi->hwmon;
108}
109#endif
110
111extern int efx_mcdi_init(struct efx_nic *efx);
112extern void efx_mcdi_fini(struct efx_nic *efx);
69 113
70extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, 114extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
71 size_t inlen, u8 *outbuf, size_t outlen, 115 const efx_dword_t *inbuf, size_t inlen,
116 efx_dword_t *outbuf, size_t outlen,
72 size_t *outlen_actual); 117 size_t *outlen_actual);
73 118
74extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, 119extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
75 const u8 *inbuf, size_t inlen); 120 const efx_dword_t *inbuf, size_t inlen);
76extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 121extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
77 u8 *outbuf, size_t outlen, 122 efx_dword_t *outbuf, size_t outlen,
78 size_t *outlen_actual); 123 size_t *outlen_actual);
79 124
125typedef void efx_mcdi_async_completer(struct efx_nic *efx,
126 unsigned long cookie, int rc,
127 efx_dword_t *outbuf,
128 size_t outlen_actual);
129extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
130 const efx_dword_t *inbuf, size_t inlen,
131 size_t outlen,
132 efx_mcdi_async_completer *complete,
133 unsigned long cookie);
134
80extern int efx_mcdi_poll_reboot(struct efx_nic *efx); 135extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
81extern void efx_mcdi_mode_poll(struct efx_nic *efx); 136extern void efx_mcdi_mode_poll(struct efx_nic *efx);
82extern void efx_mcdi_mode_event(struct efx_nic *efx); 137extern void efx_mcdi_mode_event(struct efx_nic *efx);
138extern void efx_mcdi_flush_async(struct efx_nic *efx);
83 139
84extern void efx_mcdi_process_event(struct efx_channel *channel, 140extern void efx_mcdi_process_event(struct efx_channel *channel,
85 efx_qword_t *event); 141 efx_qword_t *event);
86extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); 142extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
87 143
88#define MCDI_PTR2(_buf, _ofst) \ 144/* We expect that 16- and 32-bit fields in MCDI requests and responses
89 (((u8 *)_buf) + _ofst) 145 * are appropriately aligned, but 64-bit fields are only
90#define MCDI_SET_DWORD2(_buf, _ofst, _value) \ 146 * 32-bit-aligned. Also, on Siena we must copy to the MC shared
91 EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ 147 * memory strictly 32 bits at a time, so add any necessary padding.
92 EFX_DWORD_0, _value) 148 */
93#define MCDI_DWORD2(_buf, _ofst) \ 149#define MCDI_DECLARE_BUF(_name, _len) \
94 EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ 150 efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
95 EFX_DWORD_0) 151#define _MCDI_PTR(_buf, _offset) \
96#define MCDI_QWORD2(_buf, _ofst) \ 152 ((u8 *)(_buf) + (_offset))
97 EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \ 153#define MCDI_PTR(_buf, _field) \
98 EFX_QWORD_0) 154 _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
99 155#define _MCDI_CHECK_ALIGN(_ofst, _align) \
100#define MCDI_PTR(_buf, _ofst) \ 156 ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
101 MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST) 157#define _MCDI_DWORD(_buf, _field) \
102#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \ 158 ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
103 MCDI_PTR2(_buf, \ 159
104 MC_CMD_ ## _field ## _OFST + \ 160#define MCDI_WORD(_buf, _field) \
105 (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN) 161 ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
106#define MCDI_SET_DWORD(_buf, _ofst, _value) \ 162 le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
107 MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value) 163#define MCDI_SET_DWORD(_buf, _field, _value) \
108#define MCDI_DWORD(_buf, _ofst) \ 164 EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
109 MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) 165#define MCDI_DWORD(_buf, _field) \
110#define MCDI_QWORD(_buf, _ofst) \ 166 EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
111 MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) 167#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
168 EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
169 MC_CMD_ ## _name1, _value1)
170#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
171 _name2, _value2) \
172 EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
173 MC_CMD_ ## _name1, _value1, \
174 MC_CMD_ ## _name2, _value2)
175#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
176 _name2, _value2, _name3, _value3) \
177 EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
178 MC_CMD_ ## _name1, _value1, \
179 MC_CMD_ ## _name2, _value2, \
180 MC_CMD_ ## _name3, _value3)
181#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
182 _name2, _value2, _name3, _value3, \
183 _name4, _value4) \
184 EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
185 MC_CMD_ ## _name1, _value1, \
186 MC_CMD_ ## _name2, _value2, \
187 MC_CMD_ ## _name3, _value3, \
188 MC_CMD_ ## _name4, _value4)
189#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
190 _name2, _value2, _name3, _value3, \
191 _name4, _value4, _name5, _value5) \
192 EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
193 MC_CMD_ ## _name1, _value1, \
194 MC_CMD_ ## _name2, _value2, \
195 MC_CMD_ ## _name3, _value3, \
196 MC_CMD_ ## _name4, _value4, \
197 MC_CMD_ ## _name5, _value5)
198#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
199 _name2, _value2, _name3, _value3, \
200 _name4, _value4, _name5, _value5, \
201 _name6, _value6) \
202 EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
203 MC_CMD_ ## _name1, _value1, \
204 MC_CMD_ ## _name2, _value2, \
205 MC_CMD_ ## _name3, _value3, \
206 MC_CMD_ ## _name4, _value4, \
207 MC_CMD_ ## _name5, _value5, \
208 MC_CMD_ ## _name6, _value6)
209#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
210 _name2, _value2, _name3, _value3, \
211 _name4, _value4, _name5, _value5, \
212 _name6, _value6, _name7, _value7) \
213 EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
214 MC_CMD_ ## _name1, _value1, \
215 MC_CMD_ ## _name2, _value2, \
216 MC_CMD_ ## _name3, _value3, \
217 MC_CMD_ ## _name4, _value4, \
218 MC_CMD_ ## _name5, _value5, \
219 MC_CMD_ ## _name6, _value6, \
220 MC_CMD_ ## _name7, _value7)
221#define MCDI_SET_QWORD(_buf, _field, _value) \
222 do { \
223 EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
224 EFX_DWORD_0, (u32)(_value)); \
225 EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
226 EFX_DWORD_0, (u64)(_value) >> 32); \
227 } while (0)
228#define MCDI_QWORD(_buf, _field) \
229 (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
230 (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
231#define MCDI_FIELD(_ptr, _type, _field) \
232 EFX_EXTRACT_DWORD( \
233 *(efx_dword_t *) \
234 _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
235 MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
236 (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
237 MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
238
239#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
240 (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
241 + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
242#define MCDI_DECLARE_STRUCT_PTR(_name) \
243 efx_dword_t *_name
244#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
245 ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
246#define MCDI_VAR_ARRAY_LEN(_len, _field) \
247 min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
248 ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
249#define MCDI_ARRAY_WORD(_buf, _field, _index) \
250 (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
251 le16_to_cpu(*(__force const __le16 *) \
252 _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
253#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
254 (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
255 (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
256#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
257 EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
258 EFX_DWORD_0, _value)
259#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
260 EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
261#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
262 (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
263 (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
264#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
265 do { \
266 EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
267 EFX_DWORD_0, (u32)(_value)); \
268 EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
269 EFX_DWORD_0, (u64)(_value) >> 32); \
270 } while (0)
271#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
272 MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
273 _type ## _TYPEDEF, _field2)
112 274
113#define MCDI_EVENT_FIELD(_ev, _field) \ 275#define MCDI_EVENT_FIELD(_ev, _field) \
114 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 276 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
115#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
116 EFX_EXTRACT_DWORD( \
117 *((efx_dword_t *) \
118 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
119 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
120 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
121 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
122 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
123 277
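
Taken together, these macros let every command wrapper build its request and parse its response in place, with compile-time length and alignment checks instead of casts and manual byte offsets; and since MCDI_DECLARE_BUF declares an efx_dword_t array rounded up to whole dwords, the buffers pass straight into the retyped efx_mcdi_rpc(). A sketch of the canonical wrapper shape, assuming kernel context; MC_CMD_EXAMPLE and its fields are hypothetical, but real wrappers such as efx_mcdi_nvram_info() earlier in this diff follow the same outline:

/* Sketch only: MC_CMD_EXAMPLE and its fields are hypothetical. */
static int efx_mcdi_example(struct efx_nic *efx, u32 flags,
			    u64 dma_addr, u32 *result)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_EXAMPLE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_EXAMPLE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, EXAMPLE_IN_FLAGS, flags);
	/* 64-bit fields are only 32-bit aligned, so they are written as
	 * two dword halves (see MCDI_SET_QWORD above). */
	MCDI_SET_QWORD(inbuf, EXAMPLE_IN_DMA_ADDR, dma_addr);

	rc = efx_mcdi_rpc(efx, MC_CMD_EXAMPLE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_EXAMPLE_OUT_LEN)
		return -EIO;

	*result = MCDI_DWORD(outbuf, EXAMPLE_OUT_RESULT);
	return 0;
}
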
124extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); 278extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
125extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
126 bool *was_attached_out);
127extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 279extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
128 u16 *fw_subtype_list, u32 *capabilities); 280 u16 *fw_subtype_list, u32 *capabilities);
129extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, 281extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
@@ -132,34 +284,29 @@ extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
132extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, 284extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
133 size_t *size_out, size_t *erase_size_out, 285 size_t *size_out, size_t *erase_size_out,
134 bool *protected_out); 286 bool *protected_out);
135extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
136 unsigned int type);
137extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
138 loff_t offset, u8 *buffer, size_t length);
139extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
140 loff_t offset, const u8 *buffer,
141 size_t length);
142#define EFX_MCDI_NVRAM_LEN_MAX 128
143extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
144 loff_t offset, size_t length);
145extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
146 unsigned int type);
147extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); 287extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
148extern int efx_mcdi_handle_assertion(struct efx_nic *efx); 288extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
149extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 289extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
150extern int efx_mcdi_reset_port(struct efx_nic *efx);
151extern int efx_mcdi_reset_mc(struct efx_nic *efx);
152extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, 290extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
153 const u8 *mac, int *id_out); 291 const u8 *mac, int *id_out);
154extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); 292extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
155extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); 293extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
156extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); 294extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
157extern int efx_mcdi_flush_rxqs(struct efx_nic *efx); 295extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
296extern int efx_mcdi_port_probe(struct efx_nic *efx);
297extern void efx_mcdi_port_remove(struct efx_nic *efx);
298extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
299extern int efx_mcdi_port_get_number(struct efx_nic *efx);
300extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
301extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
158extern int efx_mcdi_set_mac(struct efx_nic *efx); 302extern int efx_mcdi_set_mac(struct efx_nic *efx);
159extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, 303#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
160 u32 dma_len, int enable, int clear); 304extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
161extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx); 305extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
162extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); 306extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
307extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
308extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
309extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
163 310
164#ifdef CONFIG_SFC_MCDI_MON 311#ifdef CONFIG_SFC_MCDI_MON
165extern int efx_mcdi_mon_probe(struct efx_nic *efx); 312extern int efx_mcdi_mon_probe(struct efx_nic *efx);
@@ -169,4 +316,14 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
169static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} 316static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
170#endif 317#endif
171 318
319#ifdef CONFIG_SFC_MTD
320extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
321 size_t len, size_t *retlen, u8 *buffer);
322extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
323extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
324 size_t len, size_t *retlen, const u8 *buffer);
325extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
326extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
327#endif
328
172#endif /* EFX_MCDI_H */ 329#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
deleted file mode 100644
index 1003f309cba7..000000000000
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ /dev/null
@@ -1,130 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "efx.h"
12#include "mcdi.h"
13#include "mcdi_pcol.h"
14
15int efx_mcdi_set_mac(struct efx_nic *efx)
16{
17 u32 reject, fcntl;
18 u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
19
20 memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
21 efx->net_dev->dev_addr, ETH_ALEN);
22
23 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
24 EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
25 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
26
27 /* The MCDI command provides for controlling accept/reject
28 * of broadcast packets too, but the driver doesn't currently
29 * expose this. */
30 reject = (efx->promiscuous) ? 0 :
31 (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
32 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
33
34 switch (efx->wanted_fc) {
35 case EFX_FC_RX | EFX_FC_TX:
36 fcntl = MC_CMD_FCNTL_BIDIR;
37 break;
38 case EFX_FC_RX:
39 fcntl = MC_CMD_FCNTL_RESPOND;
40 break;
41 default:
42 fcntl = MC_CMD_FCNTL_OFF;
43 break;
44 }
45 if (efx->wanted_fc & EFX_FC_AUTO)
46 fcntl = MC_CMD_FCNTL_AUTO;
47 if (efx->fc_disable)
48 fcntl = MC_CMD_FCNTL_OFF;
49
50 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
51
52 return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
53 NULL, 0, NULL);
54}
55
56bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
57{
58 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
59 size_t outlength;
60 int rc;
61
62 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
63
64 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
65 outbuf, sizeof(outbuf), &outlength);
66 if (rc) {
67 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
68 __func__, rc);
69 return true;
70 }
71
72 return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
73}
74
75int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
76 u32 dma_len, int enable, int clear)
77{
78 u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
79 int rc;
80 efx_dword_t *cmd_ptr;
81 int period = enable ? 1000 : 0;
82 u32 addr_hi;
83 u32 addr_lo;
84
85 BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
86
87 addr_lo = ((u64)dma_addr) >> 0;
88 addr_hi = ((u64)dma_addr) >> 32;
89
90 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
91 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
92 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
93 EFX_POPULATE_DWORD_7(*cmd_ptr,
94 MC_CMD_MAC_STATS_IN_DMA, !!enable,
95 MC_CMD_MAC_STATS_IN_CLEAR, clear,
96 MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
97 MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
98 MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
99 MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
100 MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
101 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
102
103 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
104 NULL, 0, NULL);
105 if (rc)
106 goto fail;
107
108 return 0;
109
110fail:
111 netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
112 __func__, enable ? "enable" : "disable", rc);
113 return rc;
114}
115
116int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
117{
118 int rc;
119
120 WARN_ON(!mutex_is_locked(&efx->mac_lock));
121
122 rc = efx_mcdi_set_mac(efx);
123 if (rc != 0)
124 return rc;
125
126 return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
127 efx->multicast_hash.byte,
128 sizeof(efx->multicast_hash),
129 NULL, 0, NULL);
130}
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 1d552f0664d7..4cc5d95b2a5a 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc. 3 * Copyright 2011-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -21,31 +21,62 @@ enum efx_hwmon_type {
21 EFX_HWMON_UNKNOWN, 21 EFX_HWMON_UNKNOWN,
22 EFX_HWMON_TEMP, /* temperature */ 22 EFX_HWMON_TEMP, /* temperature */
23 EFX_HWMON_COOL, /* cooling device, probably a heatsink */ 23 EFX_HWMON_COOL, /* cooling device, probably a heatsink */
24 EFX_HWMON_IN /* input voltage */ 24 EFX_HWMON_IN, /* voltage */
25 EFX_HWMON_CURR, /* current */
26 EFX_HWMON_POWER, /* power */
25}; 27};
26 28
27static const struct { 29static const struct {
28 const char *label; 30 const char *label;
29 enum efx_hwmon_type hwmon_type; 31 enum efx_hwmon_type hwmon_type;
30 int port; 32 int port;
31} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = { 33} efx_mcdi_sensor_type[] = {
32#define SENSOR(name, label, hwmon_type, port) \ 34#define SENSOR(name, label, hwmon_type, port) \
33 [MC_CMD_SENSOR_##name] = { label, hwmon_type, port } 35 [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
34 SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1), 36 SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
35 SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1), 37 SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
36 SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1), 38 SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
37 SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0), 39 SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
38 SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0), 40 SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
39 SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1), 41 SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
40 SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1), 42 SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
41 SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1), 43 SENSOR(IN_1V0, "1.0V supply", IN, -1),
42 SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1), 44 SENSOR(IN_1V2, "1.2V supply", IN, -1),
43 SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1), 45 SENSOR(IN_1V8, "1.8V supply", IN, -1),
44 SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1), 46 SENSOR(IN_2V5, "2.5V supply", IN, -1),
45 SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1), 47 SENSOR(IN_3V3, "3.3V supply", IN, -1),
46 SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1), 48 SENSOR(IN_12V0, "12.0V supply", IN, -1),
47 SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1), 49 SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
48 SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1), 50 SENSOR(IN_VREF, "ref. voltage", IN, -1),
51 SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
52 SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
53 SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
54 SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
55 SENSOR(FAN_0, NULL, COOL, -1),
56 SENSOR(FAN_1, NULL, COOL, -1),
57 SENSOR(FAN_2, NULL, COOL, -1),
58 SENSOR(FAN_3, NULL, COOL, -1),
59 SENSOR(FAN_4, NULL, COOL, -1),
60 SENSOR(IN_VAOE, "AOE input supply", IN, -1),
61 SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
62 SENSOR(IN_IAOE, "AOE input current", CURR, -1),
63 SENSOR(NIC_POWER, "Board power use", POWER, -1),
64 SENSOR(IN_0V9, "0.9V supply", IN, -1),
65 SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
66 SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
67 SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
68 SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
69 SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
70 SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
71 SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
72 SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
73 SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
74 SENSOR(CONTROLLER_VPTAT_EXTADC,
75 "Controller int. temp. raw (at ADC)", IN, -1),
76 SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
77 "Controller int. temp. (via ADC)", TEMP, -1),
78 SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
79 SENSOR(AIRFLOW, "Air flow raw", IN, -1),
49#undef SENSOR 80#undef SENSOR
50}; 81};
51 82
@@ -54,6 +85,7 @@ static const char *const sensor_status_names[] = {
54 [MC_CMD_SENSOR_STATE_WARNING] = "Warning", 85 [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
55 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", 86 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
56 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", 87 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
88 [MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
57}; 89};
58 90
59void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) 91void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
@@ -85,6 +117,7 @@ struct efx_mcdi_mon_attribute {
85 struct device_attribute dev_attr; 117 struct device_attribute dev_attr;
86 unsigned int index; 118 unsigned int index;
87 unsigned int type; 119 unsigned int type;
120 enum efx_hwmon_type hwmon_type;
88 unsigned int limit_value; 121 unsigned int limit_value;
89 char name[12]; 122 char name[12];
90}; 123};
@@ -92,13 +125,12 @@ struct efx_mcdi_mon_attribute {
92static int efx_mcdi_mon_update(struct efx_nic *efx) 125static int efx_mcdi_mon_update(struct efx_nic *efx)
93{ 126{
94 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 127 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
95 u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN]; 128 MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
96 int rc; 129 int rc;
97 130
98 MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO, 131 MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
99 hwmon->dma_buf.dma_addr & 0xffffffff); 132 hwmon->dma_buf.dma_addr);
100 MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI, 133 MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
101 (u64)hwmon->dma_buf.dma_addr >> 32);
102 134
103 rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, 135 rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
104 inbuf, sizeof(inbuf), NULL, 0, NULL); 136 inbuf, sizeof(inbuf), NULL, 0, NULL);
@@ -146,18 +178,32 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev,
146 struct efx_mcdi_mon_attribute *mon_attr = 178 struct efx_mcdi_mon_attribute *mon_attr =
147 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); 179 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
148 efx_dword_t entry; 180 efx_dword_t entry;
149 unsigned int value; 181 unsigned int value, state;
150 int rc; 182 int rc;
151 183
152 rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); 184 rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
153 if (rc) 185 if (rc)
154 return rc; 186 return rc;
155 187
188 state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
189 if (state == MC_CMD_SENSOR_STATE_NO_READING)
190 return -EBUSY;
191
156 value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); 192 value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
157 193
158 /* Convert temperature from degrees to milli-degrees Celsius */ 194 switch (mon_attr->hwmon_type) {
159 if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP) 195 case EFX_HWMON_TEMP:
196 /* Convert temperature from degrees to milli-degrees Celsius */
160 value *= 1000; 197 value *= 1000;
198 break;
199 case EFX_HWMON_POWER:
200 /* Convert power from watts to microwatts */
201 value *= 1000000;
202 break;
203 default:
204 /* No conversion needed */
205 break;
206 }
161 207
162 return sprintf(buf, "%u\n", value); 208 return sprintf(buf, "%u\n", value);
163} 209}
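
The two conversions mirror the hwmon sysfs ABI, which expects temp*_input in millidegrees Celsius and power*_input in microwatts: a 45 degree reading is exported as 45000 and a 12 W reading as 12000000. Voltage and current values are passed through unchanged, the MC presumably already reporting them in the millivolt/milliamp units hwmon expects.
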
@@ -172,9 +218,19 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
172 218
173 value = mon_attr->limit_value; 219 value = mon_attr->limit_value;
174 220
175 /* Convert temperature from degrees to milli-degrees Celsius */ 221 switch (mon_attr->hwmon_type) {
176 if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP) 222 case EFX_HWMON_TEMP:
223 /* Convert temperature from degrees to milli-degrees Celsius */
177 value *= 1000; 224 value *= 1000;
225 break;
226 case EFX_HWMON_POWER:
227 /* Convert power from watts to microwatts */
228 value *= 1000000;
229 break;
230 default:
231 /* No conversion needed */
232 break;
233 }
178 234
179 return sprintf(buf, "%u\n", value); 235 return sprintf(buf, "%u\n", value);
180} 236}
@@ -221,6 +277,10 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
221 strlcpy(attr->name, name, sizeof(attr->name)); 277 strlcpy(attr->name, name, sizeof(attr->name));
222 attr->index = index; 278 attr->index = index;
223 attr->type = type; 279 attr->type = type;
280 if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
281 attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
282 else
283 attr->hwmon_type = EFX_HWMON_UNKNOWN;
224 attr->limit_value = limit_value; 284 attr->limit_value = limit_value;
225 sysfs_attr_init(&attr->dev_attr.attr); 285 sysfs_attr_init(&attr->dev_attr.attr);
226 attr->dev_attr.attr.name = attr->name; 286 attr->dev_attr.attr.name = attr->name;
@@ -234,36 +294,43 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
234 294
235int efx_mcdi_mon_probe(struct efx_nic *efx) 295int efx_mcdi_mon_probe(struct efx_nic *efx)
236{ 296{
297 unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
237 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); 298 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
238 unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0; 299 MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
239 u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX]; 300 MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
301 unsigned int n_pages, n_sensors, n_attrs, page;
240 size_t outlen; 302 size_t outlen;
241 char name[12]; 303 char name[12];
242 u32 mask; 304 u32 mask;
243 int rc, i, type; 305 int rc, i, j, type;
244 306
245 BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0); 307 /* Find out how many sensors are present */
308 n_sensors = 0;
309 page = 0;
310 do {
311 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
246 312
247 rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0, 313 rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
248 outbuf, sizeof(outbuf), &outlen); 314 outbuf, sizeof(outbuf), &outlen);
249 if (rc) 315 if (rc)
250 return rc; 316 return rc;
251 if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) 317 if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
252 return -EIO; 318 return -EIO;
253 319
254 /* Find out which sensors are present. Don't create a device 320 mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
255 * if there are none. 321 n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
256 */ 322 ++page;
257 mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK); 323 } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
258 if (mask == 0) 324 n_pages = page;
325
326 /* Don't create a device if there are none */
327 if (n_sensors == 0)
259 return 0; 328 return 0;
260 329
261 /* Check again for short response */ 330 rc = efx_nic_alloc_buffer(
262 if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) 331 efx, &hwmon->dma_buf,
263 return -EIO; 332 n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
264 333 GFP_KERNEL);
265 rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
266 4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
267 if (rc) 334 if (rc)
268 return rc; 335 return rc;
269 336
@@ -274,7 +341,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
274 * attributes for this set of sensors: name of the driver plus 341 * attributes for this set of sensors: name of the driver plus
275 * value, min, max, crit, alarm and label for each sensor. 342 * value, min, max, crit, alarm and label for each sensor.
276 */ 343 */
277 n_attrs = 1 + 6 * hweight32(mask); 344 n_attrs = 1 + 6 * n_sensors;
278 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); 345 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
279 if (!hwmon->attrs) { 346 if (!hwmon->attrs) {
280 rc = -ENOMEM; 347 rc = -ENOMEM;
@@ -291,26 +358,63 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
291 if (rc) 358 if (rc)
292 goto fail; 359 goto fail;
293 360
294 for (i = 0, type = -1; ; i++) { 361 for (i = 0, j = -1, type = -1; ; i++) {
362 enum efx_hwmon_type hwmon_type;
295 const char *hwmon_prefix; 363 const char *hwmon_prefix;
296 unsigned hwmon_index; 364 unsigned hwmon_index;
297 u16 min1, max1, min2, max2; 365 u16 min1, max1, min2, max2;
298 366
299 /* Find next sensor type or exit if there is none */ 367 /* Find next sensor type or exit if there is none */
300 type++; 368 do {
301 while (!(mask & (1 << type))) {
302 type++; 369 type++;
303 if (type == 32)
304 return 0;
305 }
306 370
307 /* Skip sensors specific to a different port */ 371 if ((type % 32) == 0) {
308 if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN && 372 page = type / 32;
309 efx_mcdi_sensor_type[type].port >= 0 && 373 j = -1;
310 efx_mcdi_sensor_type[type].port != efx_port_num(efx)) 374 if (page == n_pages)
311 continue; 375 return 0;
376
377 MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
378 page);
379 rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
380 inbuf, sizeof(inbuf),
381 outbuf, sizeof(outbuf),
382 &outlen);
383 if (rc)
384 goto fail;
385 if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
386 rc = -EIO;
387 goto fail;
388 }
389
390 mask = (MCDI_DWORD(outbuf,
391 SENSOR_INFO_OUT_MASK) &
392 ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
312 393
313 switch (efx_mcdi_sensor_type[type].hwmon_type) { 394 /* Check again for short response */
395 if (outlen <
396 MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
397 rc = -EIO;
398 goto fail;
399 }
400 }
401 } while (!(mask & (1 << type % 32)));
402 j++;
403
404 if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
405 hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
406
407 /* Skip sensors specific to a different port */
408 if (hwmon_type != EFX_HWMON_UNKNOWN &&
409 efx_mcdi_sensor_type[type].port >= 0 &&
410 efx_mcdi_sensor_type[type].port !=
411 efx_port_num(efx))
412 continue;
413 } else {
414 hwmon_type = EFX_HWMON_UNKNOWN;
415 }
416
417 switch (hwmon_type) {
314 case EFX_HWMON_TEMP: 418 case EFX_HWMON_TEMP:
315 hwmon_prefix = "temp"; 419 hwmon_prefix = "temp";
316 hwmon_index = ++n_temp; /* 1-based */ 420 hwmon_index = ++n_temp; /* 1-based */
@@ -327,16 +431,24 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
327 hwmon_prefix = "in"; 431 hwmon_prefix = "in";
328 hwmon_index = n_in++; /* 0-based */ 432 hwmon_index = n_in++; /* 0-based */
329 break; 433 break;
434 case EFX_HWMON_CURR:
435 hwmon_prefix = "curr";
436 hwmon_index = ++n_curr; /* 1-based */
437 break;
438 case EFX_HWMON_POWER:
439 hwmon_prefix = "power";
440 hwmon_index = ++n_power; /* 1-based */
441 break;
330 } 442 }
331 443
332 min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 444 min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
333 SENSOR_INFO_ENTRY, i, MIN1); 445 SENSOR_INFO_ENTRY, j, MIN1);
334 max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 446 max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
335 SENSOR_INFO_ENTRY, i, MAX1); 447 SENSOR_INFO_ENTRY, j, MAX1);
336 min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 448 min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
337 SENSOR_INFO_ENTRY, i, MIN2); 449 SENSOR_INFO_ENTRY, j, MIN2);
338 max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, 450 max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
339 SENSOR_INFO_ENTRY, i, MAX2); 451 SENSOR_INFO_ENTRY, j, MAX2);
340 452
341 if (min1 != max1) { 453 if (min1 != max1) {
342 snprintf(name, sizeof(name), "%s%u_input", 454 snprintf(name, sizeof(name), "%s%u_input",
@@ -346,13 +458,15 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
346 if (rc) 458 if (rc)
347 goto fail; 459 goto fail;
348 460
349 snprintf(name, sizeof(name), "%s%u_min", 461 if (hwmon_type != EFX_HWMON_POWER) {
350 hwmon_prefix, hwmon_index); 462 snprintf(name, sizeof(name), "%s%u_min",
351 rc = efx_mcdi_mon_add_attr( 463 hwmon_prefix, hwmon_index);
352 efx, name, efx_mcdi_mon_show_limit, 464 rc = efx_mcdi_mon_add_attr(
353 i, type, min1); 465 efx, name, efx_mcdi_mon_show_limit,
354 if (rc) 466 i, type, min1);
355 goto fail; 467 if (rc)
468 goto fail;
469 }
356 470
357 snprintf(name, sizeof(name), "%s%u_max", 471 snprintf(name, sizeof(name), "%s%u_max",
358 hwmon_prefix, hwmon_index); 472 hwmon_prefix, hwmon_index);
@@ -383,7 +497,8 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
383 if (rc) 497 if (rc)
384 goto fail; 498 goto fail;
385 499
386 if (efx_mcdi_sensor_type[type].label) { 500 if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
501 efx_mcdi_sensor_type[type].label) {
387 snprintf(name, sizeof(name), "%s%u_label", 502 snprintf(name, sizeof(name), "%s%u_label",
388 hwmon_prefix, hwmon_index); 503 hwmon_prefix, hwmon_index);
389 rc = efx_mcdi_mon_add_attr( 504 rc = efx_mcdi_mon_add_attr(
@@ -400,8 +515,7 @@ fail:
400 515
401void efx_mcdi_mon_remove(struct efx_nic *efx) 516void efx_mcdi_mon_remove(struct efx_nic *efx)
402{ 517{
403 struct siena_nic_data *nic_data = efx->nic_data; 518 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
404 struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
405 unsigned int i; 519 unsigned int i;
406 520
407 for (i = 0; i < hwmon->n_attrs; i++) 521 for (i = 0; i < hwmon->n_attrs; i++)
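The attribute names assembled above follow the standard Linux hwmon sysfs convention: a per-type prefix plus a channel counter, with "in" channels 0-based and temp/curr/power 1-based, then an "_input"/"_min"/"_max"/"_label" suffix. A minimal sketch of the same naming scheme (hypothetical helper, not driver code):

    #include <stdio.h>

    /* Build "<prefix><index>_<suffix>", e.g. "temp1_input" or "in0_min". */
    static void mon_attr_name(char *buf, size_t len, const char *prefix,
                              unsigned int index, const char *suffix)
    {
            snprintf(buf, len, "%s%u_%s", prefix, index, suffix);
    }

    int main(void)
    {
            char name[32];
            unsigned int n_temp = 0, n_in = 0;

            mon_attr_name(name, sizeof(name), "temp", ++n_temp, "input");
            printf("%s\n", name);   /* temp1_input: temp channels are 1-based */
            mon_attr_name(name, sizeof(name), "in", n_in++, "input");
            printf("%s\n", name);   /* in0_input: voltage channels are 0-based */
            return 0;
    }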
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index c5c9747861ba..b5cf62492f8e 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2009-2011 Solarflare Communications Inc. 3 * Copyright 2009-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,13 @@
21#define MC_FW_STATE_BOOTING (4) 21#define MC_FW_STATE_BOOTING (4)
22/* The Scheduler has started. */ 22/* The Scheduler has started. */
23#define MC_FW_STATE_SCHED (8) 23#define MC_FW_STATE_SCHED (8)
24/* If this is set in MC_RESET_STATE_REG then it should be
25 * possible to jump into IMEM without loading code from flash.
26 * Unlike a warm boot, assume DMEM has been reloaded, so that
27 * the MC persistent data must be reinitialised. */
28#define MC_FW_TEPID_BOOT_OK (16)
29/* BIST state has been initialized */
30#define MC_FW_BIST_INIT_OK (128)
24 31
25/* Siena MC shared memory offsets */ 32/* Siena MC shared memory offsets */
26/* The 'doorbell' addresses are hard-wired to alert the MC when written */ 33/* The 'doorbell' addresses are hard-wired to alert the MC when written */
@@ -39,18 +46,21 @@
39#define MC_STATUS_DWORD_REBOOT (0xb007b007) 46#define MC_STATUS_DWORD_REBOOT (0xb007b007)
40#define MC_STATUS_DWORD_ASSERT (0xdeaddead) 47#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
41 48
49/* Check whether an mcfw version (in host order) belongs to a bootloader */
50#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
51
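The macro above just tests whether the top 16 bits of a host-order firmware version are 0xb007 ("boot"); the bootrom version enums added later in this diff (0xb0070000 for Siena, 0xb0070001 for Huntington) both satisfy it. A self-contained check using only values from this header:

    #include <assert.h>

    #define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)

    int main(void)
    {
            assert(MC_FW_VERSION_IS_BOOTLOADER(0xb0070000u));  /* Siena bootrom */
            assert(MC_FW_VERSION_IS_BOOTLOADER(0xb0070001u));  /* Huntington bootrom */
            assert(!MC_FW_VERSION_IS_BOOTLOADER(0x04020000u)); /* ordinary firmware */
            return 0;
    }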
42/* The current version of the MCDI protocol. 52/* The current version of the MCDI protocol.
43 * 53 *
44 * Note that the ROM burnt into the card only talks V0, so at the very 54 * Note that the ROM burnt into the card only talks V0, so at the very
45 * least every driver must support version 0 and MCDI_PCOL_VERSION 55 * least every driver must support version 0 and MCDI_PCOL_VERSION
46 */ 56 */
47#define MCDI_PCOL_VERSION 1 57#define MCDI_PCOL_VERSION 2
48 58
49/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ 59/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
50 60
51/* MCDI version 1 61/* MCDI version 1
52 * 62 *
53 * Each MCDI request starts with an MCDI_HEADER, which is a 32byte 63 * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
54 * structure, filled in by the client. 64 * structure, filled in by the client.
55 * 65 *
56 * 0 7 8 16 20 22 23 24 31 66 * 0 7 8 16 20 22 23 24 31
@@ -87,9 +97,11 @@
87#define MCDI_HEADER_DATALEN_LBN 8 97#define MCDI_HEADER_DATALEN_LBN 8
88#define MCDI_HEADER_DATALEN_WIDTH 8 98#define MCDI_HEADER_DATALEN_WIDTH 8
89#define MCDI_HEADER_SEQ_LBN 16 99#define MCDI_HEADER_SEQ_LBN 16
90#define MCDI_HEADER_RSVD_LBN 20
91#define MCDI_HEADER_RSVD_WIDTH 2
92#define MCDI_HEADER_SEQ_WIDTH 4 100#define MCDI_HEADER_SEQ_WIDTH 4
101#define MCDI_HEADER_RSVD_LBN 20
102#define MCDI_HEADER_RSVD_WIDTH 1
103#define MCDI_HEADER_NOT_EPOCH_LBN 21
104#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
93#define MCDI_HEADER_ERROR_LBN 22 105#define MCDI_HEADER_ERROR_LBN 22
94#define MCDI_HEADER_ERROR_WIDTH 1 106#define MCDI_HEADER_ERROR_WIDTH 1
95#define MCDI_HEADER_RESPONSE_LBN 23 107#define MCDI_HEADER_RESPONSE_LBN 23
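The _LBN/_WIDTH pairs above describe bitfields within the 32-bit header dword ("lowest bit number" plus field width). A hedged sketch of packing the request-side fields, with a hypothetical MCDI_FIELD helper that is not part of this header:

    #include <stdint.h>

    /* Place VALUE into a dword at the given _LBN with the given _WIDTH. */
    #define MCDI_FIELD(value, lbn, width) \
            ((((uint32_t)(value)) & ((1u << (width)) - 1u)) << (lbn))

    /* Pack DATALEN (LBN 8, width 8) and SEQ (LBN 16, width 4) per the
     * defines above; ERROR (22) and RESPONSE (23) are set by the MC in
     * the reply, and NOT_EPOCH (21) relates to v2 reboot detection. */
    static uint32_t mcdi_header_fields(unsigned int datalen, unsigned int seq)
    {
            return MCDI_FIELD(datalen, 8, 8) | MCDI_FIELD(seq, 16, 4);
    }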
@@ -100,7 +112,11 @@
100#define MCDI_HEADER_XFLAGS_EVREQ 0x01 112#define MCDI_HEADER_XFLAGS_EVREQ 0x01
101 113
102/* Maximum number of payload bytes */ 114/* Maximum number of payload bytes */
103#define MCDI_CTL_SDU_LEN_MAX 0xfc 115#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
116#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
117
118#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
119
104 120
105/* The MC can generate events for two reasons: 121/* The MC can generate events for two reasons:
106 * - To complete a shared memory request if XFLAGS_EVREQ was set 122 * - To complete a shared memory request if XFLAGS_EVREQ was set
@@ -145,22 +161,69 @@
145#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc 161#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
146 162
147 163
164/* Operation not permitted. */
165#define MC_CMD_ERR_EPERM 1
148/* Non-existent command target */ 166/* Non-existent command target */
149#define MC_CMD_ERR_ENOENT 2 167#define MC_CMD_ERR_ENOENT 2
150/* assert() has killed the MC */ 168/* assert() has killed the MC */
151#define MC_CMD_ERR_EINTR 4 169#define MC_CMD_ERR_EINTR 4
170/* I/O failure */
171#define MC_CMD_ERR_EIO 5
172/* Try again */
173#define MC_CMD_ERR_EAGAIN 11
174/* Out of memory */
175#define MC_CMD_ERR_ENOMEM 12
152/* Caller does not hold required locks */ 176/* Caller does not hold required locks */
153#define MC_CMD_ERR_EACCES 13 177#define MC_CMD_ERR_EACCES 13
154/* Resource is currently unavailable (e.g. lock contention) */ 178/* Resource is currently unavailable (e.g. lock contention) */
155#define MC_CMD_ERR_EBUSY 16 179#define MC_CMD_ERR_EBUSY 16
180/* No such device */
181#define MC_CMD_ERR_ENODEV 19
156/* Invalid argument to target */ 182/* Invalid argument to target */
157#define MC_CMD_ERR_EINVAL 22 183#define MC_CMD_ERR_EINVAL 22
184/* Out of range */
185#define MC_CMD_ERR_ERANGE 34
158/* Non-recursive resource is already acquired */ 186/* Non-recursive resource is already acquired */
159#define MC_CMD_ERR_EDEADLK 35 187#define MC_CMD_ERR_EDEADLK 35
160/* Operation not implemented */ 188/* Operation not implemented */
161#define MC_CMD_ERR_ENOSYS 38 189#define MC_CMD_ERR_ENOSYS 38
162/* Operation timed out */ 190/* Operation timed out */
163#define MC_CMD_ERR_ETIME 62 191#define MC_CMD_ERR_ETIME 62
192/* Link has been severed */
193#define MC_CMD_ERR_ENOLINK 67
194/* Protocol error */
195#define MC_CMD_ERR_EPROTO 71
196/* Operation not supported */
197#define MC_CMD_ERR_ENOTSUP 95
198/* Address not available */
199#define MC_CMD_ERR_EADDRNOTAVAIL 99
200/* Not connected */
201#define MC_CMD_ERR_ENOTCONN 107
202/* Operation already in progress */
203#define MC_CMD_ERR_EALREADY 114
204
205/* Resource allocation failed. */
206#define MC_CMD_ERR_ALLOC_FAIL 0x1000
207/* V-adaptor not found. */
208#define MC_CMD_ERR_NO_VADAPTOR 0x1001
209/* EVB port not found. */
210#define MC_CMD_ERR_NO_EVB_PORT 0x1002
211/* V-switch not found. */
212#define MC_CMD_ERR_NO_VSWITCH 0x1003
213/* Too many VLAN tags. */
214#define MC_CMD_ERR_VLAN_LIMIT 0x1004
215/* Bad PCI function number. */
216#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
217/* Invalid VLAN mode. */
218#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
219/* Invalid v-switch type. */
220#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
221/* Invalid v-port type. */
222#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
223/* MAC address exists. */
224#define MC_CMD_ERR_MAC_EXIST 0x1009
225/* Slave core not present */
226#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
164 227
165#define MC_CMD_ERR_CODE_OFST 0 228#define MC_CMD_ERR_CODE_OFST 0
166 229
@@ -178,9 +241,11 @@
178 241
179/* Vectors in the boot ROM */ 242/* Vectors in the boot ROM */
180/* Point to the copycode entry point. */ 243/* Point to the copycode entry point. */
181#define MC_BOOTROM_COPYCODE_VEC (0x7f4) 244#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
245#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
182/* Points to the recovery mode entry point. */ 246/* Points to the recovery mode entry point. */
183#define MC_BOOTROM_NOFLASH_VEC (0x7f8) 247#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
248#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
184 249
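Rewriting the vectors as offsets from the top of the boot ROM keeps the Siena values identical to the old MC_BOOTROM_*_VEC constants (0x7f4 and 0x7f8, visible on the left), while Huntington applies the same layout to a larger ROM (top at 0x8000, inferred from the define). The arithmetic, checked at compile time:

    /* The vectors occupy the last words of each part's boot ROM. */
    _Static_assert((0x800 - 3 * 0x4) == 0x7f4, "Siena copycode vec unchanged");
    _Static_assert((0x800 - 2 * 0x4) == 0x7f8, "Siena noflash vec unchanged");
    _Static_assert((0x8000 - 3 * 0x4) == 0x7ff4, "Huntington copycode vec");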
185/* The command set exported by the boot ROM (MCDI v0) */ 250/* The command set exported by the boot ROM (MCDI v0) */
186#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ 251#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -209,16 +274,29 @@
209 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) 274 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
210 275
211 276
277/* Version 2 adds an optional argument to error returns: the errno value
278 * may be followed by the (0-based) number of the first argument that
279 * could not be processed.
280 */
281#define MC_CMD_ERR_ARG_OFST 4
282
283/* No space */
284#define MC_CMD_ERR_ENOSPC 28
285
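Combined with MC_CMD_ERR_CODE_OFST (0) defined earlier, this gives v2 error responses a simple shape: the errno-style code first, optionally followed by the index of the offending argument. A hedged decode sketch (helper name hypothetical; assumes a little-endian buffer, matching the MCDI wire format):

    #include <stdint.h>
    #include <string.h>

    #define MC_CMD_ERR_CODE_OFST 0
    #define MC_CMD_ERR_ARG_OFST  4

    /* Returns the MC_CMD_ERR_* code; *first_bad_arg gets the 0-based
     * index of the first rejected argument, or -1 if not reported. */
    static int mcdi_decode_error(const uint8_t *resp, size_t outlen,
                                 int *first_bad_arg)
    {
            uint32_t code = 0, arg = 0;

            memcpy(&code, resp + MC_CMD_ERR_CODE_OFST, sizeof(code));
            if (outlen >= MC_CMD_ERR_ARG_OFST + sizeof(arg)) {
                    memcpy(&arg, resp + MC_CMD_ERR_ARG_OFST, sizeof(arg));
                    *first_bad_arg = (int)arg;
            } else {
                    *first_bad_arg = -1;
            }
            return (int)code;       /* e.g. MC_CMD_ERR_EINVAL (22) */
    }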
212/* MCDI_EVENT structuredef */ 286/* MCDI_EVENT structuredef */
213#define MCDI_EVENT_LEN 8 287#define MCDI_EVENT_LEN 8
214#define MCDI_EVENT_CONT_LBN 32 288#define MCDI_EVENT_CONT_LBN 32
215#define MCDI_EVENT_CONT_WIDTH 1 289#define MCDI_EVENT_CONT_WIDTH 1
216#define MCDI_EVENT_LEVEL_LBN 33 290#define MCDI_EVENT_LEVEL_LBN 33
217#define MCDI_EVENT_LEVEL_WIDTH 3 291#define MCDI_EVENT_LEVEL_WIDTH 3
218#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */ 292/* enum: Info. */
219#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */ 293#define MCDI_EVENT_LEVEL_INFO 0x0
220#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */ 294/* enum: Warning. */
221#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */ 295#define MCDI_EVENT_LEVEL_WARN 0x1
296/* enum: Error. */
297#define MCDI_EVENT_LEVEL_ERR 0x2
298/* enum: Fatal. */
299#define MCDI_EVENT_LEVEL_FATAL 0x3
222#define MCDI_EVENT_DATA_OFST 0 300#define MCDI_EVENT_DATA_OFST 0
223#define MCDI_EVENT_CMDDONE_SEQ_LBN 0 301#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
224#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 302#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
@@ -230,9 +308,14 @@
230#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 308#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
231#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 309#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
232#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 310#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
233#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */ 311/* enum: 100Mb/s */
234#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */ 312#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
235#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */ 313/* enum: 1Gb/s */
314#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
315/* enum: 10Gb/s */
316#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
317/* enum: 40Gb/s */
318#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
236#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 319#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
237#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 320#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
238#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 321#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -247,26 +330,80 @@
247#define MCDI_EVENT_FWALERT_DATA_WIDTH 24 330#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
248#define MCDI_EVENT_FWALERT_REASON_LBN 0 331#define MCDI_EVENT_FWALERT_REASON_LBN 0
249#define MCDI_EVENT_FWALERT_REASON_WIDTH 8 332#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
250#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */ 333/* enum: SRAM Access. */
334#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
251#define MCDI_EVENT_FLR_VF_LBN 0 335#define MCDI_EVENT_FLR_VF_LBN 0
252#define MCDI_EVENT_FLR_VF_WIDTH 8 336#define MCDI_EVENT_FLR_VF_WIDTH 8
253#define MCDI_EVENT_TX_ERR_TXQ_LBN 0 337#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
254#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 338#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
255#define MCDI_EVENT_TX_ERR_TYPE_LBN 12 339#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
256#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 340#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
257#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */ 341/* enum: Descriptor loader reported failure */
258#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */ 342#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
259#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */ 343/* enum: Descriptor ring empty and no EOP seen for packet */
344#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
345/* enum: Overlength packet */
346#define MCDI_EVENT_TX_ERR_2BIG 0x3
347/* enum: Malformed option descriptor */
348#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
349/* enum: Option descriptor part way through a packet */
350#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
351/* enum: DMA or PIO data access error */
352#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
260#define MCDI_EVENT_TX_ERR_INFO_LBN 16 353#define MCDI_EVENT_TX_ERR_INFO_LBN 16
261#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 354#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
355#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
356#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
262#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 357#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
263#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 358#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
264#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 359#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
265#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 360#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
266#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */ 361/* enum: PLL lost lock */
267#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */ 362#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
268#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */ 363/* enum: Filter overflow (PDMA) */
269#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */ 364#define MCDI_EVENT_PTP_ERR_FILTER 0x2
365/* enum: FIFO overflow (FPGA) */
366#define MCDI_EVENT_PTP_ERR_FIFO 0x3
367/* enum: Merge queue overflow */
368#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
369#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
370#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
371/* enum: AOE failed to load - no valid image? */
372#define MCDI_EVENT_AOE_NO_LOAD 0x1
373/* enum: AOE FC reported an exception */
374#define MCDI_EVENT_AOE_FC_ASSERT 0x2
375/* enum: AOE FC watchdogged */
376#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
377/* enum: AOE FC failed to start */
378#define MCDI_EVENT_AOE_FC_NO_START 0x4
379/* enum: Generic AOE fault - likely to have been reported via other means too
380 * but intended for use by aoex driver.
381 */
382#define MCDI_EVENT_AOE_FAULT 0x5
383/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
384#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
385/* enum: AOE loaded successfully */
386#define MCDI_EVENT_AOE_LOAD 0x7
387/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
388#define MCDI_EVENT_AOE_DMA 0x8
389/* enum: AOE byteblaster connected/disconnected (Connection status in
390 * AOE_ERR_DATA)
391 */
392#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
393#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
394#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
395#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
396#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
397#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
398#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
399#define MCDI_EVENT_RX_ERR_INFO_LBN 16
400#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
401#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
402#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
403#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
404#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
405#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
406#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
270#define MCDI_EVENT_DATA_LBN 0 407#define MCDI_EVENT_DATA_LBN 0
271#define MCDI_EVENT_DATA_WIDTH 32 408#define MCDI_EVENT_DATA_WIDTH 32
272#define MCDI_EVENT_SRC_LBN 36 409#define MCDI_EVENT_SRC_LBN 36
@@ -275,21 +412,60 @@
275#define MCDI_EVENT_EV_CODE_WIDTH 4 412#define MCDI_EVENT_EV_CODE_WIDTH 4
276#define MCDI_EVENT_CODE_LBN 44 413#define MCDI_EVENT_CODE_LBN 44
277#define MCDI_EVENT_CODE_WIDTH 8 414#define MCDI_EVENT_CODE_WIDTH 8
278#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */ 415/* enum: Bad assert. */
279#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */ 416#define MCDI_EVENT_CODE_BADSSERT 0x1
280#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */ 417/* enum: PM Notice. */
281#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */ 418#define MCDI_EVENT_CODE_PMNOTICE 0x2
282#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */ 419/* enum: Command done. */
283#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */ 420#define MCDI_EVENT_CODE_CMDDONE 0x3
284#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */ 421/* enum: Link change. */
285#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */ 422#define MCDI_EVENT_CODE_LINKCHANGE 0x4
286#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */ 423/* enum: Sensor Event. */
287#define MCDI_EVENT_CODE_FLR 0xa /* enum */ 424#define MCDI_EVENT_CODE_SENSOREVT 0x5
288#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */ 425/* enum: Schedule error. */
289#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */ 426#define MCDI_EVENT_CODE_SCHEDERR 0x6
290#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */ 427/* enum: Reboot. */
291#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */ 428#define MCDI_EVENT_CODE_REBOOT 0x7
292#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */ 429/* enum: Mac stats DMA. */
430#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
431/* enum: Firmware alert. */
432#define MCDI_EVENT_CODE_FWALERT 0x9
433/* enum: Function level reset. */
434#define MCDI_EVENT_CODE_FLR 0xa
435/* enum: Transmit error */
436#define MCDI_EVENT_CODE_TX_ERR 0xb
437/* enum: Tx flush has completed */
438#define MCDI_EVENT_CODE_TX_FLUSH 0xc
439/* enum: PTP packet received timestamp */
440#define MCDI_EVENT_CODE_PTP_RX 0xd
441/* enum: PTP NIC failure */
442#define MCDI_EVENT_CODE_PTP_FAULT 0xe
443/* enum: PTP PPS event */
444#define MCDI_EVENT_CODE_PTP_PPS 0xf
445/* enum: Rx flush has completed */
446#define MCDI_EVENT_CODE_RX_FLUSH 0x10
447/* enum: Receive error */
448#define MCDI_EVENT_CODE_RX_ERR 0x11
449/* enum: AOE fault */
450#define MCDI_EVENT_CODE_AOE 0x12
451/* enum: Network port calibration failed (VCAL). */
452#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
453/* enum: HW PPS event */
454#define MCDI_EVENT_CODE_HW_PPS 0x14
455/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
456 * a different format)
457 */
458#define MCDI_EVENT_CODE_MC_REBOOT 0x15
459/* enum: the MC has detected a parity error */
460#define MCDI_EVENT_CODE_PAR_ERR 0x16
461/* enum: the MC has detected a correctable error */
462#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
463/* enum: the MC has detected an uncorrectable error */
464#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
465/* enum: Artificial event generated by host and posted via MC for test
466 * purposes.
467 */
468#define MCDI_EVENT_CODE_TESTGEN 0xfa
293#define MCDI_EVENT_CMDDONE_DATA_OFST 0 469#define MCDI_EVENT_CMDDONE_DATA_OFST 0
294#define MCDI_EVENT_CMDDONE_DATA_LBN 0 470#define MCDI_EVENT_CMDDONE_DATA_LBN 0
295#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 471#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
@@ -305,15 +481,114 @@
305#define MCDI_EVENT_TX_ERR_DATA_OFST 0 481#define MCDI_EVENT_TX_ERR_DATA_OFST 0
306#define MCDI_EVENT_TX_ERR_DATA_LBN 0 482#define MCDI_EVENT_TX_ERR_DATA_LBN 0
307#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 483#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
484/* Seconds field of timestamp */
308#define MCDI_EVENT_PTP_SECONDS_OFST 0 485#define MCDI_EVENT_PTP_SECONDS_OFST 0
309#define MCDI_EVENT_PTP_SECONDS_LBN 0 486#define MCDI_EVENT_PTP_SECONDS_LBN 0
310#define MCDI_EVENT_PTP_SECONDS_WIDTH 32 487#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
488/* Nanoseconds field of timestamp */
311#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 489#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
312#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 490#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
313#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 491#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
492/* Lowest four bytes of sourceUUID from PTP packet */
314#define MCDI_EVENT_PTP_UUID_OFST 0 493#define MCDI_EVENT_PTP_UUID_OFST 0
315#define MCDI_EVENT_PTP_UUID_LBN 0 494#define MCDI_EVENT_PTP_UUID_LBN 0
316#define MCDI_EVENT_PTP_UUID_WIDTH 32 495#define MCDI_EVENT_PTP_UUID_WIDTH 32
496#define MCDI_EVENT_RX_ERR_DATA_OFST 0
497#define MCDI_EVENT_RX_ERR_DATA_LBN 0
498#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
499#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
500#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
501#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
502#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
503#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
504#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
505#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
506#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
507#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
508
509/* FCDI_EVENT structuredef */
510#define FCDI_EVENT_LEN 8
511#define FCDI_EVENT_CONT_LBN 32
512#define FCDI_EVENT_CONT_WIDTH 1
513#define FCDI_EVENT_LEVEL_LBN 33
514#define FCDI_EVENT_LEVEL_WIDTH 3
515/* enum: Info. */
516#define FCDI_EVENT_LEVEL_INFO 0x0
517/* enum: Warning. */
518#define FCDI_EVENT_LEVEL_WARN 0x1
519/* enum: Error. */
520#define FCDI_EVENT_LEVEL_ERR 0x2
521/* enum: Fatal. */
522#define FCDI_EVENT_LEVEL_FATAL 0x3
523#define FCDI_EVENT_DATA_OFST 0
524#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
525#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
526#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
527#define FCDI_EVENT_LINK_UP 0x1 /* enum */
528#define FCDI_EVENT_DATA_LBN 0
529#define FCDI_EVENT_DATA_WIDTH 32
530#define FCDI_EVENT_SRC_LBN 36
531#define FCDI_EVENT_SRC_WIDTH 8
532#define FCDI_EVENT_EV_CODE_LBN 60
533#define FCDI_EVENT_EV_CODE_WIDTH 4
534#define FCDI_EVENT_CODE_LBN 44
535#define FCDI_EVENT_CODE_WIDTH 8
536/* enum: The FC was rebooted. */
537#define FCDI_EVENT_CODE_REBOOT 0x1
538/* enum: Bad assert. */
539#define FCDI_EVENT_CODE_ASSERT 0x2
540/* enum: DDR3 test result. */
541#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
542/* enum: Link status. */
543#define FCDI_EVENT_CODE_LINK_STATE 0x4
544/* enum: A timed read is ready to be serviced. */
545#define FCDI_EVENT_CODE_TIMED_READ 0x5
546/* enum: One or more PPS IN events */
547#define FCDI_EVENT_CODE_PPS_IN 0x6
548/* enum: One or more PPS OUT events */
549#define FCDI_EVENT_CODE_PPS_OUT 0x7
550#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
551#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
552#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
553#define FCDI_EVENT_ASSERT_TYPE_LBN 36
554#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
555#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
556#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
557#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
558#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
559#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
560#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
561#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
562#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
563#define FCDI_EVENT_PPS_COUNT_OFST 0
564#define FCDI_EVENT_PPS_COUNT_LBN 0
565#define FCDI_EVENT_PPS_COUNT_WIDTH 32
566
567/* FCDI_EXTENDED_EVENT structuredef */
568#define FCDI_EXTENDED_EVENT_LENMIN 16
569#define FCDI_EXTENDED_EVENT_LENMAX 248
570#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
571/* Number of timestamps following */
572#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
573#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
574#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
575/* Seconds field of a timestamp record */
576#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
577#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
578#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
579/* Nanoseconds field of a timestamp record */
580#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
581#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
582#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
583/* Timestamp records comprising the event */
584#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
585#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
586#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
587#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
588#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
589#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
590#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
591#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
317 592
318 593
319/***********************************/ 594/***********************************/
@@ -365,11 +640,27 @@
365 640
366/* MC_CMD_COPYCODE_IN msgrequest */ 641/* MC_CMD_COPYCODE_IN msgrequest */
367#define MC_CMD_COPYCODE_IN_LEN 16 642#define MC_CMD_COPYCODE_IN_LEN 16
643/* Source address */
368#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 644#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
645/* enum: Entering the main image via a copy of a single word from and to this
646 * address indicates that it should not attempt to start the datapath CPUs.
647 * This is useful for certain soft rebooting scenarios. (Huntington only)
648 */
649#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
650/* enum: Entering the main image via a copy of a single word from and to this
651 * address indicates that it should not attempt to parse any configuration from
652 * flash. (In addition, the datapath CPUs will not be started, as for
653 * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
654 * certain soft rebooting scenarios. (Huntington only)
655 */
656#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
657/* Destination address */
369#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 658#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
370#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 659#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
660/* Address of where to jump after copy. */
371#define MC_CMD_COPYCODE_IN_JUMP_OFST 12 661#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
372#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */ 662/* enum: Control should return to the caller rather than jumping */
663#define MC_CMD_COPYCODE_JUMP_NONE 0x1
373 664
374/* MC_CMD_COPYCODE_OUT msgresponse */ 665/* MC_CMD_COPYCODE_OUT msgresponse */
375#define MC_CMD_COPYCODE_OUT_LEN 0 666#define MC_CMD_COPYCODE_OUT_LEN 0
@@ -377,11 +668,13 @@
377 668
378/***********************************/ 669/***********************************/
379/* MC_CMD_SET_FUNC 670/* MC_CMD_SET_FUNC
671 * Select function for function-specific commands.
380 */ 672 */
381#define MC_CMD_SET_FUNC 0x4 673#define MC_CMD_SET_FUNC 0x4
382 674
383/* MC_CMD_SET_FUNC_IN msgrequest */ 675/* MC_CMD_SET_FUNC_IN msgrequest */
384#define MC_CMD_SET_FUNC_IN_LEN 4 676#define MC_CMD_SET_FUNC_IN_LEN 4
677/* Set function */
385#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 678#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
386 679
387/* MC_CMD_SET_FUNC_OUT msgresponse */ 680/* MC_CMD_SET_FUNC_OUT msgresponse */
@@ -390,6 +683,7 @@
390 683
391/***********************************/ 684/***********************************/
392/* MC_CMD_GET_BOOT_STATUS 685/* MC_CMD_GET_BOOT_STATUS
686 * Get the instruction address from which the MC booted.
393 */ 687 */
394#define MC_CMD_GET_BOOT_STATUS 0x5 688#define MC_CMD_GET_BOOT_STATUS 0x5
395 689
@@ -398,7 +692,10 @@
398 692
399/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */ 693/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
400#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 694#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
695/* Address from which the MC booted */
401#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 696#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
697/* enum: indicates that the MC wasn't flash booted */
698#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
402#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 699#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
403#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 700#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
404#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 701#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
@@ -410,25 +707,38 @@
410 707
411/***********************************/ 708/***********************************/
412/* MC_CMD_GET_ASSERTS 709/* MC_CMD_GET_ASSERTS
413 * Get and clear any assertion status. 710 * Get (and optionally clear) the current assertion status. Only
711 * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
712 * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
414 */ 713 */
415#define MC_CMD_GET_ASSERTS 0x6 714#define MC_CMD_GET_ASSERTS 0x6
416 715
417/* MC_CMD_GET_ASSERTS_IN msgrequest */ 716/* MC_CMD_GET_ASSERTS_IN msgrequest */
418#define MC_CMD_GET_ASSERTS_IN_LEN 4 717#define MC_CMD_GET_ASSERTS_IN_LEN 4
718/* Set to clear assertion */
419#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 719#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
420 720
421/* MC_CMD_GET_ASSERTS_OUT msgresponse */ 721/* MC_CMD_GET_ASSERTS_OUT msgresponse */
422#define MC_CMD_GET_ASSERTS_OUT_LEN 140 722#define MC_CMD_GET_ASSERTS_OUT_LEN 140
723/* Assertion status flag. */
423#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 724#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
424#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */ 725/* enum: No assertions have failed. */
425#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */ 726#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
426#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */ 727/* enum: A system-level assertion has failed. */
427#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */ 728#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
729/* enum: A thread-level assertion has failed. */
730#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
731/* enum: The system was reset by the watchdog. */
732#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
733/* enum: An illegal address trap stopped the system (huntington and later) */
734#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
735/* Failing PC value */
428#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 736#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
737/* Saved GP regs */
429#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 738#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
430#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 739#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
431#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 740#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
741/* Failing thread address */
432#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 742#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
433#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 743#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
434 744
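A client consuming this response should honour the contract spelled out above: read GLOBAL_FLAGS first and decode the remaining fields only when an assertion actually fired. A hedged sketch (helper name hypothetical; assumes a little-endian response buffer):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1

    static void report_asserts(const uint8_t *resp, size_t outlen)
    {
            uint32_t flags = 0;

            memcpy(&flags, resp, sizeof(flags));        /* GLOBAL_FLAGS_OFST 0 */
            if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
                    return;                             /* nothing to decode */
            if (outlen >= 8) {
                    uint32_t pc = 0;
                    memcpy(&pc, resp + 4, sizeof(pc));  /* SAVED_PC_OFFS_OFST 4 */
                    fprintf(stderr, "MC assertion: flags=%u PC=0x%08x\n",
                            (unsigned)flags, (unsigned)pc);
            }
    }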
@@ -441,9 +751,12 @@
441 751
442/* MC_CMD_LOG_CTRL_IN msgrequest */ 752/* MC_CMD_LOG_CTRL_IN msgrequest */
443#define MC_CMD_LOG_CTRL_IN_LEN 8 753#define MC_CMD_LOG_CTRL_IN_LEN 8
754/* Log destination */
444#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 755#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
445#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */ 756/* enum: UART. */
446#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */ 757#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
758/* enum: Event queue. */
759#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
447#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 760#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
448 761
449/* MC_CMD_LOG_CTRL_OUT msgresponse */ 762/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -459,11 +772,20 @@
459/* MC_CMD_GET_VERSION_IN msgrequest */ 772/* MC_CMD_GET_VERSION_IN msgrequest */
460#define MC_CMD_GET_VERSION_IN_LEN 0 773#define MC_CMD_GET_VERSION_IN_LEN 0
461 774
462/* MC_CMD_GET_VERSION_V0_OUT msgresponse */ 775/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
776#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
777/* placeholder, set to 0 */
778#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
779
780/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
463#define MC_CMD_GET_VERSION_V0_OUT_LEN 4 781#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
464#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 782#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
465#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */ 783/* enum: Reserved version number to indicate "any" version. */
466#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */ 784#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
785/* enum: Bootrom version value for Siena. */
786#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
787/* enum: Bootrom version value for Huntington. */
788#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
467 789
468/* MC_CMD_GET_VERSION_OUT msgresponse */ 790/* MC_CMD_GET_VERSION_OUT msgresponse */
469#define MC_CMD_GET_VERSION_OUT_LEN 32 791#define MC_CMD_GET_VERSION_OUT_LEN 32
@@ -471,6 +793,7 @@
471/* Enum values, see field(s): */ 793/* Enum values, see field(s): */
472/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ 794/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
473#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 795#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
796/* 128bit mask of functions supported by the current firmware */
474#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 797#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
475#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 798#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
476#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 799#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
@@ -478,46 +801,22 @@
478#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24 801#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
479#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28 802#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
480 803
481 804/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
482/***********************************/ 805#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
483/* MC_CMD_GET_FPGAREG 806/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
484 * Read multiple bytes from PTP FPGA. 807/* Enum values, see field(s): */
485 */ 808/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
486#define MC_CMD_GET_FPGAREG 0x9 809#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
487 810/* 128bit mask of functions supported by the current firmware */
488/* MC_CMD_GET_FPGAREG_IN msgrequest */ 811#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
489#define MC_CMD_GET_FPGAREG_IN_LEN 8 812#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
490#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0 813#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
491#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4 814#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
492 815#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
493/* MC_CMD_GET_FPGAREG_OUT msgresponse */ 816#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
494#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1 817/* extra info */
495#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252 818#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
496#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num)) 819#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
497#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
498#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
499#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
500#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
501
502
503/***********************************/
504/* MC_CMD_PUT_FPGAREG
505 * Write multiple bytes to PTP FPGA.
506 */
507#define MC_CMD_PUT_FPGAREG 0xa
508
509/* MC_CMD_PUT_FPGAREG_IN msgrequest */
510#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
511#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
512#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
513#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
514#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
515#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
516#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
517#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
518
519/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
520#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
521 820
522 821
523/***********************************/ 822/***********************************/
@@ -528,32 +827,74 @@
528 827
529/* MC_CMD_PTP_IN msgrequest */ 828/* MC_CMD_PTP_IN msgrequest */
530#define MC_CMD_PTP_IN_LEN 1 829#define MC_CMD_PTP_IN_LEN 1
830/* PTP operation code */
531#define MC_CMD_PTP_IN_OP_OFST 0 831#define MC_CMD_PTP_IN_OP_OFST 0
532#define MC_CMD_PTP_IN_OP_LEN 1 832#define MC_CMD_PTP_IN_OP_LEN 1
533#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */ 833/* enum: Enable PTP packet timestamping operation. */
534#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */ 834#define MC_CMD_PTP_OP_ENABLE 0x1
535#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */ 835/* enum: Disable PTP packet timestamping operation. */
536#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */ 836#define MC_CMD_PTP_OP_DISABLE 0x2
537#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */ 837/* enum: Send a PTP packet. */
538#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */ 838#define MC_CMD_PTP_OP_TRANSMIT 0x3
539#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */ 839/* enum: Read the current NIC time. */
540#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */ 840#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
541#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */ 841/* enum: Get the current PTP status. */
542#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */ 842#define MC_CMD_PTP_OP_STATUS 0x5
543#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */ 843/* enum: Adjust the PTP NIC's time. */
544#define MC_CMD_PTP_OP_MAX 0xc /* enum */ 844#define MC_CMD_PTP_OP_ADJUST 0x6
845/* enum: Synchronize host and NIC time. */
846#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
847/* enum: Basic manufacturing tests. */
848#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
849/* enum: Packet based manufacturing tests. */
850#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
851/* enum: Reset some of the PTP related statistics */
852#define MC_CMD_PTP_OP_RESET_STATS 0xa
853/* enum: Debug operations to MC. */
854#define MC_CMD_PTP_OP_DEBUG 0xb
855/* enum: Read an FPGA register */
856#define MC_CMD_PTP_OP_FPGAREAD 0xc
857/* enum: Write an FPGA register */
858#define MC_CMD_PTP_OP_FPGAWRITE 0xd
859/* enum: Apply an offset to the NIC clock */
860#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
861/* enum: Adjust the frequency of the NIC clock */
862#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
863/* enum: Set the MC packet filter VLAN tags for received PTP packets */
864#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
865/* enum: Set the MC packet filter UUID for received PTP packets */
866#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
867/* enum: Set the MC packet filter Domain for received PTP packets */
868#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
869/* enum: Set the clock source */
870#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
871/* enum: Reset value of Timer Reg. */
872#define MC_CMD_PTP_OP_RST_CLK 0x14
873/* enum: Enable the forwarding of PPS events to the host */
874#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
875/* enum: Above this for future use. */
876#define MC_CMD_PTP_OP_MAX 0x16
545 877
546/* MC_CMD_PTP_IN_ENABLE msgrequest */ 878/* MC_CMD_PTP_IN_ENABLE msgrequest */
547#define MC_CMD_PTP_IN_ENABLE_LEN 16 879#define MC_CMD_PTP_IN_ENABLE_LEN 16
548#define MC_CMD_PTP_IN_CMD_OFST 0 880#define MC_CMD_PTP_IN_CMD_OFST 0
549#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 881#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
882/* Event queue for PTP events */
550#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 883#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
884/* PTP timestamping mode */
551#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 885#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
552#define MC_CMD_PTP_MODE_V1 0x0 /* enum */ 886/* enum: PTP, version 1 */
553#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */ 887#define MC_CMD_PTP_MODE_V1 0x0
554#define MC_CMD_PTP_MODE_V2 0x2 /* enum */ 888/* enum: PTP, version 1, with VLAN headers - deprecated */
555#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */ 889#define MC_CMD_PTP_MODE_V1_VLAN 0x1
556#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */ 890/* enum: PTP, version 2 */
891#define MC_CMD_PTP_MODE_V2 0x2
892/* enum: PTP, version 2, with VLAN headers - deprecated */
893#define MC_CMD_PTP_MODE_V2_VLAN 0x3
894/* enum: PTP, version 2, with improved UUID filtering */
895#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
896/* enum: FCoE (seconds and microseconds) */
897#define MC_CMD_PTP_MODE_FCOE 0x5
557 898
558/* MC_CMD_PTP_IN_DISABLE msgrequest */ 899/* MC_CMD_PTP_IN_DISABLE msgrequest */
559#define MC_CMD_PTP_IN_DISABLE_LEN 8 900#define MC_CMD_PTP_IN_DISABLE_LEN 8
@@ -566,7 +907,9 @@
566#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) 907#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
567/* MC_CMD_PTP_IN_CMD_OFST 0 */ 908/* MC_CMD_PTP_IN_CMD_OFST 0 */
568/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 909/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
910/* Transmit packet length */
569#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 911#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
912/* Transmit packet data */
570#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 913#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
571#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 914#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
572#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 915#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
@@ -586,19 +929,27 @@
586#define MC_CMD_PTP_IN_ADJUST_LEN 24 929#define MC_CMD_PTP_IN_ADJUST_LEN 24
587/* MC_CMD_PTP_IN_CMD_OFST 0 */ 930/* MC_CMD_PTP_IN_CMD_OFST 0 */
588/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 931/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
932/* Frequency adjustment 40 bit fixed point ns */
589#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 933#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
590#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 934#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
591#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8 935#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
592#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 936#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
593#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */ 937/* enum: Number of fractional bits in frequency adjustment */
938#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
939/* Time adjustment in seconds */
594#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 940#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
941/* Time adjustment in nanoseconds */
595#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 942#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
596 943
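ADJUST_BITS is 0x28, i.e. FREQ carries 40 fractional bits. On the reading that the field is a signed fixed-point frequency offset, a parts-per-billion correction maps to roughly ppb * 2^40 / 10^9; that interpretation is an assumption here, not stated by the header. A sketch:

    #include <stdint.h>

    #define MC_CMD_PTP_IN_ADJUST_BITS 0x28  /* 40 fractional bits */

    /* Convert parts-per-billion to the fixed-point FREQ value; the
     * rounding term is correct for non-negative ppb. Safe for |ppb|
     * below about 8e6 before the multiply overflows int64_t. */
    static int64_t ptp_ppb_to_freq(int64_t ppb)
    {
            return (ppb * (1LL << MC_CMD_PTP_IN_ADJUST_BITS) +
                    500000000LL) / 1000000000LL;
    }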
597/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ 944/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
598#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 945#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
599/* MC_CMD_PTP_IN_CMD_OFST 0 */ 946/* MC_CMD_PTP_IN_CMD_OFST 0 */
600/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 947/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
948/* Number of time readings to capture */
601#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 949#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
950/* Host address in which to write "synchronization started" indication (64
951 * bits)
952 */
602#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12 953#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
603#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8 954#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
604#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12 955#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
@@ -613,86 +964,240 @@
613#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 964#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
614/* MC_CMD_PTP_IN_CMD_OFST 0 */ 965/* MC_CMD_PTP_IN_CMD_OFST 0 */
615/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 966/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
967/* Enable or disable packet testing */
616#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 968#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
617 969
618/* MC_CMD_PTP_IN_RESET_STATS msgrequest */ 970/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
619#define MC_CMD_PTP_IN_RESET_STATS_LEN 8 971#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
620/* MC_CMD_PTP_IN_CMD_OFST 0 */ 972/* MC_CMD_PTP_IN_CMD_OFST 0 */
973/* Reset PTP statistics */
621/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 974/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
622 975
623/* MC_CMD_PTP_IN_DEBUG msgrequest */ 976/* MC_CMD_PTP_IN_DEBUG msgrequest */
624#define MC_CMD_PTP_IN_DEBUG_LEN 12 977#define MC_CMD_PTP_IN_DEBUG_LEN 12
625/* MC_CMD_PTP_IN_CMD_OFST 0 */ 978/* MC_CMD_PTP_IN_CMD_OFST 0 */
626/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 979/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
980/* Debug operations */
627#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 981#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
628 982
983/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
984#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
985/* MC_CMD_PTP_IN_CMD_OFST 0 */
986/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
987#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
988#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
989
990/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
991#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
992#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
993#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
994/* MC_CMD_PTP_IN_CMD_OFST 0 */
995/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
996#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
997#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
998#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
999#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
1000#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
1001
1002/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
1003#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
1004/* MC_CMD_PTP_IN_CMD_OFST 0 */
1005/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1006/* Time adjustment in seconds */
1007#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
1008/* Time adjustment in nanoseconds */
1009#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
1010
1011/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
1012#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
1013/* MC_CMD_PTP_IN_CMD_OFST 0 */
1014/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1015/* Frequency adjustment 40 bit fixed point ns */
1016#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
1017#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
1018#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
1019#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
1020/* enum: Number of fractional bits in frequency adjustment */
1021/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
1022
1023/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
1024#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
1025/* MC_CMD_PTP_IN_CMD_OFST 0 */
1026/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1027/* Number of VLAN tags, 0 if not VLAN */
1028#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
1029/* Set of VLAN tags to filter against */
1030#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
1031#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
1032#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
1033
1034/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
1035#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
1036/* MC_CMD_PTP_IN_CMD_OFST 0 */
1037/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1038/* 1 to enable UUID filtering, 0 to disable */
1039#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
1040/* UUID to filter against */
1041#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
1042#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
1043#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
1044#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
1045
1046/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
1047#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
1048/* MC_CMD_PTP_IN_CMD_OFST 0 */
1049/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1050/* 1 to enable Domain filtering, 0 to disable */
1051#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
1052/* Domain number to filter against */
1053#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
1054
1055/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
1056#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
1057/* MC_CMD_PTP_IN_CMD_OFST 0 */
1058/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1059/* Set the clock source. */
1060#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
1061/* enum: Internal. */
1062#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
1063/* enum: External. */
1064#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
1065
1066/* MC_CMD_PTP_IN_RST_CLK msgrequest */
1067#define MC_CMD_PTP_IN_RST_CLK_LEN 8
1068/* MC_CMD_PTP_IN_CMD_OFST 0 */
1069/* Reset value of Timer Reg. */
1070/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
1071
1072/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
1073#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
1074/* MC_CMD_PTP_IN_CMD_OFST 0 */
1075/* Enable or disable */
1076#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
1077/* enum: Enable */
1078#define MC_CMD_PTP_ENABLE_PPS 0x0
1079/* enum: Disable */
1080#define MC_CMD_PTP_DISABLE_PPS 0x1
1081/* Queue ID to send events back to */
1082#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
1083
629/* MC_CMD_PTP_OUT msgresponse */ 1084/* MC_CMD_PTP_OUT msgresponse */
630#define MC_CMD_PTP_OUT_LEN 0 1085#define MC_CMD_PTP_OUT_LEN 0
631 1086
632/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */ 1087/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
633#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 1088#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
1089/* Value of seconds timestamp */
634#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 1090#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
1091/* Value of nanoseconds timestamp */
635#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 1092#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
636 1093
637/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ 1094/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
638#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 1095#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
1096/* Value of seconds timestamp */
639#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 1097#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
1098/* Value of nanoseconds timestamp */
640#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 1099#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
641 1100
642/* MC_CMD_PTP_OUT_STATUS msgresponse */ 1101/* MC_CMD_PTP_OUT_STATUS msgresponse */
643#define MC_CMD_PTP_OUT_STATUS_LEN 64 1102#define MC_CMD_PTP_OUT_STATUS_LEN 64
1103/* Frequency of NIC's hardware clock */
644#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 1104#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
1105/* Number of packets transmitted and timestamped */
645#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 1106#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
1107/* Number of packets received and timestamped */
646#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 1108#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
1109/* Number of packets timestamped by the FPGA */
647#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 1110#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
1111/* Number of packets filter matched */
648#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 1112#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
1113/* Number of packets not filter matched */
649#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 1114#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
1115/* Number of PPS overflows (noise on input?) */
650#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 1116#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
1117/* Number of PPS bad periods */
651#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 1118#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
1119/* Minimum period of PPS pulse */
652#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 1120#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
1121/* Maximum period of PPS pulse */
653#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 1122#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
1123/* Last period of PPS pulse */
654#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 1124#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
1125/* Mean period of PPS pulse */
655#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 1126#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
1127/* Minimum offset of PPS pulse (signed) */
656#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 1128#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
1129/* Maximum offset of PPS pulse (signed) */
657#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 1130#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
1131/* Last offset of PPS pulse (signed) */
658#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 1132#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
1133/* Mean offset of PPS pulse (signed) */
659#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 1134#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
660 1135
661/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ 1136/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
662#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 1137#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
663#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 1138#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
664#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) 1139#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
1140/* A set of host and NIC times */
665#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 1141#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
666#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 1142#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
667#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 1143#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
668#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 1144#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
1145/* Host time immediately before NIC's hardware clock read */
669#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 1146#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
1147/* Value of seconds timestamp */
670#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 1148#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
1149/* Value of nanoseconds timestamp */
671#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 1150#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
1151/* Host time immediately after NIC's hardware clock read */
672#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 1152#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
1153/* Number of nanoseconds waited after reading NIC's hardware clock */
673#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 1154#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
674 1155
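Each 20-byte TIMESET record brackets one NIC clock read between two host timestamps plus a wait, so a natural host-side strategy is to keep the record with the narrowest host window. A sketch under that assumption (struct and helper are illustrative, and the units of the host fields are device-specific):

    #include <stdint.h>

    struct ptp_timeset {            /* mirrors one 20-byte TIMESET entry */
            uint32_t host_start;    /* HOSTSTART_OFST 0 */
            uint32_t seconds;       /* SECONDS_OFST 4 */
            uint32_t nanoseconds;   /* NANOSECONDS_OFST 8 */
            uint32_t host_end;      /* HOSTEND_OFST 12 */
            uint32_t waitns;        /* WAITNS_OFST 16 */
    };

    /* Return the reading whose NIC clock value is most tightly bracketed
     * by host timestamps. */
    static const struct ptp_timeset *
    ptp_best_timeset(const struct ptp_timeset *ts, unsigned int n)
    {
            const struct ptp_timeset *best = ts;
            unsigned int i;

            for (i = 1; i < n; i++)
                    if (ts[i].host_end - ts[i].host_start <
                        best->host_end - best->host_start)
                            best = &ts[i];
            return best;
    }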
675/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ 1156/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
676#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 1157#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
1158/* Results of testing */
677#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 1159#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
678#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */ 1160/* enum: Successful test */
679#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */ 1161#define MC_CMD_PTP_MANF_SUCCESS 0x0
680#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */ 1162/* enum: FPGA load failed */
681#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */ 1163#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
682#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */ 1164/* enum: FPGA version invalid */
683#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */ 1165#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
684#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */ 1166/* enum: FPGA registers incorrect */
685#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */ 1167#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
686#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */ 1168/* enum: Oscillator possibly not working? */
687#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */ 1169#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
1170/* enum: Timestamps not increasing */
1171#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
1172/* enum: Mismatched packet count */
1173#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
1174/* enum: Mismatched packet count (Siena filter and FPGA) */
1175#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
1176/* enum: Not enough packets to perform timestamp check */
1177#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
1178/* enum: Timestamp trigger GPIO not working */
1179#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
1180/* Presence of external oscillator */
688#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 1181#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
689 1182
690/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ 1183/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
691#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 1184#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
1185/* Results of testing */
692#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 1186#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
1187/* Number of packets received by FPGA */
693#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 1188#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
1189/* Number of packets received by Siena filters */
694#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 1190#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
695 1191
1192/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
1193#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
1194#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
1195#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
1196#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
1197#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
1198#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
1199#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
1200
696 1201
697/***********************************/ 1202/***********************************/
698/* MC_CMD_CSR_READ32 1203/* MC_CMD_CSR_READ32
@@ -702,6 +1207,7 @@

 /* MC_CMD_CSR_READ32_IN msgrequest */
 #define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
 #define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
 #define MC_CMD_CSR_READ32_IN_STEP_OFST 4
 #define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
@@ -710,6 +1216,7 @@
 #define MC_CMD_CSR_READ32_OUT_LENMIN 4
 #define MC_CMD_CSR_READ32_OUT_LENMAX 252
 #define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
 #define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
 #define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
 #define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
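The new comment changes how a CSR_READ32 response must be parsed: of the dwords returned, the final one is a status word rather than register data. A minimal sketch of splitting such a response, assuming a little-endian host; mcdi_dword() is a hypothetical helper, not part of this header:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: read a little-endian dword at byte offset `ofst`. */
static uint32_t mcdi_dword(const uint8_t *buf, unsigned int ofst)
{
	uint32_t v;
	memcpy(&v, buf + ofst, sizeof(v));
	return v; /* little-endian host assumed for this sketch */
}

/* Split a CSR_READ32 response into register values and trailing status. */
static int parse_csr_read32(const uint8_t *outbuf, size_t outlen,
			    uint32_t *values, uint32_t *status)
{
	size_t ndwords = outlen / 4;
	size_t i;

	if (ndwords < MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM)
		return -1;
	for (i = 0; i + 1 < ndwords; i++)
		values[i] = mcdi_dword(outbuf,
				MC_CMD_CSR_READ32_OUT_BUFFER_OFST + 4 * i);
	*status = mcdi_dword(outbuf,
			MC_CMD_CSR_READ32_OUT_BUFFER_OFST + 4 * (ndwords - 1));
	return (int)(ndwords - 1); /* number of real register values */
}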
@@ -726,6 +1233,7 @@
 #define MC_CMD_CSR_WRITE32_IN_LENMIN 12
 #define MC_CMD_CSR_WRITE32_IN_LENMAX 252
 #define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
 #define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
 #define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
 #define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
@@ -739,6 +1247,48 @@


 /***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address of the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
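To make the OCSD semantics concrete, here is a minimal sketch of filling in an MC_CMD_HP request; mcdi_put_dword() is a hypothetical little-endian store helper (the sfc driver has its own MCDI buffer macros), and error handling is elided:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: store a little-endian dword at byte offset `ofst`. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t value)
{
	memcpy(buf + ofst, &value, sizeof(value)); /* little-endian host assumed */
}

/* Request activation of OCSD at `addr` with the given interval in seconds.
 * Passing addr == 0 would instead make INTERVAL act as a command
 * (0 = stop, 1 = report state, 2 = debug), per the comment above. */
static void build_hp_ocsd_start(uint8_t inbuf[MC_CMD_HP_IN_LEN],
				uint64_t addr, uint32_t interval)
{
	mcdi_put_dword(inbuf, MC_CMD_HP_IN_SUBCMD_OFST, MC_CMD_HP_IN_OCSD_SUBCMD);
	mcdi_put_dword(inbuf, MC_CMD_HP_IN_OCSD_ADDR_LO_OFST, (uint32_t)addr);
	mcdi_put_dword(inbuf, MC_CMD_HP_IN_OCSD_ADDR_HI_OFST, (uint32_t)(addr >> 32));
	mcdi_put_dword(inbuf, MC_CMD_HP_IN_OCSD_INTERVAL_OFST, interval);
}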
+
+/***********************************/
 /* MC_CMD_STACKINFO
  * Get stack information.
  */
@@ -751,6 +1301,7 @@
 #define MC_CMD_STACKINFO_OUT_LENMIN 12
 #define MC_CMD_STACKINFO_OUT_LENMAX 252
 #define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
 #define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
 #define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
 #define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
@@ -765,19 +1316,35 @@

 /* MC_CMD_MDIO_READ_IN msgrequest */
 #define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
 #define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
 #define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
 #define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
 #define MC_CMD_MDIO_READ_IN_ADDR_OFST 12

 /* MC_CMD_MDIO_READ_OUT msgresponse */
 #define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
 #define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
 #define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8

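A clause 45 read needs BUS, PRTAD, DEVAD and ADDR; for clause 22, DEVAD is set to the MC_CMD_MDIO_CLAUSE22 marker value instead of a real device address. A request-building sketch, reusing the hypothetical mcdi_put_dword() helper from the earlier example:

static void build_mdio_read(uint8_t inbuf[MC_CMD_MDIO_READ_IN_LEN],
			    uint32_t bus, uint32_t prtad,
			    uint32_t devad, uint32_t addr)
{
	mcdi_put_dword(inbuf, MC_CMD_MDIO_READ_IN_BUS_OFST, bus);
	mcdi_put_dword(inbuf, MC_CMD_MDIO_READ_IN_PRTAD_OFST, prtad);
	/* Pass MC_CMD_MDIO_CLAUSE22 as devad to get a clause 22 cycle. */
	mcdi_put_dword(inbuf, MC_CMD_MDIO_READ_IN_DEVAD_OFST, devad);
	mcdi_put_dword(inbuf, MC_CMD_MDIO_READ_IN_ADDR_OFST, addr);
}
/* On completion, a transaction is good iff the response STATUS field
 * equals MC_CMD_MDIO_STATUS_GOOD (DONE set, all other bits clear). */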

 /***********************************/
@@ -788,18 +1355,34 @@

 /* MC_CMD_MDIO_WRITE_IN msgrequest */
 #define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
 #define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
 /* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
 /* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
 #define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
 #define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
 /* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
 #define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
 #define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16

 /* MC_CMD_MDIO_WRITE_OUT msgresponse */
 #define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
 #define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
 /* MC_CMD_MDIO_STATUS_GOOD 0x8 */


@@ -813,6 +1396,9 @@
 #define MC_CMD_DBI_WRITE_IN_LENMIN 12
 #define MC_CMD_DBI_WRITE_IN_LENMAX 252
 #define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
 #define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
 #define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
 #define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
@@ -826,9 +1412,15 @@
 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
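The BYTE_MASK dword is repurposed here as a PARMS dword carrying VF number, VF-active and CS2 sub-fields. A sketch of packing those bits, under the assumption (based on the macro layout above) that the sub-field LBNs are relative to the PARMS dword itself:

#include <stdint.h>

/* Pack the PARMS dword of one DBI write op. */
static uint32_t dbiwrop_parms(uint16_t vf_num, int vf_active, int cs2)
{
	uint32_t parms = 0;

	parms |= (uint32_t)vf_num << MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN;
	parms |= (uint32_t)(vf_active ? 1 : 0)
			<< MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN;
	parms |= (uint32_t)(cs2 ? 1 : 0) << MC_CMD_DBIWROP_TYPEDEF_CS2_LBN;
	return parms; /* store at MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST */
}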
@@ -836,69 +1428,111 @@

 /***********************************/
 /* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map.
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
  */
 #define MC_CMD_PORT_READ32 0x14

 /* MC_CMD_PORT_READ32_IN msgrequest */
 #define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
 #define MC_CMD_PORT_READ32_IN_ADDR_OFST 0

 /* MC_CMD_PORT_READ32_OUT msgresponse */
 #define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
 #define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
 #define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4


 /***********************************/
 /* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map.
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
  */
 #define MC_CMD_PORT_WRITE32 0x15

 /* MC_CMD_PORT_WRITE32_IN msgrequest */
 #define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
 #define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
 #define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4

 /* MC_CMD_PORT_WRITE32_OUT msgresponse */
 #define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
 #define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0


 /***********************************/
 /* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map.
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
  */
 #define MC_CMD_PORT_READ128 0x16

 /* MC_CMD_PORT_READ128_IN msgrequest */
 #define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
 #define MC_CMD_PORT_READ128_IN_ADDR_OFST 0

 /* MC_CMD_PORT_READ128_OUT msgresponse */
 #define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
 #define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
 #define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
 #define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16


 /***********************************/
 /* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map.
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
  */
 #define MC_CMD_PORT_WRITE128 0x17

 /* MC_CMD_PORT_WRITE128_IN msgrequest */
 #define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
 #define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
 #define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
 #define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16

 /* MC_CMD_PORT_WRITE128_OUT msgresponse */
 #define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
 #define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0

+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+

 /***********************************/
 /* MC_CMD_GET_BOARD_CFG
@@ -916,18 +1550,10 @@
 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
 #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
+/* See MC_CMD_CAPABILITIES */
 #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
-/* Enum values, see field(s): */
-/* CAPABILITIES_PORT0 */
 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
@@ -936,6 +1562,11 @@
 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
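The FW_SUBTYPE_LIST is thus an array of 16-bit entries, one per NVRAM area type. A bounds-checked accessor sketch; mcdi_word() is a hypothetical little-endian helper, as with the earlier sketches:

/* Hypothetical helper: read a little-endian 16-bit value. */
static uint16_t mcdi_word(const uint8_t *buf, unsigned int ofst)
{
	uint16_t v;
	memcpy(&v, buf + ofst, sizeof(v));
	return v;
}

/* Fetch the firmware subtype for NVRAM area `idx`, checked against the
 * actual response length. */
static int fw_subtype(const uint8_t *outbuf, size_t outlen,
		      unsigned int idx, uint16_t *subtype)
{
	unsigned int ofst = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
			    idx * MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN;

	if (ofst + 2 > outlen)
		return -1;
	*subtype = mcdi_word(outbuf, ofst);
	return 0;
}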
@@ -944,7 +1575,7 @@

 /***********************************/
 /* MC_CMD_DBI_READX
- * Read DBI register(s).
+ * Read DBI register(s) -- extended functionality
  */
 #define MC_CMD_DBI_READX 0x19

@@ -952,6 +1583,7 @@
 #define MC_CMD_DBI_READX_IN_LENMIN 8
 #define MC_CMD_DBI_READX_IN_LENMAX 248
 #define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each read op consists of an address (offset 0) and VF/CS2 parameters. */
 #define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
 #define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
 #define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
@@ -963,11 +1595,27 @@
 #define MC_CMD_DBI_READX_OUT_LENMIN 4
 #define MC_CMD_DBI_READX_OUT_LENMAX 252
 #define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
 #define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
 #define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
 #define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
 #define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63

+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+

 /***********************************/
 /* MC_CMD_SET_RAND_SEED
@@ -977,6 +1625,7 @@

 /* MC_CMD_SET_RAND_SEED_IN msgrequest */
 #define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
 #define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
 #define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16

@@ -986,7 +1635,7 @@

 /***********************************/
 /* MC_CMD_LTSSM_HIST
- * Retrieve the history of the PCIE LTSSM.
+ * Retrieve the history of the LTSSM, if the build supports it.
  */
 #define MC_CMD_LTSSM_HIST 0x1b

@@ -997,6 +1646,7 @@
 #define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
 #define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
 #define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
 #define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
 #define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
 #define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
@@ -1005,41 +1655,47 @@

 /***********************************/
 /* MC_CMD_DRV_ATTACH
- * Inform MCPU that this port is managed on the host.
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
  */
 #define MC_CMD_DRV_ATTACH 0x1c

 /* MC_CMD_DRV_ATTACH_IN msgrequest */
-#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state (0=detached, 1=attached) to set if UPDATE=1 */
 #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+/* 1 to set new state, or 0 to just report the existing state */
 #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1

 /* MC_CMD_DRV_ATTACH_OUT msgresponse */
 #define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state (0=detached, 1=attached) */
 #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0

-
-/***********************************/
-/* MC_CMD_NCSI_PROD
- * Trigger an NC-SI event.
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state (0=detached, 1=attached) */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
  */
-#define MC_CMD_NCSI_PROD 0x1d
-
-/* MC_CMD_NCSI_PROD_IN msgrequest */
-#define MC_CMD_NCSI_PROD_IN_LEN 4
-#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
-#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
-#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
-#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
-#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
-
-/* MC_CMD_NCSI_PROD_OUT msgresponse */
-#define MC_CMD_NCSI_PROD_OUT_LEN 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2


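The widened request (8 to 12 bytes) lets the driver name a firmware preference when attaching. A sketch of building it, again with the hypothetical mcdi_put_dword() helper; the FIRMWARE_ID is only a preference, so a real driver follows up with MC_CMD_GET_CAPABILITIES as the comment above says:

static void build_drv_attach(uint8_t inbuf[MC_CMD_DRV_ATTACH_IN_LEN],
			     int attach, int update)
{
	mcdi_put_dword(inbuf, MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST,
		       attach ? 1 : 0);
	mcdi_put_dword(inbuf, MC_CMD_DRV_ATTACH_IN_UPDATE_OFST,
		       update ? 1 : 0);
	/* A preference only; the MC may attach us with other firmware. */
	mcdi_put_dword(inbuf, MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST,
		       MC_CMD_FW_LOW_LATENCY);
}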
 /***********************************/
@@ -1050,6 +1706,7 @@

 /* MC_CMD_SHMUART_IN msgrequest */
 #define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
 #define MC_CMD_SHMUART_IN_FLAG_OFST 0

 /* MC_CMD_SHMUART_OUT msgresponse */
@@ -1057,13 +1714,33 @@


 /***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
 /* MC_CMD_ENTITY_RESET
- * Generic per-port reset.
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
  */
 #define MC_CMD_ENTITY_RESET 0x20

 /* MC_CMD_ENTITY_RESET_IN msgrequest */
 #define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
 #define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -1080,7 +1757,9 @@

 /* MC_CMD_PCIE_CREDITS_IN msgrequest */
 #define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
 #define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
 #define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4

 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */
@@ -1141,7 +1820,7 @@

 /***********************************/
 /* MC_CMD_PUTS
- * puts(3) implementation over MCDI
+ * Copy the given ASCII string out onto UART and/or out of the network port.
  */
 #define MC_CMD_PUTS 0x23

@@ -1167,7 +1846,8 @@

 /***********************************/
 /* MC_CMD_GET_PHY_CFG
- * Report PHY configuration.
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
  */
 #define MC_CMD_GET_PHY_CFG 0x24

@@ -1176,6 +1856,7 @@

 /* MC_CMD_GET_PHY_CFG_OUT msgresponse */
 #define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
 #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
@@ -1191,7 +1872,9 @@
 #define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
 #define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
 #define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
 #define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
 #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
 #define MC_CMD_PHY_CAP_10HDX_LBN 1
 #define MC_CMD_PHY_CAP_10HDX_WIDTH 1
@@ -1213,20 +1896,36 @@
 #define MC_CMD_PHY_CAP_ASYM_WIDTH 1
 #define MC_CMD_PHY_CAP_AN_LBN 10
 #define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
 #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
 #define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
 #define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
 #define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
 #define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
 #define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
-#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
-#define MC_CMD_MEDIA_CX4 0x2 /* enum */
-#define MC_CMD_MEDIA_KX4 0x3 /* enum */
-#define MC_CMD_MEDIA_XFP 0x4 /* enum */
-#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
-#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
 #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
-#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
 #define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
 #define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
 #define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
@@ -1234,7 +1933,8 @@
 #define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
 #define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
 #define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
-#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
 #define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
 #define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
 #define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
@@ -1243,18 +1943,31 @@

 /***********************************/
 /* MC_CMD_START_BIST
- * Start a BIST test on the PHY.
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST.
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
  */
 #define MC_CMD_START_BIST 0x25

 /* MC_CMD_START_BIST_IN msgrequest */
 #define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
 #define MC_CMD_START_BIST_IN_TYPE_OFST 0
-#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
-#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
-#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
-#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
-#define MC_CMD_PHY_BIST 0x5 /* enum */
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8

 /* MC_CMD_START_BIST_OUT msgresponse */
 #define MC_CMD_START_BIST_OUT_LEN 0
@@ -1262,7 +1975,12 @@

 /***********************************/
 /* MC_CMD_POLL_BIST
- * Poll for BIST completion.
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
  */
 #define MC_CMD_POLL_BIST 0x26

@@ -1271,15 +1989,21 @@

 /* MC_CMD_POLL_BIST_OUT msgresponse */
 #define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
 #define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
-#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
-#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
-#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
-#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
 #define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4

 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
 #define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
 /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
 /* Enum values, see field(s): */
 /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
@@ -1287,42 +2011,116 @@
 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of each channel A */
 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
 /* Enum values, see field(s): */
 /* CABLE_STATUS_A */
+/* Status of each channel C */
 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
 /* Enum values, see field(s): */
 /* CABLE_STATUS_A */
+/* Status of each channel D */
 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
 /* Enum values, see field(s): */
 /* CABLE_STATUS_A */

 /* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
 #define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
 /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
 /* Enum values, see field(s): */
 /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
-#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32

1323/***********************************/ 2116/***********************************/
1324/* MC_CMD_FLUSH_RX_QUEUES 2117/* MC_CMD_FLUSH_RX_QUEUES
1325 * Flush receive queue(s). 2118 * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
2119 * flushes should be initiated via this MCDI operation, rather than via
2120 * directly writing FLUSH_CMD.
2121 *
2122 * The flush is completed (either done/fail) asynchronously (after this command
2123 * returns). The driver must still wait for flush done/failure events as usual.
1326 */ 2124 */
1327#define MC_CMD_FLUSH_RX_QUEUES 0x27 2125#define MC_CMD_FLUSH_RX_QUEUES 0x27
1328 2126
@@ -1341,7 +2139,7 @@

 /***********************************/
 /* MC_CMD_GET_LOOPBACK_MODES
- * Get port's loopback modes.
+ * Returns a bitmask of loopback modes available at each speed.
  */
 #define MC_CMD_GET_LOOPBACK_MODES 0x28

@@ -1349,61 +2147,116 @@
 #define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0

 /* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
-#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
-#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
-#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
-#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
-#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
-#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
-#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
-#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
-#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
-#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
-#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
-#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
-#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
-#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
-#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
-#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
-#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
-#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
-#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
-#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
-#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
-#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* Supported loopbacks. */
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
 /* Enum values, see field(s): */
 /* 100M */
+/* Supported loopbacks. */
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
 /* Enum values, see field(s): */
 /* 100M */
+/* Supported loopbacks. */
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
 /* Enum values, see field(s): */
 /* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */

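Each per-speed field above is a 64-bit mask, split into LO/HI dwords and indexed by the MC_CMD_LOOPBACK_* enum values. A sketch of testing whether a given mode is available at 10G, with the hypothetical mcdi_dword() helper from earlier:

/* Return non-zero if loopback `mode` (an MC_CMD_LOOPBACK_* value, < 64)
 * is available at 10G. */
static int loopback_supported_10g(const uint8_t *outbuf, unsigned int mode)
{
	uint64_t mask;

	mask = (uint64_t)mcdi_dword(outbuf,
			MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST) |
	       (uint64_t)mcdi_dword(outbuf,
			MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST) << 32;
	return (int)((mask >> mode) & 1);
}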

 /***********************************/
 /* MC_CMD_GET_LINK
- * Read the unified MAC/PHY link state.
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
  */
 #define MC_CMD_GET_LINK 0x29

@@ -1412,9 +2265,15 @@

 /* MC_CMD_GET_LINK_OUT msgresponse */
 #define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
 #define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
 #define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
 #define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
 #define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
 /* Enum values, see field(s): */
 /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
@@ -1427,10 +2286,14 @@
 #define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
 #define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
 #define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+/* This returns the negotiated flow control value. */
 #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-#define MC_CMD_FCNTL_OFF 0x0 /* enum */
-#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
-#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
 #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
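A small sketch of pulling the speed and negotiated flow control out of a GET_LINK response; struct link_state is a hypothetical holder, not part of this header, and mcdi_dword() is the earlier helper:

struct link_state {           /* hypothetical */
	uint32_t speed_mbps;  /* may read non-zero while the link is down */
	uint32_t fcntl;       /* MC_CMD_FCNTL_OFF/RESPOND/BIDIR */
};

static void parse_get_link(const uint8_t outbuf[MC_CMD_GET_LINK_OUT_LEN],
			   struct link_state *ls)
{
	ls->speed_mbps = mcdi_dword(outbuf, MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST);
	ls->fcntl = mcdi_dword(outbuf, MC_CMD_GET_LINK_OUT_FCNTL_OFST);
}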
@@ -1444,13 +2307,16 @@

 /***********************************/
 /* MC_CMD_SET_LINK
- * Write the unified MAC/PHY link configuration.
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
  */
 #define MC_CMD_SET_LINK 0x2a

 /* MC_CMD_SET_LINK_IN msgrequest */
 #define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
 #define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
 #define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
 #define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
 #define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
@@ -1458,9 +2324,13 @@
 #define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
 #define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
 #define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
 #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
 /* Enum values, see field(s): */
 /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
 #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12

 /* MC_CMD_SET_LINK_OUT msgresponse */
@@ -1469,12 +2339,13 @@

 /***********************************/
 /* MC_CMD_SET_ID_LED
- * Set indentification LED state.
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
  */
 #define MC_CMD_SET_ID_LED 0x2b

 /* MC_CMD_SET_ID_LED_IN msgrequest */
 #define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
 #define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
 #define MC_CMD_LED_OFF 0x0 /* enum */
 #define MC_CMD_LED_ON 0x1 /* enum */
@@ -1486,12 +2357,15 @@
1486 2357
1487/***********************************/ 2358/***********************************/
1488/* MC_CMD_SET_MAC 2359/* MC_CMD_SET_MAC
1489 * Set MAC configuration. 2360 * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
1490 */ 2361 */
1491#define MC_CMD_SET_MAC 0x2c 2362#define MC_CMD_SET_MAC 0x2c
1492 2363
1493/* MC_CMD_SET_MAC_IN msgrequest */ 2364/* MC_CMD_SET_MAC_IN msgrequest */
1494#define MC_CMD_SET_MAC_IN_LEN 24 2365#define MC_CMD_SET_MAC_IN_LEN 24
2366/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
2367 * EtherII, VLAN, bug16011 padding).
2368 */
1495#define MC_CMD_SET_MAC_IN_MTU_OFST 0 2369#define MC_CMD_SET_MAC_IN_MTU_OFST 0
1496#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 2370#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
1497#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 2371#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
@@ -1504,10 +2378,14 @@
1504#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 2378#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
1505#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 2379#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
1506#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 2380#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
2381/* enum: Flow control is off. */
1507/* MC_CMD_FCNTL_OFF 0x0 */ 2382/* MC_CMD_FCNTL_OFF 0x0 */
2383/* enum: Respond to flow control. */
1508/* MC_CMD_FCNTL_RESPOND 0x1 */ 2384/* MC_CMD_FCNTL_RESPOND 0x1 */
2385/* enum: Respond to and issue flow control. */
1509/* MC_CMD_FCNTL_BIDIR 0x2 */ 2386/* MC_CMD_FCNTL_BIDIR 0x2 */
1510#define MC_CMD_FCNTL_AUTO 0x3 /* enum */ 2387/* enum: Auto neg flow control. */
2388#define MC_CMD_FCNTL_AUTO 0x3
1511 2389
1512/* MC_CMD_SET_MAC_OUT msgresponse */ 2390/* MC_CMD_SET_MAC_OUT msgresponse */
1513#define MC_CMD_SET_MAC_OUT_LEN 0 2391#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -1515,12 +2393,18 @@
1515 2393
1516/***********************************/ 2394/***********************************/
1517/* MC_CMD_PHY_STATS 2395/* MC_CMD_PHY_STATS
1518 * Get generic PHY statistics. 2396 * Get generic PHY statistics. This call returns the statistics for a generic
2397 * PHY in a sparse array (indexed by the enumeration). Each value is represented
2398 * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the
2399 * statistics may be read from the message response. If DMA_ADDR != 0, then the
2400 * statistics are DMA'd to that (page-aligned) location. Locks required: None.
2401 * Returns: 0, ETIME
1519 */ 2402 */
1520#define MC_CMD_PHY_STATS 0x2d 2403#define MC_CMD_PHY_STATS 0x2d
1521 2404
1522/* MC_CMD_PHY_STATS_IN msgrequest */ 2405/* MC_CMD_PHY_STATS_IN msgrequest */
1523#define MC_CMD_PHY_STATS_IN_LEN 8 2406#define MC_CMD_PHY_STATS_IN_LEN 8
2407/* DMA address of host buffer for statistics, or 0 for no DMA. */
1524#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0 2408#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
1525#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8 2409#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
1526#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 2410#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1534,40 +2418,71 @@
1534#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0 2418#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
1535#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 2419#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
1536#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS 2420#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
1537#define MC_CMD_OUI 0x0 /* enum */ 2421/* enum: OUI. */
1538#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */ 2422#define MC_CMD_OUI 0x0
1539#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */ 2423/* enum: PMA-PMD Link Up. */
1540#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */ 2424#define MC_CMD_PMA_PMD_LINK_UP 0x1
1541#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */ 2425/* enum: PMA-PMD RX Fault. */
1542#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */ 2426#define MC_CMD_PMA_PMD_RX_FAULT 0x2
1543#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */ 2427/* enum: PMA-PMD TX Fault. */
1544#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */ 2428#define MC_CMD_PMA_PMD_TX_FAULT 0x3
1545#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */ 2429/* enum: PMA-PMD Signal */
1546#define MC_CMD_PCS_LINK_UP 0x9 /* enum */ 2430#define MC_CMD_PMA_PMD_SIGNAL 0x4
1547#define MC_CMD_PCS_RX_FAULT 0xa /* enum */ 2431/* enum: PMA-PMD SNR A. */
1548#define MC_CMD_PCS_TX_FAULT 0xb /* enum */ 2432#define MC_CMD_PMA_PMD_SNR_A 0x5
1549#define MC_CMD_PCS_BER 0xc /* enum */ 2433/* enum: PMA-PMD SNR B. */
1550#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */ 2434#define MC_CMD_PMA_PMD_SNR_B 0x6
1551#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */ 2435/* enum: PMA-PMD SNR C. */
1552#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */ 2436#define MC_CMD_PMA_PMD_SNR_C 0x7
1553#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */ 2437/* enum: PMA-PMD SNR D. */
1554#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */ 2438#define MC_CMD_PMA_PMD_SNR_D 0x8
1555#define MC_CMD_PHYXS_SYNC 0x12 /* enum */ 2439/* enum: PCS Link Up. */
1556#define MC_CMD_AN_LINK_UP 0x13 /* enum */ 2440#define MC_CMD_PCS_LINK_UP 0x9
1557#define MC_CMD_AN_COMPLETE 0x14 /* enum */ 2441/* enum: PCS RX Fault. */
1558#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */ 2442#define MC_CMD_PCS_RX_FAULT 0xa
1559#define MC_CMD_CL22_LINK_UP 0x16 /* enum */ 2443/* enum: PCS TX Fault. */
1560#define MC_CMD_PHY_NSTATS 0x17 /* enum */ 2444#define MC_CMD_PCS_TX_FAULT 0xb
2445/* enum: PCS BER. */
2446#define MC_CMD_PCS_BER 0xc
2447/* enum: PCS Block Errors. */
2448#define MC_CMD_PCS_BLOCK_ERRORS 0xd
2449/* enum: PhyXS Link Up. */
2450#define MC_CMD_PHYXS_LINK_UP 0xe
2451/* enum: PhyXS RX Fault. */
2452#define MC_CMD_PHYXS_RX_FAULT 0xf
2453/* enum: PhyXS TX Fault. */
2454#define MC_CMD_PHYXS_TX_FAULT 0x10
2455/* enum: PhyXS Align. */
2456#define MC_CMD_PHYXS_ALIGN 0x11
2457/* enum: PhyXS Sync. */
2458#define MC_CMD_PHYXS_SYNC 0x12
2459/* enum: AN link-up. */
2460#define MC_CMD_AN_LINK_UP 0x13
2461/* enum: AN Complete. */
2462#define MC_CMD_AN_COMPLETE 0x14
2463/* enum: AN 10GBaseT Status. */
2464#define MC_CMD_AN_10GBT_STATUS 0x15
2465/* enum: Clause 22 Link-Up. */
2466#define MC_CMD_CL22_LINK_UP 0x16
2467/* enum: (Last entry) */
2468#define MC_CMD_PHY_NSTATS 0x17
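As a rough illustration of the no-DMA path described above (DMA_ADDR == 0, so the statistics come back in the response as one 32-bit value per enum index), a minimal C sketch; mcdi_rpc() here is a hypothetical transport helper, not part of this header:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MC_CMD_PHY_STATS 0x2d
#define MC_CMD_PHY_STATS_IN_LEN 8
#define MC_CMD_PHY_NSTATS 0x17
#define MC_CMD_PMA_PMD_LINK_UP 0x1

/* Hypothetical transport: issue an MCDI command, return 0 or -errno. */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen);

static uint32_t get_le32(const uint8_t *p)
{
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static int phy_link_up(void)
{
        uint8_t in[MC_CMD_PHY_STATS_IN_LEN];
        uint8_t out[4 * MC_CMD_PHY_NSTATS];
        int rc;

        memset(in, 0, sizeof(in));      /* DMA_ADDR == 0: stats in response */
        rc = mcdi_rpc(MC_CMD_PHY_STATS, in, sizeof(in), out, sizeof(out));
        if (rc)
                return rc;
        /* Sparse array of 32-bit values, indexed by the enum above. */
        return get_le32(out + 4 * MC_CMD_PMA_PMD_LINK_UP) != 0;
}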
1561 2469
1562 2470
1563/***********************************/ 2471/***********************************/
1564/* MC_CMD_MAC_STATS 2472/* MC_CMD_MAC_STATS
1565 * Get generic MAC statistics. 2473 * Get generic MAC statistics. This call returns unified statistics maintained
2474 * by the MC as it switches between the GMAC and XMAC. The MC will write out
2475 * all supported stats. The driver should zero initialise the buffer to
2476 * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
2477 * performed, and the statistics may be read from the message response. If
2478 * DMA_ADDR != 0, then the statistics are DMA'd to that (page-aligned) location.
2479 * Locks required: None. Returns: 0, ETIME
1566 */ 2480 */
1567#define MC_CMD_MAC_STATS 0x2e 2481#define MC_CMD_MAC_STATS 0x2e
1568 2482
1569/* MC_CMD_MAC_STATS_IN msgrequest */ 2483/* MC_CMD_MAC_STATS_IN msgrequest */
1570#define MC_CMD_MAC_STATS_IN_LEN 16 2484#define MC_CMD_MAC_STATS_IN_LEN 16
2485/* DMA address of host buffer for statistics, or 0 for no DMA. */
1571#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 2486#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
1572#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 2487#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
1573#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 2488#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1684,6 +2599,7 @@
1684 2599
1685/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ 2600/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
1686#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 2601#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
2602/* this is only used for the first record */
1687#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 2603#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
1688#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 2604#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
1689#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 2605#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
@@ -1713,7 +2629,23 @@
1713 2629
1714/***********************************/ 2630/***********************************/
1715/* MC_CMD_MEMCPY 2631/* MC_CMD_MEMCPY
1716 * Perform memory copy operation. 2632 * DMA write data into a (Rid,Addr) destination, either by DMA reading from a
2633 * (Rid,Addr) source, or from data embedded directly in the command.
2634 *
2635 * A common pattern is for a client to use generation counts to signal a dma
2636 * update of a datastructure. To facilitate this, this MCDI operation can
2637 * contain multiple requests which are executed in strict order. Requests take
2638 * the form of duplicating the entire MCDI request continuously (including the
2639 * NUM_RECORDS field, which is ignored in all but the first structure).
2640 *
2641 * The source data can either come from a DMA from the host, or it can be
2642 * embedded within the request directly, thereby eliminating a DMA read. To
2643 * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
2644 * ADDR_LO=offset, and inserts the data at %offset from the start of the
2645 * payload. It's the caller's responsibility to ensure that the embedded data
2646 * doesn't overlap the records.
2647 *
2648 * Returns: 0, EINVAL (invalid RID)
1717 */ 2649 */
1718#define MC_CMD_MEMCPY 0x31 2650#define MC_CMD_MEMCPY 0x31
1719 2651
@@ -1721,6 +2653,7 @@
1721#define MC_CMD_MEMCPY_IN_LENMIN 32 2653#define MC_CMD_MEMCPY_IN_LENMIN 32
1722#define MC_CMD_MEMCPY_IN_LENMAX 224 2654#define MC_CMD_MEMCPY_IN_LENMAX 224
1723#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) 2655#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
2656/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
1724#define MC_CMD_MEMCPY_IN_RECORD_OFST 0 2657#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
1725#define MC_CMD_MEMCPY_IN_RECORD_LEN 32 2658#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
1726#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 2659#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
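A minimal sketch of the inline-data layout rule above: records are packed back-to-back at the start of the payload (MC_CMD_MEMCPY_IN_RECORD_LEN bytes each, total length per MC_CMD_MEMCPY_IN_LEN(num)), and embedded data is placed after them, with ADDR_LO carrying its offset from the start of the payload. The per-field record offsets and the RID_INLINE value are not shown in this hunk, so only the offset arithmetic is sketched:

#include <stddef.h>
#include <stdint.h>

#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
#define MC_CMD_MEMCPY_IN_LEN(num) (0 + 32 * (num))

/* Offset of inline data placed immediately after 'nrec' records; this is
 * the value a record's ADDR_LO should carry when FROM_RID is RID_INLINE. */
static uint32_t memcpy_inline_offset(unsigned int nrec)
{
        return MC_CMD_MEMCPY_IN_LEN(nrec);
}

/* Total request length for 'nrec' records plus 'inline_len' embedded bytes;
 * the caller must keep the embedded data clear of the records themselves. */
static size_t memcpy_req_len(unsigned int nrec, size_t inline_len)
{
        return MC_CMD_MEMCPY_IN_LEN(nrec) + inline_len;
}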
@@ -1741,14 +2674,22 @@
1741#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 2674#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
1742#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ 2675#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
1743#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ 2676#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
2677/* A type value of 1 is unused. */
1744#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 2678#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
1745#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */ 2679/* enum: Magic */
1746#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */ 2680#define MC_CMD_WOL_TYPE_MAGIC 0x0
1747#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */ 2681/* enum: MS Windows Magic */
1748#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */ 2682#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
1749#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */ 2683/* enum: IPv4 Syn */
1750#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */ 2684#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
1751#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */ 2685/* enum: IPv6 Syn */
2686#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
2687/* enum: Bitmap */
2688#define MC_CMD_WOL_TYPE_BITMAP 0x5
2689/* enum: Link */
2690#define MC_CMD_WOL_TYPE_LINK 0x6
2691/* enum: (Above this for future use) */
2692#define MC_CMD_WOL_TYPE_MAX 0x7
1752#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 2693#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
1753#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 2694#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
1754#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 2695#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
@@ -1818,7 +2759,7 @@
1818 2759
1819/***********************************/ 2760/***********************************/
1820/* MC_CMD_WOL_FILTER_REMOVE 2761/* MC_CMD_WOL_FILTER_REMOVE
1821 * Remove a WoL filter. 2762 * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
1822 */ 2763 */
1823#define MC_CMD_WOL_FILTER_REMOVE 0x33 2764#define MC_CMD_WOL_FILTER_REMOVE 0x33
1824 2765
@@ -1832,7 +2773,8 @@
1832 2773
1833/***********************************/ 2774/***********************************/
1834/* MC_CMD_WOL_FILTER_RESET 2775/* MC_CMD_WOL_FILTER_RESET
1835 * Reset (i.e. remove all) WoL filters. 2776 * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
2777 * ENOSYS
1836 */ 2778 */
1837#define MC_CMD_WOL_FILTER_RESET 0x34 2779#define MC_CMD_WOL_FILTER_RESET 0x34
1838 2780
@@ -1848,7 +2790,7 @@
1848 2790
1849/***********************************/ 2791/***********************************/
1850/* MC_CMD_SET_MCAST_HASH 2792/* MC_CMD_SET_MCAST_HASH
1851 * Set the MCASH hash value. 2793 * Set the MCAST hash value without otherwise reconfiguring the MAC
1852 */ 2794 */
1853#define MC_CMD_SET_MCAST_HASH 0x35 2795#define MC_CMD_SET_MCAST_HASH 0x35
1854 2796
@@ -1865,7 +2807,8 @@
1865 2807
1866/***********************************/ 2808/***********************************/
1867/* MC_CMD_NVRAM_TYPES 2809/* MC_CMD_NVRAM_TYPES
1868 * Get virtual NVRAM partitions information. 2810 * Return bitfield indicating available types of virtual NVRAM partitions.
2811 * Locks required: none. Returns: 0
1869 */ 2812 */
1870#define MC_CMD_NVRAM_TYPES 0x36 2813#define MC_CMD_NVRAM_TYPES 0x36
1871 2814
@@ -1874,26 +2817,54 @@
1874 2817
1875/* MC_CMD_NVRAM_TYPES_OUT msgresponse */ 2818/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
1876#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 2819#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
2820/* Bit mask of supported types. */
1877#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 2821#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
1878#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */ 2822/* enum: Disabled Callisto. */
1879#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */ 2823#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
1880#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */ 2824/* enum: MC firmware. */
1881#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */ 2825#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
1882#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */ 2826/* enum: MC backup firmware. */
1883#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */ 2827#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
1884#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */ 2828/* enum: Static configuration Port0. */
1885#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */ 2829#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
1886#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */ 2830/* enum: Static configuration Port1. */
1887#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */ 2831#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
1888#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */ 2832/* enum: Dynamic configuration Port0. */
1889#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */ 2833#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
1890#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */ 2834/* enum: Dynamic configuration Port1. */
1891#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */ 2835#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
2836/* enum: Expansion Rom. */
2837#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
2838/* enum: Expansion Rom Configuration Port0. */
2839#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
2840/* enum: Expansion Rom Configuration Port1. */
2841#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
2842/* enum: Phy Configuration Port0. */
2843#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
2844/* enum: Phy Configuration Port1. */
2845#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
2846/* enum: Log. */
2847#define MC_CMD_NVRAM_TYPE_LOG 0xc
2848/* enum: FPGA image. */
2849#define MC_CMD_NVRAM_TYPE_FPGA 0xd
2850/* enum: FPGA backup image */
2851#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
2852/* enum: FC firmware. */
2853#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
2854/* enum: FC backup firmware. */
2855#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
2856/* enum: CPLD image. */
2857#define MC_CMD_NVRAM_TYPE_CPLD 0x11
2858/* enum: Licensing information. */
2859#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
2860/* enum: FC Log. */
2861#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
1892 2862
1893 2863
1894/***********************************/ 2864/***********************************/
1895/* MC_CMD_NVRAM_INFO 2865/* MC_CMD_NVRAM_INFO
1896 * Read info about a virtual NVRAM partition. 2866 * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
2867 * EINVAL (bad type).
1897 */ 2868 */
1898#define MC_CMD_NVRAM_INFO 0x37 2869#define MC_CMD_NVRAM_INFO 0x37
1899 2870
@@ -1913,13 +2884,19 @@
1913#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 2884#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
1914#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 2885#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
1915#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 2886#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
2887#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
2888#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
2889#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
2890#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
1916#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 2891#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
1917#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 2892#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
1918 2893
1919 2894
1920/***********************************/ 2895/***********************************/
1921/* MC_CMD_NVRAM_UPDATE_START 2896/* MC_CMD_NVRAM_UPDATE_START
1922 * Start a group of update operations on a virtual NVRAM partition. 2897 * Start a group of update operations on a virtual NVRAM partition. Locks
2898 * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
2899 * PHY_LOCK required and not held).
1923 */ 2900 */
1924#define MC_CMD_NVRAM_UPDATE_START 0x38 2901#define MC_CMD_NVRAM_UPDATE_START 0x38
1925 2902
@@ -1935,7 +2912,9 @@
1935 2912
1936/***********************************/ 2913/***********************************/
1937/* MC_CMD_NVRAM_READ 2914/* MC_CMD_NVRAM_READ
1938 * Read data from a virtual NVRAM partition. 2915 * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
2916 * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
2917 * PHY_LOCK required and not held)
1939 */ 2918 */
1940#define MC_CMD_NVRAM_READ 0x39 2919#define MC_CMD_NVRAM_READ 0x39
1941 2920
@@ -1945,6 +2924,7 @@
1945/* Enum values, see field(s): */ 2924/* Enum values, see field(s): */
1946/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ 2925/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1947#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 2926#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
2927/* amount to read in bytes */
1948#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 2928#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
1949 2929
1950/* MC_CMD_NVRAM_READ_OUT msgresponse */ 2930/* MC_CMD_NVRAM_READ_OUT msgresponse */
@@ -1959,7 +2939,9 @@
1959 2939
1960/***********************************/ 2940/***********************************/
1961/* MC_CMD_NVRAM_WRITE 2941/* MC_CMD_NVRAM_WRITE
1962 * Write data to a virtual NVRAM partition. 2942 * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
2943 * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
2944 * PHY_LOCK required and not held)
1963 */ 2945 */
1964#define MC_CMD_NVRAM_WRITE 0x3a 2946#define MC_CMD_NVRAM_WRITE 0x3a
1965 2947
@@ -1983,7 +2965,9 @@
1983 2965
1984/***********************************/ 2966/***********************************/
1985/* MC_CMD_NVRAM_ERASE 2967/* MC_CMD_NVRAM_ERASE
1986 * Erase sector(s) from a virtual NVRAM partition. 2968 * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
2969 * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
2970 * PHY_LOCK required and not held)
1987 */ 2971 */
1988#define MC_CMD_NVRAM_ERASE 0x3b 2972#define MC_CMD_NVRAM_ERASE 0x3b
1989 2973
@@ -2001,7 +2985,9 @@
2001 2985
2002/***********************************/ 2986/***********************************/
2003/* MC_CMD_NVRAM_UPDATE_FINISH 2987/* MC_CMD_NVRAM_UPDATE_FINISH
2004 * Finish a group of update operations on a virtual NVRAM partition. 2988 * Finish a group of update operations on a virtual NVRAM partition. Locks
2989 * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
2990 * type/offset/length), EACCES (if PHY_LOCK required and not held)
2005 */ 2991 */
2006#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c 2992#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
2007 2993
@@ -2019,6 +3005,20 @@
2019/***********************************/ 3005/***********************************/
2020/* MC_CMD_REBOOT 3006/* MC_CMD_REBOOT
2021 * Reboot the MC. 3007 * Reboot the MC.
3008 *
3009 * The AFTER_ASSERTION flag is intended to be used when the driver notices an
3010 * assertion failure (at which point it is expected to perform a complete tear
3011 * down and reinitialise), to allow both ports to reset the MC once in an
3012 * atomic fashion.
3013 *
3014 * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
3015 * which means that they will automatically reboot out of the assertion
3016 * handler, so this is in practice an optional operation. It is still
3017 * recommended that drivers execute this to support custom firmwares with
3018 * REBOOT_ON_ASSERT=0.
3019 *
3020 * Locks required: NONE. Returns: Nothing. You get back a response with ERR=1,
3021 * DATALEN=0
2022 */ 3022 */
2023#define MC_CMD_REBOOT 0x3d 3023#define MC_CMD_REBOOT 0x3d
2024 3024
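Since a successful reboot never produces a normal completion (the response carries ERR=1 with DATALEN=0), a driver-side sketch can treat that outcome as success. This assumes a hypothetical mcdi_rpc() transport that maps an ERR=1 response to -EIO, and a little-endian host; the FLAGS encoding itself is not shown in this hunk:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define MC_CMD_REBOOT 0x3d
#define MC_CMD_REBOOT_IN_LEN 4          /* assumed: one FLAGS dword */

/* Hypothetical transport: issue an MCDI command, return 0 or -errno. */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen);

static int mc_reboot(uint32_t flags)
{
        int rc = mcdi_rpc(MC_CMD_REBOOT, (const uint8_t *)&flags,
                          MC_CMD_REBOOT_IN_LEN, NULL, 0);

        return rc == -EIO ? 0 : rc;     /* ERR=1, DATALEN=0 is expected */
}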
@@ -2033,7 +3033,9 @@
2033 3033
2034/***********************************/ 3034/***********************************/
2035/* MC_CMD_SCHEDINFO 3035/* MC_CMD_SCHEDINFO
2036 * Request scheduler info. 3036 * Request scheduler info. Locks required: NONE. Returns: An array of
3037 * (timeslice, maximum overrun), one for each thread, in ascending order of
3038 * thread address.
2037 */ 3039 */
2038#define MC_CMD_SCHEDINFO 0x3e 3040#define MC_CMD_SCHEDINFO 0x3e
2039 3041
@@ -2052,14 +3054,24 @@
2052 3054
2053/***********************************/ 3055/***********************************/
2054/* MC_CMD_REBOOT_MODE 3056/* MC_CMD_REBOOT_MODE
3057 * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
3058 * mode to the specified value. Returns the old mode.
2055 */ 3059 */
2056#define MC_CMD_REBOOT_MODE 0x3f 3060#define MC_CMD_REBOOT_MODE 0x3f
2057 3061
2058/* MC_CMD_REBOOT_MODE_IN msgrequest */ 3062/* MC_CMD_REBOOT_MODE_IN msgrequest */
2059#define MC_CMD_REBOOT_MODE_IN_LEN 4 3063#define MC_CMD_REBOOT_MODE_IN_LEN 4
2060#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 3064#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
2061#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */ 3065/* enum: Normal. */
2062#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */ 3066#define MC_CMD_REBOOT_MODE_NORMAL 0x0
3067/* enum: Power-on Reset. */
3068#define MC_CMD_REBOOT_MODE_POR 0x2
3069/* enum: Snapper. */
3070#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
3071/* enum: snapper fake POR */
3072#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
3073#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
3074#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
2063 3075
2064/* MC_CMD_REBOOT_MODE_OUT msgresponse */ 3076/* MC_CMD_REBOOT_MODE_OUT msgresponse */
2065#define MC_CMD_REBOOT_MODE_OUT_LEN 4 3077#define MC_CMD_REBOOT_MODE_OUT_LEN 4
@@ -2069,32 +3081,145 @@
2069/***********************************/ 3081/***********************************/
2070/* MC_CMD_SENSOR_INFO 3082/* MC_CMD_SENSOR_INFO
2071 * Returns information about every available sensor. 3083 * Returns information about every available sensor.
3084 *
3085 * Each sensor has a single (16bit) value, and a corresponding state. The
3086 * mapping between value and state is nominally determined by the MC, but may
3087 * be implemented using up to 2 ranges per sensor.
3088 *
3089 * This call returns a mask (32bit) of the sensors that are supported by this
3090 * platform, then an array of sensor information structures, in order of sensor
3091 * type (but without gaps for unimplemented sensors). Each structure defines
3092 * the ranges for the corresponding sensor. An unused range is indicated by
3093 * equal limit values. If one range is used, a value outside that range results
3094 * in STATE_FATAL. If two ranges are used, a value outside the second range
3095 * results in STATE_FATAL while a value outside the first and inside the second
3096 * range results in STATE_WARNING.
3097 *
3098 * Sensor masks and sensor information arrays are organised into pages. For
3099 * backward compatibility, older host software can only use sensors in page 0.
3100 * Bit 31 in the sensor mask was previously unused, and is now reserved for use
3101 * as the next page flag.
3102 *
3103 * If the request does not contain a PAGE value then firmware will only return
3104 * page 0 of sensor information, with bit 31 in the sensor mask cleared.
3105 *
3106 * If the request contains a PAGE value then firmware responds with the sensor
3107 * mask and sensor information array for that page of sensors. In this case bit
3108 * 31 in the mask is set if another page exists.
3109 *
3110 * Locks required: None Returns: 0
2072 */ 3111 */
2073#define MC_CMD_SENSOR_INFO 0x41 3112#define MC_CMD_SENSOR_INFO 0x41
2074 3113
2075/* MC_CMD_SENSOR_INFO_IN msgrequest */ 3114/* MC_CMD_SENSOR_INFO_IN msgrequest */
2076#define MC_CMD_SENSOR_INFO_IN_LEN 0 3115#define MC_CMD_SENSOR_INFO_IN_LEN 0
2077 3116
3117/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
3118#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
3119/* Which page of sensors to report.
3120 *
3121 * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
3122 *
3123 * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
3124 */
3125#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
3126
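A sketch of the page-walking convention described above, assuming a hypothetical mcdi_rpc() transport and a little-endian host: request page N, then continue while the NEXT_PAGE bit (bit 31) is set in the returned mask.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MC_CMD_SENSOR_INFO 0x41
#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31

/* Hypothetical transport: issue an MCDI command, return 0 or -errno. */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen);

static int walk_sensor_pages(void)
{
        uint8_t out[MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX];
        uint32_t page = 0, mask;
        int rc;

        do {
                /* PAGE is the dword at offset 0 of the EXT_IN request. */
                rc = mcdi_rpc(MC_CMD_SENSOR_INFO, (const uint8_t *)&page,
                              MC_CMD_SENSOR_INFO_EXT_IN_LEN,
                              out, sizeof(out));
                if (rc)
                        return rc;
                memcpy(&mask, out, sizeof(mask));   /* MASK at offset 0 */
                /* ... decode this page's sensor entries here ... */
                page++;
        } while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));
        return 0;
}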
2078/* MC_CMD_SENSOR_INFO_OUT msgresponse */ 3127/* MC_CMD_SENSOR_INFO_OUT msgresponse */
2079#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12 3128#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
2080#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 3129#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
2081#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) 3130#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
2082#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 3131#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
2083#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */ 3132/* enum: Controller temperature: degC */
2084#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */ 3133#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
2085#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */ 3134/* enum: Phy common temperature: degC */
2086#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */ 3135#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
2087#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */ 3136/* enum: Controller cooling: bool */
2088#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */ 3137#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
2089#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */ 3138/* enum: Phy 0 temperature: degC */
2090#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */ 3139#define MC_CMD_SENSOR_PHY0_TEMP 0x3
2091#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */ 3140/* enum: Phy 0 cooling: bool */
2092#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */ 3141#define MC_CMD_SENSOR_PHY0_COOLING 0x4
2093#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */ 3142/* enum: Phy 1 temperature: degC */
2094#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */ 3143#define MC_CMD_SENSOR_PHY1_TEMP 0x5
2095#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */ 3144/* enum: Phy 1 cooling: bool */
2096#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */ 3145#define MC_CMD_SENSOR_PHY1_COOLING 0x6
2097#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */ 3146/* enum: 1.0v power: mV */
3147#define MC_CMD_SENSOR_IN_1V0 0x7
3148/* enum: 1.2v power: mV */
3149#define MC_CMD_SENSOR_IN_1V2 0x8
3150/* enum: 1.8v power: mV */
3151#define MC_CMD_SENSOR_IN_1V8 0x9
3152/* enum: 2.5v power: mV */
3153#define MC_CMD_SENSOR_IN_2V5 0xa
3154/* enum: 3.3v power: mV */
3155#define MC_CMD_SENSOR_IN_3V3 0xb
3156/* enum: 12v power: mV */
3157#define MC_CMD_SENSOR_IN_12V0 0xc
3158/* enum: 1.2v analogue power: mV */
3159#define MC_CMD_SENSOR_IN_1V2A 0xd
3160/* enum: reference voltage: mV */
3161#define MC_CMD_SENSOR_IN_VREF 0xe
3162/* enum: AOE FPGA power: mV */
3163#define MC_CMD_SENSOR_OUT_VAOE 0xf
3164/* enum: AOE FPGA temperature: degC */
3165#define MC_CMD_SENSOR_AOE_TEMP 0x10
3166/* enum: AOE FPGA PSU temperature: degC */
3167#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
3168/* enum: AOE PSU temperature: degC */
3169#define MC_CMD_SENSOR_PSU_TEMP 0x12
3170/* enum: Fan 0 speed: RPM */
3171#define MC_CMD_SENSOR_FAN_0 0x13
3172/* enum: Fan 1 speed: RPM */
3173#define MC_CMD_SENSOR_FAN_1 0x14
3174/* enum: Fan 2 speed: RPM */
3175#define MC_CMD_SENSOR_FAN_2 0x15
3176/* enum: Fan 3 speed: RPM */
3177#define MC_CMD_SENSOR_FAN_3 0x16
3178/* enum: Fan 4 speed: RPM */
3179#define MC_CMD_SENSOR_FAN_4 0x17
3180/* enum: AOE FPGA input power: mV */
3181#define MC_CMD_SENSOR_IN_VAOE 0x18
3182/* enum: AOE FPGA current: mA */
3183#define MC_CMD_SENSOR_OUT_IAOE 0x19
3184/* enum: AOE FPGA input current: mA */
3185#define MC_CMD_SENSOR_IN_IAOE 0x1a
3186/* enum: NIC power consumption: W */
3187#define MC_CMD_SENSOR_NIC_POWER 0x1b
3188/* enum: 0.9v power voltage: mV */
3189#define MC_CMD_SENSOR_IN_0V9 0x1c
3190/* enum: 0.9v power current: mA */
3191#define MC_CMD_SENSOR_IN_I0V9 0x1d
3192/* enum: 1.2v power current: mA */
3193#define MC_CMD_SENSOR_IN_I1V2 0x1e
3194/* enum: Not a sensor: reserved for the next page flag */
3195#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
3196/* enum: 0.9v power voltage (at ADC): mV */
3197#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
3198/* enum: Controller temperature 2: degC */
3199#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
3200/* enum: Voltage regulator internal temperature: degC */
3201#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
3202/* enum: 0.9V voltage regulator temperature: degC */
3203#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
3204/* enum: 1.2V voltage regulator temperature: degC */
3205#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
3206/* enum: controller internal temperature sensor voltage (internal ADC): mV */
3207#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
3208/* enum: controller internal temperature (internal ADC): degC */
3209#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
3210/* enum: controller internal temperature sensor voltage (external ADC): mV */
3211#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
3212/* enum: controller internal temperature (external ADC): degC */
3213#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
3214/* enum: ambient temperature: degC */
3215#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
3216/* enum: air flow: bool */
3217#define MC_CMD_SENSOR_AIRFLOW 0x2a
3218/* enum: voltage between VDD08D and VSS08D at CSR: mV */
3219#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
3220/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
3221#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
3222/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
2098#define MC_CMD_SENSOR_ENTRY_OFST 4 3223#define MC_CMD_SENSOR_ENTRY_OFST 4
2099#define MC_CMD_SENSOR_ENTRY_LEN 8 3224#define MC_CMD_SENSOR_ENTRY_LEN 8
2100#define MC_CMD_SENSOR_ENTRY_LO_OFST 4 3225#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
@@ -2102,6 +3227,23 @@
2102#define MC_CMD_SENSOR_ENTRY_MINNUM 1 3227#define MC_CMD_SENSOR_ENTRY_MINNUM 1
2103#define MC_CMD_SENSOR_ENTRY_MAXNUM 31 3228#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
2104 3229
3230/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
3231#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
3232#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
3233#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
3234#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
3235/* Enum values, see field(s): */
3236/* MC_CMD_SENSOR_INFO_OUT */
3237#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
3238#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
3239/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
3240/* MC_CMD_SENSOR_ENTRY_OFST 4 */
3241/* MC_CMD_SENSOR_ENTRY_LEN 8 */
3242/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
3243/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
3244/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
3245/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
3246
2105/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */ 3247/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
2106#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8 3248#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
2107#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0 3249#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
@@ -2124,39 +3266,80 @@
2124 3266
2125/***********************************/ 3267/***********************************/
2126/* MC_CMD_READ_SENSORS 3268/* MC_CMD_READ_SENSORS
2127 * Returns the current reading from each sensor. 3269 * Returns the current reading from each sensor. DMAs an array of sensor
3270 * readings, in order of sensor type (but without gaps for unimplemented
3271 * sensors), into host memory. Each array element is a
3272 * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
3273 *
3274 * If the request does not contain the LENGTH field then only sensors 0 to 30
3275 * are reported, to avoid DMA buffer overflow in older host software. If the
3276 * sensor reading require more space than the LENGTH allows, then return
3277 * EINVAL.
3278 *
3279 * The MC will send a SENSOREVT event every time any sensor changes state. The
3280 * driver is responsible for ensuring that it doesn't miss any events. The
3281 * board will function normally if all sensors are in STATE_OK or
3282 * STATE_WARNING. Otherwise the board should not be expected to function.
2128 */ 3283 */
2129#define MC_CMD_READ_SENSORS 0x42 3284#define MC_CMD_READ_SENSORS 0x42
2130 3285
2131/* MC_CMD_READ_SENSORS_IN msgrequest */ 3286/* MC_CMD_READ_SENSORS_IN msgrequest */
2132#define MC_CMD_READ_SENSORS_IN_LEN 8 3287#define MC_CMD_READ_SENSORS_IN_LEN 8
3288/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
2133#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0 3289#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
2134#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8 3290#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
2135#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 3291#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
2136#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 3292#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
2137 3293
3294/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
3295#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
3296/* DMA address of host buffer for sensor readings */
3297#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
3298#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
3299#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
3300#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
3301/* Size in bytes of host buffer. */
3302#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
3303
2138/* MC_CMD_READ_SENSORS_OUT msgresponse */ 3304/* MC_CMD_READ_SENSORS_OUT msgresponse */
2139#define MC_CMD_READ_SENSORS_OUT_LEN 0 3305#define MC_CMD_READ_SENSORS_OUT_LEN 0
2140 3306
3307/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
3308#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
3309
2141/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */ 3310/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
2142#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3 3311#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
2143#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0 3312#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
2144#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2 3313#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
2145#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0 3314#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
2146#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16 3315#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
2147#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 3316#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
2148#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 3317#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
2149#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */ 3318/* enum: Ok. */
2150#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */ 3319#define MC_CMD_SENSOR_STATE_OK 0x0
2151#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */ 3320/* enum: Breached warning threshold. */
2152#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */ 3321#define MC_CMD_SENSOR_STATE_WARNING 0x1
3322/* enum: Breached fatal threshold. */
3323#define MC_CMD_SENSOR_STATE_FATAL 0x2
3324/* enum: Fault with sensor. */
3325#define MC_CMD_SENSOR_STATE_BROKEN 0x3
3326/* enum: Sensor is working but does not currently have a reading. */
3327#define MC_CMD_SENSOR_STATE_NO_READING 0x4
2153#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 3328#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
2154#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 3329#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
3330#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
3331#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
3332/* Enum values, see field(s): */
3333/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
3334#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
3335#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
2155 3336
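Given the VALUE/STATE/TYPE layout defined above (16 bits at offset 0, 8 bits at offset 2, 8 bits at offset 3 within each 4-byte entry), a small decode sketch:

#include <stdint.h>

struct sensor_value {
        uint16_t value;         /* reading, units per the sensor type */
        uint8_t state;          /* MC_CMD_SENSOR_STATE_* */
        uint8_t type;           /* MC_CMD_SENSOR_* */
};

static struct sensor_value decode_sensor_entry(const uint8_t *p)
{
        struct sensor_value v;

        v.value = (uint16_t)(p[0] | p[1] << 8);  /* VALUE: LBN 0, width 16 */
        v.state = p[2];                          /* STATE: LBN 16, width 8 */
        v.type  = p[3];                          /* TYPE:  LBN 24, width 8 */
        return v;
}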
2156 3337
2157/***********************************/ 3338/***********************************/
2158/* MC_CMD_GET_PHY_STATE 3339/* MC_CMD_GET_PHY_STATE
2159 * Report current state of PHY. 3340 * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
3341 * (e.g. due to missing or corrupted firmware). Locks required: None. Return
3342 * code: 0
2160 */ 3343 */
2161#define MC_CMD_GET_PHY_STATE 0x43 3344#define MC_CMD_GET_PHY_STATE 0x43
2162 3345
@@ -2166,13 +3349,16 @@
2166/* MC_CMD_GET_PHY_STATE_OUT msgresponse */ 3349/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
2167#define MC_CMD_GET_PHY_STATE_OUT_LEN 4 3350#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
2168#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 3351#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
2169#define MC_CMD_PHY_STATE_OK 0x1 /* enum */ 3352/* enum: Ok. */
2170#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */ 3353#define MC_CMD_PHY_STATE_OK 0x1
3354/* enum: Faulty. */
3355#define MC_CMD_PHY_STATE_ZOMBIE 0x2
2171 3356
2172 3357
2173/***********************************/ 3358/***********************************/
2174/* MC_CMD_SETUP_8021QBB 3359/* MC_CMD_SETUP_8021QBB
2175 * 802.1Qbb control. 3360 * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
3361 * disable 802.1Qbb for a given priority.
2176 */ 3362 */
2177#define MC_CMD_SETUP_8021QBB 0x44 3363#define MC_CMD_SETUP_8021QBB 0x44
2178 3364
@@ -2187,7 +3373,7 @@
2187 3373
2188/***********************************/ 3374/***********************************/
2189/* MC_CMD_WOL_FILTER_GET 3375/* MC_CMD_WOL_FILTER_GET
2190 * Retrieve ID of any WoL filters. 3376 * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
2191 */ 3377 */
2192#define MC_CMD_WOL_FILTER_GET 0x45 3378#define MC_CMD_WOL_FILTER_GET 0x45
2193 3379
@@ -2201,7 +3387,8 @@
2201 3387
2202/***********************************/ 3388/***********************************/
2203/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD 3389/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
2204 * Add a protocol offload to NIC for lights-out state. 3390 * Add a protocol offload to NIC for lights-out state. Locks required: None.
3391 * Returns: 0, ENOSYS
2205 */ 3392 */
2206#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 3393#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
2207 3394
@@ -2241,7 +3428,8 @@
2241 3428
2242/***********************************/ 3429/***********************************/
2243/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 3430/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
2244 * Remove a protocol offload from NIC for lights-out state. 3431 * Remove a protocol offload from NIC for lights-out state. Locks required:
3432 * None. Returns: 0, ENOSYS
2245 */ 3433 */
2246#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 3434#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
2247 3435
@@ -2256,7 +3444,7 @@
2256 3444
2257/***********************************/ 3445/***********************************/
2258/* MC_CMD_MAC_RESET_RESTORE 3446/* MC_CMD_MAC_RESET_RESTORE
2259 * Restore MAC after block reset. 3447 * Restore MAC after block reset. Locks required: None. Returns: 0.
2260 */ 3448 */
2261#define MC_CMD_MAC_RESET_RESTORE 0x48 3449#define MC_CMD_MAC_RESET_RESTORE 0x48
2262 3450
@@ -2269,6 +3457,9 @@
2269 3457
2270/***********************************/ 3458/***********************************/
2271/* MC_CMD_TESTASSERT 3459/* MC_CMD_TESTASSERT
3460 * Deliberately trigger an assert-detonation in the firmware for testing
3461 * purposes (i.e. to verify that the driver copes gracefully). Locks
3462 * required: None Returns: 0
2272 */ 3463 */
2273#define MC_CMD_TESTASSERT 0x49 3464#define MC_CMD_TESTASSERT 0x49
2274 3465
@@ -2281,14 +3472,23 @@
2281 3472
2282/***********************************/ 3473/***********************************/
2283/* MC_CMD_WORKAROUND 3474/* MC_CMD_WORKAROUND
2284 * Enable/Disable a given workaround. 3475 * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
3476 * understand the given workaround number - which should not be treated as a
3477 * hard error by client code. This op does not imply any semantics about each
3478 * workaround, that's between the driver and the mcfw on a per-workaround
3479 * basis. Locks required: None. Returns: 0, EINVAL.
2285 */ 3480 */
2286#define MC_CMD_WORKAROUND 0x4a 3481#define MC_CMD_WORKAROUND 0x4a
2287 3482
2288/* MC_CMD_WORKAROUND_IN msgrequest */ 3483/* MC_CMD_WORKAROUND_IN msgrequest */
2289#define MC_CMD_WORKAROUND_IN_LEN 8 3484#define MC_CMD_WORKAROUND_IN_LEN 8
2290#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 3485#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
2291#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */ 3486/* enum: Bug 17230 work around. */
3487#define MC_CMD_WORKAROUND_BUG17230 0x1
3488/* enum: Bug 35388 work around (unsafe EVQ writes). */
3489#define MC_CMD_WORKAROUND_BUG35388 0x2
3490/* enum: Bug35017 workaround (A64 tables must be identity map) */
3491#define MC_CMD_WORKAROUND_BUG35017 0x3
2292#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 3492#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
2293 3493
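A sketch of the tolerant calling convention suggested above, where EINVAL ("workaround number not understood") is not treated as a hard error; mcdi_rpc() is hypothetical and a little-endian host is assumed:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MC_CMD_WORKAROUND 0x4a
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_BUG35388 0x2

/* Hypothetical transport: issue an MCDI command, return 0 or -errno. */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen);

static int set_workaround(uint32_t type, uint32_t enabled)
{
        uint8_t in[MC_CMD_WORKAROUND_IN_LEN];
        int rc;

        memcpy(in, &type, 4);           /* TYPE at offset 0 */
        memcpy(in + 4, &enabled, 4);    /* ENABLED at offset 4 */
        rc = mcdi_rpc(MC_CMD_WORKAROUND, in, sizeof(in), NULL, 0);
        return rc == -EINVAL ? 0 : rc;  /* unknown workaround: not fatal */
}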
2294/* MC_CMD_WORKAROUND_OUT msgresponse */ 3494/* MC_CMD_WORKAROUND_OUT msgresponse */
@@ -2297,7 +3497,12 @@
2297 3497
2298/***********************************/ 3498/***********************************/
2299/* MC_CMD_GET_PHY_MEDIA_INFO 3499/* MC_CMD_GET_PHY_MEDIA_INFO
2300 * Read media-specific data from PHY. 3500 * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
3501 * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
3502 * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
3503 * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
3504 * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
3505 * Anything else: currently undefined. Locks required: None. Return code: 0.
2301 */ 3506 */
2302#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b 3507#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
2303 3508
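For the SFP+ case above (PAGE=0 or 1 returns a 128-byte block from module I2C address 0xA0), a sketch; the request's PAGE field offset is not shown in this hunk and is assumed to be dword 0, mcdi_rpc() is hypothetical, and a little-endian host is assumed:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4

/* Hypothetical transport: issue an MCDI command, return 0 or -errno. */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen);

static int read_sfp_block(uint32_t page, uint8_t block[128])
{
        uint8_t out[MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + 128];
        int rc;

        rc = mcdi_rpc(MC_CMD_GET_PHY_MEDIA_INFO, (const uint8_t *)&page,
                      sizeof(page), out, sizeof(out));
        if (rc)
                return rc;
        memcpy(block, out + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, 128);
        return 0;
}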
@@ -2309,6 +3514,7 @@
2309#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 3514#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
2310#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 3515#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
2311#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) 3516#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
3517/* in bytes */
2312#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 3518#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
2313#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 3519#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
2314#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 3520#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
@@ -2318,7 +3524,8 @@
2318 3524
2319/***********************************/ 3525/***********************************/
2320/* MC_CMD_NVRAM_TEST 3526/* MC_CMD_NVRAM_TEST
2321 * Test a particular NVRAM partition. 3527 * Test a particular NVRAM partition for valid contents (where "valid" depends
3528 * on the type of partition).
2322 */ 3529 */
2323#define MC_CMD_NVRAM_TEST 0x4c 3530#define MC_CMD_NVRAM_TEST 0x4c
2324 3531
@@ -2331,22 +3538,31 @@
2331/* MC_CMD_NVRAM_TEST_OUT msgresponse */ 3538/* MC_CMD_NVRAM_TEST_OUT msgresponse */
2332#define MC_CMD_NVRAM_TEST_OUT_LEN 4 3539#define MC_CMD_NVRAM_TEST_OUT_LEN 4
2333#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 3540#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
2334#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */ 3541/* enum: Passed. */
2335#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */ 3542#define MC_CMD_NVRAM_TEST_PASS 0x0
2336#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */ 3543/* enum: Failed. */
3544#define MC_CMD_NVRAM_TEST_FAIL 0x1
3545/* enum: Not supported. */
3546#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
2337 3547
2338 3548
2339/***********************************/ 3549/***********************************/
2340/* MC_CMD_MRSFP_TWEAK 3550/* MC_CMD_MRSFP_TWEAK
2341 * Read status and/or set parameters for the 'mrsfp' driver. 3551 * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
3552 * I2C I/O expander bits are always read; if equaliser parameters are supplied,
3553 * they are configured first. Locks required: None. Return code: 0, EINVAL.
2342 */ 3554 */
2343#define MC_CMD_MRSFP_TWEAK 0x4d 3555#define MC_CMD_MRSFP_TWEAK 0x4d
2344 3556
2345/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */ 3557/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
2346#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 3558#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
3559/* 0-6 low->high de-emph. */
2347#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 3560#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
3561/* 0-8 low->high ref.V */
2348#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 3562#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
3563/* 0-8 low->high boost */
2349#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 3564#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
3565/* 0-8 low->high ref.V */
2350#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 3566#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
2351 3567
2352/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ 3568/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
@@ -2354,16 +3570,23 @@
2354 3570
2355/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */ 3571/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
2356#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 3572#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
3573/* input bits */
2357#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 3574#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
3575/* output bits */
2358#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 3576#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
3577/* direction */
2359#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 3578#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
2360#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */ 3579/* enum: Out. */
2361#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */ 3580#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
3581/* enum: In. */
3582#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
2362 3583
2363 3584
2364/***********************************/ 3585/***********************************/
2365/* MC_CMD_SENSOR_SET_LIMS 3586/* MC_CMD_SENSOR_SET_LIMS
2366 * Adjusts the sensor limits. 3587 * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
3588 * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
3589 * of range.
2367 */ 3590 */
2368#define MC_CMD_SENSOR_SET_LIMS 0x4e 3591#define MC_CMD_SENSOR_SET_LIMS 0x4e
2369 3592
@@ -2372,9 +3595,13 @@
2372#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 3595#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
2373/* Enum values, see field(s): */ 3596/* Enum values, see field(s): */
2374/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ 3597/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
3598/* interpretation is sensor-specific. */
2375#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 3599#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
3600/* interpretation is sensor-specific. */
2376#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 3601#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
3602/* interpretation is sensor-specific. */
2377#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 3603#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
3604/* interpretation is sensor-specific. */
2378#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 3605#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
2379 3606
2380/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ 3607/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
@@ -2396,9 +3623,3640 @@
2396#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 3623#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
2397#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 3624#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
2398 3625
3626
3627/***********************************/
3628/* MC_CMD_NVRAM_PARTITIONS
3629 * Reads the list of available virtual NVRAM partition types. Locks required:
3630 * none. Returns: 0, EINVAL (bad type).
3631 */
3632#define MC_CMD_NVRAM_PARTITIONS 0x51
3633
3634/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
3635#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
3636
3637/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
3638#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
3639#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
3640#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
3641/* total number of partitions */
3642#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
3643/* type ID code for each of NUM_PARTITIONS partitions */
3644#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
3645#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
3646#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
3647#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
3648
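A sketch parsing the response laid out above (NUM_PARTITIONS, followed by that many 32-bit type IDs); mcdi_rpc() is hypothetical and a little-endian host is assumed:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MC_CMD_NVRAM_PARTITIONS 0x51
#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4

/* Hypothetical transport: issue an MCDI command, return 0 or -errno. */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen);

static int list_partitions(uint32_t *ids, uint32_t max_ids, uint32_t *count)
{
        uint8_t out[MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX];
        uint32_t n, i;
        int rc;

        rc = mcdi_rpc(MC_CMD_NVRAM_PARTITIONS, NULL, 0, out, sizeof(out));
        if (rc)
                return rc;
        memcpy(&n, out + MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST, 4);
        for (i = 0; i < n && i < max_ids; i++)
                memcpy(&ids[i],
                       out + MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST +
                             i * MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN, 4);
        *count = n;
        return 0;
}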
3649
3650/***********************************/
3651/* MC_CMD_NVRAM_METADATA
3652 * Reads soft metadata for a virtual NVRAM partition type. Locks required:
3653 * none. Returns: 0, EINVAL (bad type).
3654 */
3655#define MC_CMD_NVRAM_METADATA 0x52
3656
3657/* MC_CMD_NVRAM_METADATA_IN msgrequest */
3658#define MC_CMD_NVRAM_METADATA_IN_LEN 4
3659/* Partition type ID code */
3660#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
3661
3662/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
3663#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
3664#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
3665#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
3666/* Partition type ID code */
3667#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
3668#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
3669#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
3670#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
3671#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
3672#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
3673#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
3674#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
3675/* Subtype ID code for content of this partition */
3676#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
3677/* 1st component of W.X.Y.Z version number for content of this partition */
3678#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
3679#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
3680/* 2nd component of W.X.Y.Z version number for content of this partition */
3681#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
3682#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
3683/* 3rd component of W.X.Y.Z version number for content of this partition */
3684#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
3685#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
3686/* 4th component of W.X.Y.Z version number for content of this partition */
3687#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
3688#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
3689/* Zero-terminated string describing the content of this partition */
3690#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
3691#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
3692#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
3693#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
3694
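A sketch formatting the W.X.Y.Z version fields defined above; callers should check VERSION_VALID in FLAGS first, and a little-endian host is assumed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18

static void format_nvram_version(const uint8_t *out, char *buf, size_t len)
{
        uint16_t w, x, y, z;

        memcpy(&w, out + MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST, 2);
        memcpy(&x, out + MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST, 2);
        memcpy(&y, out + MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST, 2);
        memcpy(&z, out + MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST, 2);
        snprintf(buf, len, "%u.%u.%u.%u", w, x, y, z);
}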
3695
3696/***********************************/
3697/* MC_CMD_GET_MAC_ADDRESSES
3698 * Returns the base MAC, count and stride for the requesting function.
3699 */
3700#define MC_CMD_GET_MAC_ADDRESSES 0x55
3701
3702/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
3703#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
3704
3705/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
3706#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
3707/* Base MAC address */
3708#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
3709#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
3710/* Padding */
3711#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
3712#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
3713/* Number of allocated MAC addresses */
3714#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
3715/* Spacing of allocated MAC addresses */
3716#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
3717
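A sketch of one plausible interpretation of base/count/stride, where the stride is added to the numeric value of the low three bytes of the base address; the exact carry semantics are not specified in this hunk, so treat this purely as an illustration:

#include <stdint.h>

/* Derive the nth allocated MAC (0 <= n < MAC_COUNT) from the base. Carry
 * beyond the low three bytes is discarded in this sketch. */
static void nth_mac(const uint8_t base[6], uint32_t stride, uint32_t n,
                    uint8_t mac[6])
{
        uint32_t low = ((uint32_t)base[3] << 16) |
                       ((uint32_t)base[4] << 8) | base[5];

        low += n * stride;
        mac[0] = base[0];
        mac[1] = base[1];
        mac[2] = base[2];
        mac[3] = (uint8_t)(low >> 16);
        mac[4] = (uint8_t)(low >> 8);
        mac[5] = (uint8_t)low;
}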
2399/* MC_CMD_RESOURCE_SPECIFIER enum */ 3718/* MC_CMD_RESOURCE_SPECIFIER enum */
2400#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */ 3719/* enum: Any */
2401#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */ 3720#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
3721/* enum: None */
3722#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
3723
3724/* EVB_PORT_ID structuredef */
3725#define EVB_PORT_ID_LEN 4
3726#define EVB_PORT_ID_PORT_ID_OFST 0
3727/* enum: An invalid port handle. */
3728#define EVB_PORT_ID_NULL 0x0
3729/* enum: The port assigned to this function. */
3730#define EVB_PORT_ID_ASSIGNED 0x1000000
3731/* enum: External network port 0 */
3732#define EVB_PORT_ID_MAC0 0x2000000
3733/* enum: External network port 1 */
3734#define EVB_PORT_ID_MAC1 0x2000001
3735/* enum: External network port 2 */
3736#define EVB_PORT_ID_MAC2 0x2000002
3737/* enum: External network port 3 */
3738#define EVB_PORT_ID_MAC3 0x2000003
3739#define EVB_PORT_ID_PORT_ID_LBN 0
3740#define EVB_PORT_ID_PORT_ID_WIDTH 32
3741
3742/* EVB_VLAN_TAG structuredef */
3743#define EVB_VLAN_TAG_LEN 2
3744/* The VLAN tag value */
3745#define EVB_VLAN_TAG_VLAN_ID_LBN 0
3746#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
3747#define EVB_VLAN_TAG_MODE_LBN 12
3748#define EVB_VLAN_TAG_MODE_WIDTH 4
3749/* enum: Insert the VLAN. */
3750#define EVB_VLAN_TAG_INSERT 0x0
3751/* enum: Replace the VLAN if already present. */
3752#define EVB_VLAN_TAG_REPLACE 0x1
3753
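A sketch packing the 16-bit tag from the bitfield layout above (VLAN_ID in bits 0..11, MODE in bits 12..15), e.g. evb_vlan_tag(100, EVB_VLAN_TAG_INSERT):

#include <stdint.h>

#define EVB_VLAN_TAG_VLAN_ID_LBN 0
#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
#define EVB_VLAN_TAG_MODE_LBN 12
#define EVB_VLAN_TAG_MODE_WIDTH 4
#define EVB_VLAN_TAG_INSERT 0x0

static uint16_t evb_vlan_tag(uint16_t vlan_id, uint16_t mode)
{
        uint16_t id_mask = (1u << EVB_VLAN_TAG_VLAN_ID_WIDTH) - 1;
        uint16_t mode_mask = (1u << EVB_VLAN_TAG_MODE_WIDTH) - 1;

        return (uint16_t)(((vlan_id & id_mask) << EVB_VLAN_TAG_VLAN_ID_LBN) |
                          ((mode & mode_mask) << EVB_VLAN_TAG_MODE_LBN));
}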
3754/* BUFTBL_ENTRY structuredef */
3755#define BUFTBL_ENTRY_LEN 12
3756/* the owner ID */
3757#define BUFTBL_ENTRY_OID_OFST 0
3758#define BUFTBL_ENTRY_OID_LEN 2
3759#define BUFTBL_ENTRY_OID_LBN 0
3760#define BUFTBL_ENTRY_OID_WIDTH 16
3761/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
3762#define BUFTBL_ENTRY_PGSZ_OFST 2
3763#define BUFTBL_ENTRY_PGSZ_LEN 2
3764#define BUFTBL_ENTRY_PGSZ_LBN 16
3765#define BUFTBL_ENTRY_PGSZ_WIDTH 16
3766/* the raw 64-bit address field from the SMC, not adjusted for page size */
3767#define BUFTBL_ENTRY_RAWADDR_OFST 4
3768#define BUFTBL_ENTRY_RAWADDR_LEN 8
3769#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
3770#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
3771#define BUFTBL_ENTRY_RAWADDR_LBN 32
3772#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
3773
3774/* NVRAM_PARTITION_TYPE structuredef */
3775#define NVRAM_PARTITION_TYPE_LEN 2
3776#define NVRAM_PARTITION_TYPE_ID_OFST 0
3777#define NVRAM_PARTITION_TYPE_ID_LEN 2
3778/* enum: Primary MC firmware partition */
3779#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
3780/* enum: Secondary MC firmware partition */
3781#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
3782/* enum: Expansion ROM partition */
3783#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
3784/* enum: Static configuration TLV partition */
3785#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
3786/* enum: Dynamic configuration TLV partition */
3787#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
3788/* enum: Expansion ROM configuration data for port 0 */
3789#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
3790/* enum: Expansion ROM configuration data for port 1 */
3791#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
3792/* enum: Expansion ROM configuration data for port 2 */
3793#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
3794/* enum: Expansion ROM configuration data for port 3 */
3795#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
3796/* enum: Non-volatile log output partition */
3797#define NVRAM_PARTITION_TYPE_LOG 0x700
3798/* enum: Device state dump output partition */
3799#define NVRAM_PARTITION_TYPE_DUMP 0x800
3800/* enum: Application license key storage partition */
3801#define NVRAM_PARTITION_TYPE_LICENSE 0x900
3802/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
3803#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
3804/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
3805#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
3806/* enum: Start of reserved value range (firmware may use for any purpose) */
3807#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
3808/* enum: End of reserved value range (firmware may use for any purpose) */
3809#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
3810/* enum: Recovery partition map (provided if real map is missing or corrupt) */
3811#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
3812/* enum: Partition map (real map as stored in flash) */
3813#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
3814#define NVRAM_PARTITION_TYPE_ID_LBN 0
3815#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
3816
3817
3818/***********************************/
3819/* MC_CMD_READ_REGS
3820 * Get a dump of the MCPU registers
3821 */
3822#define MC_CMD_READ_REGS 0x50
3823
3824/* MC_CMD_READ_REGS_IN msgrequest */
3825#define MC_CMD_READ_REGS_IN_LEN 0
3826
3827/* MC_CMD_READ_REGS_OUT msgresponse */
3828#define MC_CMD_READ_REGS_OUT_LEN 308
3829/* Whether the corresponding register entry contains a valid value */
3830#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
3831#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
3832/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
3833 * fir, fp)
3834 */
3835#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
3836#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
3837#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
3838
3839
3840/***********************************/
3841/* MC_CMD_INIT_EVQ
3842 * Set up an event queue according to the supplied parameters. The IN arguments
3843 * end with an address for each 4k of host memory required to back the EVQ.
3844 */
3845#define MC_CMD_INIT_EVQ 0x80
3846
3847/* MC_CMD_INIT_EVQ_IN msgrequest */
3848#define MC_CMD_INIT_EVQ_IN_LENMIN 44
3849#define MC_CMD_INIT_EVQ_IN_LENMAX 548
3850#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
3851/* Size, in entries */
3852#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
3853/* Desired instance. Must be set to a specific instance, which is a function
3854 * local queue index.
3855 */
3856#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
3857/* The initial timer value. The load value is ignored if the timer mode is DIS.
3858 */
3859#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
3860/* The reload value is ignored in one-shot modes */
3861#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
/* Event queue flags (see the bitfield definitions below) */
#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
/* enum: Triggered */
#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
/* enum: Hold-off */
#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
 * purposes.
 */
#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
/* enum: Count RX events only */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
/* enum: Count TX events only */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
/* enum: Count both RX and TX events */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64

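/* Illustrative sketch (not part of the protocol definition): sizing and
 * filling an INIT_EVQ request. The total length follows the LEN(num) formula
 * above, 36 fixed bytes plus one 8-byte DMA address per 4k page backing the
 * queue, and each 64-bit MCDI field is written as a little-endian LO/HI
 * dword pair at the _LO_/_HI_ offsets. put_unaligned_le32() is the usual
 * <asm/unaligned.h> accessor.
 */
#if 0 /* example only */
static size_t example_build_init_evq(u8 *buf, u32 size, u32 instance,
				     const u64 *dma_addrs, unsigned int n_pages)
{
	unsigned int i;

	memset(buf, 0, MC_CMD_INIT_EVQ_IN_LEN(n_pages));
	put_unaligned_le32(size, buf + MC_CMD_INIT_EVQ_IN_SIZE_OFST);
	put_unaligned_le32(instance, buf + MC_CMD_INIT_EVQ_IN_INSTANCE_OFST);
	for (i = 0; i < n_pages; i++) {
		put_unaligned_le32((u32)dma_addrs[i],
				   buf + MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST + 8 * i);
		put_unaligned_le32((u32)(dma_addrs[i] >> 32),
				   buf + MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST + 8 * i);
	}
	return MC_CMD_INIT_EVQ_IN_LEN(n_pages);	/* 36 + 8 * n_pages bytes */
}
#endif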
/* MC_CMD_INIT_EVQ_OUT msgresponse */
#define MC_CMD_INIT_EVQ_OUT_LEN 4
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0

/* QUEUE_CRC_MODE structuredef */
#define QUEUE_CRC_MODE_LEN 1
#define QUEUE_CRC_MODE_MODE_LBN 0
#define QUEUE_CRC_MODE_MODE_WIDTH 4
/* enum: No CRC. */
#define QUEUE_CRC_MODE_NONE 0x0
/* enum: CRC Fiber channel over ethernet. */
#define QUEUE_CRC_MODE_FCOE 0x1
/* enum: CRC (digest) iSCSI header only. */
#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
/* enum: CRC (digest) iSCSI header and payload. */
#define QUEUE_CRC_MODE_ISCSI 0x3
/* enum: CRC Fiber channel over IP over ethernet. */
#define QUEUE_CRC_MODE_FCOIPOE 0x4
/* enum: CRC MPA. */
#define QUEUE_CRC_MODE_MPA 0x5
#define QUEUE_CRC_MODE_SPARE_LBN 4
#define QUEUE_CRC_MODE_SPARE_WIDTH 4


/***********************************/
/* MC_CMD_INIT_RXQ
 * Set up a receive queue according to the supplied parameters. The IN
 * arguments end with an address for each 4k of host memory required to back
 * the RXQ.
 */
#define MC_CMD_INIT_RXQ 0x81

/* MC_CMD_INIT_RXQ_IN msgrequest */
#define MC_CMD_INIT_RXQ_IN_LENMIN 36
#define MC_CMD_INIT_RXQ_IN_LENMAX 252
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
 */
#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
/* Desired instance. Must be set to a specific instance, which is a function
 * local queue index.
 */
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28

/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0


/***********************************/
/* MC_CMD_INIT_TXQ
 */
#define MC_CMD_INIT_TXQ 0x82

/* MC_CMD_INIT_TXQ_IN msgrequest */
#define MC_CMD_INIT_TXQ_IN_LENMIN 36
#define MC_CMD_INIT_TXQ_IN_LENMAX 252
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
/* The EVQ to send events to. This is an index originally specified to
 * INIT_EVQ.
 */
#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
/* Desired instance. Must be set to a specific instance, which is a function
 * local queue index.
 */
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28

/* MC_CMD_INIT_TXQ_OUT msgresponse */
#define MC_CMD_INIT_TXQ_OUT_LEN 0


/***********************************/
/* MC_CMD_FINI_EVQ
 * Tear down an EVQ.
 *
 * All DMAQs or EVQs that point to the EVQ to be torn down must be torn down
 * first, or the operation will fail with EBUSY.
 */
#define MC_CMD_FINI_EVQ 0x83

/* MC_CMD_FINI_EVQ_IN msgrequest */
#define MC_CMD_FINI_EVQ_IN_LEN 4
/* Instance of EVQ to destroy. Should be the same instance as that previously
 * passed to INIT_EVQ.
 */
#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0

/* MC_CMD_FINI_EVQ_OUT msgresponse */
#define MC_CMD_FINI_EVQ_OUT_LEN 0


/***********************************/
/* MC_CMD_FINI_RXQ
 * Tear down an RXQ.
 */
#define MC_CMD_FINI_RXQ 0x84

/* MC_CMD_FINI_RXQ_IN msgrequest */
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0

/* MC_CMD_FINI_RXQ_OUT msgresponse */
#define MC_CMD_FINI_RXQ_OUT_LEN 0


/***********************************/
/* MC_CMD_FINI_TXQ
 * Tear down a TXQ.
 */
#define MC_CMD_FINI_TXQ 0x85

/* MC_CMD_FINI_TXQ_IN msgrequest */
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0

/* MC_CMD_FINI_TXQ_OUT msgresponse */
#define MC_CMD_FINI_TXQ_OUT_LEN 0


/***********************************/
/* MC_CMD_DRIVER_EVENT
 * Generate an event on an EVQ belonging to the function issuing the command.
 */
#define MC_CMD_DRIVER_EVENT 0x86

/* MC_CMD_DRIVER_EVENT_IN msgrequest */
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
/* Bits 0 - 63 of event */
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8

/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
#define MC_CMD_DRIVER_EVENT_OUT_LEN 0


/***********************************/
/* MC_CMD_PROXY_CMD
 * Execute an arbitrary MCDI command on behalf of a different function, subject
 * to security restrictions. The command to be proxied follows immediately
 * afterward in the host buffer (or on the UART). This command supersedes
 * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
 */
#define MC_CMD_PROXY_CMD 0x5b

/* MC_CMD_PROXY_CMD_IN msgrequest */
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */

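/* Illustrative sketch (hypothetical helper, not part of this header): how
 * the TARGET handle above might be assembled. The PF index occupies the low
 * 16 bits and the VF index the high 16 bits; a target that is itself a PF
 * uses VF_NULL in the VF field.
 */
#if 0 /* example only */
static u32 example_proxy_target(unsigned int pf, int vf)
{
	u32 target = pf << MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN;

	if (vf < 0)	/* proxy to the PF itself */
		target |= MC_CMD_PROXY_CMD_IN_VF_NULL << MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN;
	else
		target |= (u32)vf << MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN;
	return target;
}
#endif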

/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
 * Allocate a set of buffer table entries using the specified owner ID. This
 * operation allocates the required buffer table entries (and fails if it
 * cannot do so). The buffer table entries will initially be zeroed.
 */
#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87

/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
/* Size of buffer table pages to use, in bytes (note that only a few values are
 * legal on any specific hardware).
 */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4

/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
/* Buffer table IDs for use in DMA descriptors. */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8


/***********************************/
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
 * Reprogram a set of buffer table entries in the specified chunk.
 */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88

/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
/* ID */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
/* Num entries */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
/* Buffer table entry address */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30

/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0


/***********************************/
/* MC_CMD_FREE_BUFTBL_CHUNK
 */
#define MC_CMD_FREE_BUFTBL_CHUNK 0x89

/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0

/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0

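/* Illustrative sketch of the buffer-table lifecycle defined above: allocate
 * a chunk, program entries through the returned handle and first ID, and
 * free the chunk when no queue references it. mcdi_rpc() is a hypothetical
 * transport helper; the unaligned accessors are the usual <asm/unaligned.h>
 * ones.
 */
#if 0 /* example only */
static int example_buftbl_cycle(u64 raw_addr)
{
	u8 alloc_in[MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN] = { 0 };	/* owner ID 0 */
	u8 alloc_out[MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN];
	u8 prog_in[MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(1)] = { 0 };
	u8 free_in[MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN];
	u32 handle, first_id;
	int rc;

	/* Allocate a chunk using 4k pages (legal sizes are hardware-specific). */
	put_unaligned_le32(4096, alloc_in + MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST);
	rc = mcdi_rpc(MC_CMD_ALLOC_BUFTBL_CHUNK, alloc_in, sizeof(alloc_in),
		      alloc_out, sizeof(alloc_out));
	if (rc)
		return rc;
	handle = get_unaligned_le32(alloc_out + MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST);
	first_id = get_unaligned_le32(alloc_out + MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST);

	/* Program the first entry with one raw 64-bit SMC address. */
	put_unaligned_le32(handle, prog_in + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST);
	put_unaligned_le32(first_id, prog_in + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST);
	put_unaligned_le32(1, prog_in + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST);
	put_unaligned_le64(raw_addr, prog_in + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST);
	rc = mcdi_rpc(MC_CMD_PROGRAM_BUFTBL_ENTRIES, prog_in, sizeof(prog_in), NULL, 0);

	/* Release the chunk once the queue using it has been torn down. */
	put_unaligned_le32(handle, free_in + MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST);
	mcdi_rpc(MC_CMD_FREE_BUFTBL_CHUNK, free_in, sizeof(free_in), NULL, 0);
	return rc;
}
#endif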

/***********************************/
/* MC_CMD_FILTER_OP
 * Multiplexed MCDI call for filter operations
 */
#define MC_CMD_FILTER_OP 0x8a

/* MC_CMD_FILTER_OP_IN msgrequest */
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_IN_OP_OFST 0
/* enum: single-recipient filter insert */
#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
/* enum: single-recipient filter remove */
#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
/* enum: multi-recipient filter subscribe */
#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
/* enum: multi-recipient filter unsubscribe */
#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
/* enum: replace one recipient with another (warning - the filter handle may
 * change)
 */
#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
/* filter handle (for remove / unsubscribe operations) */
#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
/* The port ID associated with the v-adaptor which should contain this filter.
 */
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
/* enum: drop packets */
#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
/* enum: receive to MC */
#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
/* enum: loop back to port 0 TX MAC */
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
/* enum: loop back to port 1 TX MAC */
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
/* receive mode */
#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
/* enum: receive to just the specified queue */
#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
/* enum: receive to multiple queues using .1p mapping */
#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
/* enum: install a filter entry that will never match; for test purposes only
 */
#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
 * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
 * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
 * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
 * a valid handle.
 */
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
/* transmit destination (either set the MAC and/or PM bits for explicit
 * control, or set this field to TX_DEST_DEFAULT for sensible default
 * behaviour)
 */
#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
/* enum: request default behaviour (based on filter type) */
#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
/* source MAC address to match (as bytes in network order) */
#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
/* source port to match (as bytes in network order) */
#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
/* destination MAC address to match (as bytes in network order) */
#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
/* destination port to match (as bytes in network order) */
#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
/* Ethernet type to match (as bytes in network order) */
#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
/* Inner VLAN tag to match (as bytes in network order) */
#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
/* Outer VLAN tag to match (as bytes in network order) */
#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
/* IP protocol to match (in low byte; set high byte to 0) */
#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
/* Firmware defined register 1 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
/* source IP address to match (as bytes in network order; set last 12 bytes to
 * 0 for IPv4 address)
 */
#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
/* destination IP address to match (as bytes in network order; set last 12
 * bytes to 0 for IPv4 address)
 */
#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16

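/* Illustrative sketch (hypothetical, using the <asm/unaligned.h> accessors):
 * building an OP_INSERT request that matches on destination MAC only and
 * delivers to a single host RX queue. The 64-bit handle in the response
 * below must be kept for the matching OP_REMOVE.
 */
#if 0 /* example only */
static void example_build_dst_mac_filter(u8 *buf, const u8 dst_mac[6],
					 u32 port_id, u32 rxq)
{
	memset(buf, 0, MC_CMD_FILTER_OP_IN_LEN);
	put_unaligned_le32(MC_CMD_FILTER_OP_IN_OP_INSERT,
			   buf + MC_CMD_FILTER_OP_IN_OP_OFST);
	put_unaligned_le32(port_id, buf + MC_CMD_FILTER_OP_IN_PORT_ID_OFST);
	/* Match criteria: destination MAC only. */
	put_unaligned_le32(1 << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN,
			   buf + MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST);
	memcpy(buf + MC_CMD_FILTER_OP_IN_DST_MAC_OFST, dst_mac,
	       MC_CMD_FILTER_OP_IN_DST_MAC_LEN);
	put_unaligned_le32(MC_CMD_FILTER_OP_IN_RX_DEST_HOST,
			   buf + MC_CMD_FILTER_OP_IN_RX_DEST_OFST);
	put_unaligned_le32(rxq, buf + MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST);
	/* RX_MODE_SIMPLE is 0, already set by the memset. */
	put_unaligned_le32(MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT,
			   buf + MC_CMD_FILTER_OP_IN_TX_DEST_OFST);
}
#endif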
/* MC_CMD_FILTER_OP_OUT msgresponse */
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
 * handles should be considered opaque to the host, although a value of
 * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
 */
#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8


/***********************************/
/* MC_CMD_GET_PARSER_DISP_INFO
 * Get information related to the parser-dispatcher subsystem
 */
#define MC_CMD_GET_PARSER_DISP_INFO 0xe4

/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
/* enum: read the list of supported RX filter matches */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1

/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* number of supported match types */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
/* array of supported match types (valid MATCH_FIELDS values for
 * MC_CMD_FILTER_OP) sorted in decreasing priority order
 */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61

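/* Illustrative sketch (hypothetical helper): scanning the response above to
 * check whether a particular MATCH_FIELDS combination is supported before
 * attempting the corresponding MC_CMD_FILTER_OP insert.
 */
#if 0 /* example only */
static bool example_match_supported(const u8 *resp, size_t resp_len,
				    u32 wanted_fields)
{
	u32 n = get_unaligned_le32(resp +
		MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST);
	u32 i;

	if (resp_len < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(n))
		return false;	/* truncated response */
	/* Entries are sorted in decreasing priority order. */
	for (i = 0; i < n; i++) {
		u32 m = get_unaligned_le32(resp +
			MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST + 4 * i);
		if (m == wanted_fields)
			return true;
	}
	return false;
}
#endif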

/***********************************/
/* MC_CMD_PARSER_DISP_RW
 * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
 */
#define MC_CMD_PARSER_DISP_RW 0xe5

/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
/* enum: RX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
/* enum: Lookup engine */
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
/* enum: read a word of DICPU DMEM or a LUE entry */
#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
/* enum: write a word of DICPU DMEM or a LUE entry */
#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
/* data memory address or LUE index */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
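/* Worked example of the read-modify-write rule above (new = (old & mask) ^
 * value): to force bit 3 to 1 and bit 5 to 0 while preserving all other
 * bits, clear both bits with the AND mask and then set bit 3 back with the
 * XOR value:
 *
 *     mask  = ~(BIT(3) | BIT(5))
 *     value =   BIT(3)
 *
 * To toggle a bit instead, leave it set in the mask and also set it in the
 * XOR value.
 */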
/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
/* value read (for DMEM reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
/* value read (for LUE reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
/* up to 8 32-bit words of additional soft state from the LUE manager (the
 * exact content is firmware-dependent and intended only for debug use)
 */
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32


/***********************************/
/* MC_CMD_GET_PF_COUNT
 * Get number of PFs on the device.
 */
#define MC_CMD_GET_PF_COUNT 0xb6

/* MC_CMD_GET_PF_COUNT_IN msgrequest */
#define MC_CMD_GET_PF_COUNT_IN_LEN 0

/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
/* Identifies the number of PFs on the device. */
#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1


/***********************************/
/* MC_CMD_SET_PF_COUNT
 * Set number of PFs on the device.
 */
#define MC_CMD_SET_PF_COUNT 0xb7

/* MC_CMD_SET_PF_COUNT_IN msgrequest */
#define MC_CMD_SET_PF_COUNT_IN_LEN 4
/* New number of PFs on the device. */
#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0

/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
#define MC_CMD_SET_PF_COUNT_OUT_LEN 0


/***********************************/
/* MC_CMD_GET_PORT_ASSIGNMENT
 * Get port assignment for current PCI function.
 */
#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8

/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0

/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0


/***********************************/
/* MC_CMD_SET_PORT_ASSIGNMENT
 * Set port assignment for current PCI function.
 */
#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9

/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0

/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0


/***********************************/
/* MC_CMD_ALLOC_VIS
 * Allocate VIs for current PCI function.
 */
#define MC_CMD_ALLOC_VIS 0x8b

/* MC_CMD_ALLOC_VIS_IN msgrequest */
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4

/* MC_CMD_ALLOC_VIS_OUT msgresponse */
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
/* The base absolute VI number allocated to this function. Required to
 * correctly interpret wakeup events.
 */
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4

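/* Illustrative sketch of the min/max negotiation above: ask for up to
 * max_vis VIs but accept as few as min_vis; the firmware returns the count
 * it actually granted and the absolute base VI needed to interpret wakeup
 * events. mcdi_rpc() is a hypothetical transport helper.
 */
#if 0 /* example only */
static int example_alloc_vis(u32 min_vis, u32 max_vis, u32 *base, u32 *count)
{
	u8 in[MC_CMD_ALLOC_VIS_IN_LEN];
	u8 out[MC_CMD_ALLOC_VIS_OUT_LEN];
	int rc;

	put_unaligned_le32(min_vis, in + MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST);
	put_unaligned_le32(max_vis, in + MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST);
	rc = mcdi_rpc(MC_CMD_ALLOC_VIS, in, sizeof(in), out, sizeof(out));
	if (rc)
		return rc;
	*count = get_unaligned_le32(out + MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST);
	*base = get_unaligned_le32(out + MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST);
	return 0;
}
#endif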

/***********************************/
/* MC_CMD_FREE_VIS
 * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
 * but not freed.
 */
#define MC_CMD_FREE_VIS 0x8c

/* MC_CMD_FREE_VIS_IN msgrequest */
#define MC_CMD_FREE_VIS_IN_LEN 0

/* MC_CMD_FREE_VIS_OUT msgresponse */
#define MC_CMD_FREE_VIS_OUT_LEN 0


/***********************************/
/* MC_CMD_GET_SRIOV_CFG
 * Get SRIOV config for this PF.
 */
#define MC_CMD_GET_SRIOV_CFG 0xba

/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0

/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
/* Max number of VFs before the SRIOV stride and offset may need to be changed. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
/* RID offset of each subsequent VF from the previous. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16


/***********************************/
/* MC_CMD_SET_SRIOV_CFG
 * Set SRIOV config for this PF.
 */
#define MC_CMD_SET_SRIOV_CFG 0xbb

/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
/* Max number of VFs before the SRIOV stride and offset may need to be changed. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF, or 0 for no change, or
 * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
 */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
/* RID offset of each subsequent VF from the previous, 0 for no change, or
 * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
 */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16

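/* Illustrative sketch (hypothetical mcdi_rpc() transport helper): enabling
 * SRIOV with vf_count VFs while letting the firmware choose the RID offset
 * and stride by passing MC_CMD_RESOURCE_INSTANCE_ANY for both fields.
 */
#if 0 /* example only */
static int example_enable_sriov(u32 vf_count)
{
	u8 in[MC_CMD_SET_SRIOV_CFG_IN_LEN] = { 0 };

	put_unaligned_le32(vf_count, in + MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST);
	put_unaligned_le32(vf_count, in + MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST);
	put_unaligned_le32(1 << MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN,
			   in + MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST);
	put_unaligned_le32(MC_CMD_RESOURCE_INSTANCE_ANY,
			   in + MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST);
	put_unaligned_le32(MC_CMD_RESOURCE_INSTANCE_ANY,
			   in + MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST);
	return mcdi_rpc(MC_CMD_SET_SRIOV_CFG, in, sizeof(in), NULL, 0);
}
#endif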
/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0


/***********************************/
/* MC_CMD_GET_VI_ALLOC_INFO
 * Get information about the number of VIs and the base VI number allocated to
 * this function.
 */
#define MC_CMD_GET_VI_ALLOC_INFO 0x8d

/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0

/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
/* The base absolute VI number allocated to this function. Required to
 * correctly interpret wakeup events.
 */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4


/***********************************/
/* MC_CMD_DUMP_VI_STATE
 * For CmdClient use. Dump pertinent information on a specific absolute VI.
 */
#define MC_CMD_DUMP_VI_STATE 0x8e

/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0

/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
/* The PF part of the function owning this VI. */
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
/* The VF part of the function owning this VI. */
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
/* Base of VIs allocated to this function. */
#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
/* Count of VIs allocated to the owner function. */
#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
/* Base interrupt vector allocated to this function. */
#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
/* Number of interrupt vectors allocated to this function. */
#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
/* Raw evq ptr table data. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
/* Raw evq timer table data. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
/* RXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
/* RXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
/* Reserved, currently 0. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8


/***********************************/
/* MC_CMD_ALLOC_PIOBUF
 * Allocate a push I/O buffer for later use with a TX queue.
 */
#define MC_CMD_ALLOC_PIOBUF 0x8f

/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0

/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0


/***********************************/
/* MC_CMD_FREE_PIOBUF
 * Free a push I/O buffer.
 */
#define MC_CMD_FREE_PIOBUF 0x90

/* MC_CMD_FREE_PIOBUF_IN msgrequest */
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0

/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
#define MC_CMD_FREE_PIOBUF_OUT_LEN 0


/***********************************/
/* MC_CMD_GET_VI_TLP_PROCESSING
 * Get TLP steering and ordering information for a VI.
 */
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0

/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0

/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
/* Transaction processing steering hint 2 for use with the Ev Queue. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
/* Use Relaxed ordering model for TLPs on this VI. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
/* Use ID based ordering for TLPs on this VI. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
/* Set no snoop bit for TLPs on this VI. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
/* Enable TPH for TLPs on this VI. */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0


/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
 * Set TLP steering and ordering information for a VI.
 */
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1

/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
/* Transaction processing steering hint 2 for use with the Ev Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
/* Use Relaxed ordering model for TLPs on this VI. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
/* Use ID based ordering for TLPs on this VI. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
/* Set the no snoop bit for TLPs on this VI. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
/* Enable TPH for TLPs on this VI. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4

/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0


/***********************************/
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
 * Get global PCIe steering and transaction processing configuration.
 */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc

/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
/* enum: MISC. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
/* enum: IDO. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
/* enum: RO. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
/* enum: TPH Type. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3

/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23


/***********************************/
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
 * Set global PCIe steering and transaction processing configuration.
 */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd

/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22

/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0


/***********************************/
/* MC_CMD_SATELLITE_DOWNLOAD
 * Download a new set of images to the satellite CPUs from the host.
 */
#define MC_CMD_SATELLITE_DOWNLOAD 0x91

/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
 * are subtle, and so downloads must proceed in a number of phases.
 *
 * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
 *
 * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
 * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
 * be a checksum (a simple 32-bit sum) of the transferred data. An individual
 * download may be aborted using CHUNK_ID_ABORT.
 *
 * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
 * similar to PHASE_IMEMS.
 *
 * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
 *
 * After any error (a requested abort is not considered to be an error) the
 * sequence must be restarted from PHASE_RESET.
 */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
/* Download phase. (Note: the IDLE phase is used internally and is never valid
 * in a command from the host.)
 */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
/* Target for download. (These match the blob numbers defined in
 * mc_flash_layout.h.)
 */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
/* enum: Last chunk, containing checksum rather than data */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
/* enum: Abort download of this item */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
/* Length of this chunk in bytes */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
/* Data for this chunk */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59

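/* Illustrative sketch of the four-phase sequence described above.
 * send_chunk(), n_chunks(), chunk_data(), chunk_len() and sum32() are all
 * hypothetical helpers: send_chunk() builds one MC_CMD_SATELLITE_DOWNLOAD
 * request (phase, target, chunk ID, length, data) and issues it, and
 * sum32() computes the simple 32-bit sum used as the final checksum chunk.
 */
#if 0 /* example only */
static int example_satellite_download(void)
{
	u32 target, id;
	int rc;

	/* 1) Reset all satellite CPUs. */
	rc = send_chunk(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET,
			MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL, 0, NULL, 0);
	if (rc)
		return rc;

	/* 2) IMEM images (targets 0x0-0xb): sequential chunk IDs, then a
	 * final CHUNK_ID_LAST chunk carrying the checksum. */
	for (target = MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT;
	     target <= MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM;
	     target++) {
		u32 csum = 0;

		for (id = 0; id < n_chunks(target); id++) {
			csum += sum32(chunk_data(target, id),
				      chunk_len(target, id));
			rc = send_chunk(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS,
					target, id, chunk_data(target, id),
					chunk_len(target, id));
			if (rc)
				return rc;	/* restart from PHASE_RESET */
		}
		rc = send_chunk(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS,
				target, MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST,
				&csum, sizeof(csum));
		if (rc)
			return rc;
	}

	/* 3) PHASE_VECTORS for targets 0xc-0xf follows the same pattern
	 * (omitted here). */

	/* 4) Mark everything ready. */
	return send_chunk(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY,
			  MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL, 0, NULL, 0);
}
#endif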
5012/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
5013#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
5014/* Same as MC_CMD_ERR field, but included as 0 in success cases */
5015#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
5016/* Extra status information */
5017#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
5018/* enum: Code download OK, completed. */
5019#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
5020/* enum: Code download aborted as requested. */
5021#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
5022/* enum: Code download OK so far, send next chunk. */
5023#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
5024/* enum: Download phases out of sequence */
5025#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
5026/* enum: Bad target for this phase */
5027#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
5028/* enum: Chunk ID out of sequence */
5029#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
5030/* enum: Chunk length zero or too large */
5031#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
5032/* enum: Checksum was incorrect */
5033#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
5034
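/*
 * Illustrative sketch (not part of the MCDI definitions above): packing one
 * SATELLITE_DOWNLOAD chunk request into a caller-supplied little-endian MCDI
 * payload buffer. The PHASE and TARGET dwords at offsets 0 and 4 are assumed
 * to be filled in the same byte-at-a-time way; the helper name and raw byte
 * interface are hypothetical (the sfc driver uses its own MCDI buffer macros).
 */
static inline void satellite_download_pack_chunk(u8 *inbuf, u32 chunk_id,
						 const u8 *data, u32 len)
{
	u32 i;

	/* CHUNK_ID (offset 8) and CHUNK_LEN (offset 12), little-endian */
	for (i = 0; i < 4; i++) {
		inbuf[MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST + i] =
			(chunk_id >> (8 * i)) & 0xff;
		inbuf[MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST + i] =
			(len >> (8 * i)) & 0xff;
	}
	/* Chunk payload (1..59 dwords) starts at offset 16 */
	for (i = 0; i < len; i++)
		inbuf[MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST + i] = data[i];
}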

/***********************************/
/* MC_CMD_GET_CAPABILITIES
 * Get device capabilities.
 *
 * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
 * reference inherent device capabilities as opposed to current NVRAM config.
 */
#define MC_CMD_GET_CAPABILITIES 0xbe

/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0

/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
/* enum: Standard RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
/* enum: RXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
/* enum: RXDP Test firmware image 3 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
/* enum: RXDP Test firmware image 4 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
/* enum: RXDP Test firmware image 5 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
/* enum: RXDP Test firmware image 6 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
/* enum: RXDP Test firmware image 7 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
/* enum: Standard TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
/* enum: TXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16

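/*
 * Illustrative sketch: testing one capability bit in FLAGS1 of a
 * GET_CAPABILITIES response. LBN gives the bit position within the flags
 * dword and WIDTH the field size (1 for simple flags). The response buffer
 * is assumed to hold the little-endian MCDI payload; the helper name is
 * hypothetical.
 */
static inline bool mcdi_cap_tx_tso(const u8 *outbuf)
{
	u32 flags1 = outbuf[MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST] |
		     (outbuf[MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST + 1] << 8) |
		     (outbuf[MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST + 2] << 16) |
		     ((u32)outbuf[MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST + 3] << 24);

	return (flags1 >> MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN) & 1;
}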

/***********************************/
/* MC_CMD_V2_EXTN
 * Encapsulation for a v2 extended command
 */
#define MC_CMD_V2_EXTN 0x7f

/* MC_CMD_V2_EXTN_IN msgrequest */
#define MC_CMD_V2_EXTN_IN_LEN 4
/* the extended command number */
#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
/* the actual length of the encapsulated command (which is not in the v1
 * header)
 */
#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6

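/*
 * Illustrative sketch: building the V2_EXTN_IN dword from its LBN/WIDTH
 * bitfields (extended command number in bits 0-14, actual encapsulated
 * length in bits 16-25). The helper name is hypothetical.
 */
static inline u32 mcdi_v2_extn_hdr(u32 ext_cmd, u32 actual_len)
{
	return ((ext_cmd & ((1u << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH) - 1))
		<< MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |
	       ((actual_len & ((1u << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH) - 1))
		<< MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN);
}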

/***********************************/
/* MC_CMD_TCM_BUCKET_ALLOC
 * Allocate a pacer bucket (for qau rp or a snapper test)
 */
#define MC_CMD_TCM_BUCKET_ALLOC 0xb2

/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0

/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0


/***********************************/
/* MC_CMD_TCM_BUCKET_FREE
 * Free a pacer bucket
 */
#define MC_CMD_TCM_BUCKET_FREE 0xb3

/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0

/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_TCM_BUCKET_INIT
 * Initialise pacer bucket with a given rate
 */
#define MC_CMD_TCM_BUCKET_INIT 0xb4

/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4

/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0


/***********************************/
/* MC_CMD_TCM_TXQ_INIT
 * Initialise txq in pacer with given options or set options
 */
#define MC_CMD_TCM_TXQ_INIT 0xb5

/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
/* bitmask of the priority queues this txq is inserted into */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
/* an already reserved bucket (typically set to bucket associated with outer
 * vswitch)
 */
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
/* an already reserved bucket (typically set to bucket associated with inner
 * vswitch)
 */
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24

/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0


/***********************************/
/* MC_CMD_LINK_PIOBUF
 * Link a push I/O buffer to a TxQ
 */
#define MC_CMD_LINK_PIOBUF 0x92

/* MC_CMD_LINK_PIOBUF_IN msgrequest */
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
/* Function Local Instance (VI) number. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4

/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
#define MC_CMD_LINK_PIOBUF_OUT_LEN 0


/***********************************/
/* MC_CMD_UNLINK_PIOBUF
 * Unlink a push I/O buffer from a TxQ
 */
#define MC_CMD_UNLINK_PIOBUF 0x93

/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0

/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0


/***********************************/
/* MC_CMD_VSWITCH_ALLOC
 * allocate and initialise a v-switch.
 */
#define MC_CMD_VSWITCH_ALLOC 0x94

/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* The type of v-switch to create. */
#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
/* enum: VLAN */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
/* enum: VEPA */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to support. */
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12

/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0


/***********************************/
/* MC_CMD_VSWITCH_FREE
 * de-allocate a v-switch.
 */
#define MC_CMD_VSWITCH_FREE 0x95

/* MC_CMD_VSWITCH_FREE_IN msgrequest */
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0

/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
#define MC_CMD_VSWITCH_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_VPORT_ALLOC
 * allocate a v-port.
 */
#define MC_CMD_VPORT_ALLOC 0x96

/* MC_CMD_VPORT_ALLOC_IN msgrequest */
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* The type of the new v-port. */
#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
/* enum: VLAN (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
/* enum: VEB (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
/* enum: VEPA (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
/* enum: A normal v-port receives packets which match a specified MAC and/or
 * VLAN.
 */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
/* enum: An expansion v-port receives traffic which doesn't match any other
 * v-port.
 */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
/* enum: A test v-port receives packets which match any filters installed by
 * its downstream components.
 */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
/* Flags controlling v-port creation */
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to insert/remove. */
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16

/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
/* The handle of the new v-port */
#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0


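/*
 * Illustrative sketch: the two optional VLAN tags of VPORT_ALLOC share one
 * dword at VLAN_TAGS_OFST, 16 bits each. Hypothetical helper packing them.
 */
static inline u32 mcdi_vport_vlan_tags(u16 tag0, u16 tag1)
{
	return ((u32)tag0 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN) |
	       ((u32)tag1 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN);
}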
/***********************************/
/* MC_CMD_VPORT_FREE
 * de-allocate a v-port.
 */
#define MC_CMD_VPORT_FREE 0x97

/* MC_CMD_VPORT_FREE_IN msgrequest */
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0

/* MC_CMD_VPORT_FREE_OUT msgresponse */
#define MC_CMD_VPORT_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_VADAPTOR_ALLOC
 * allocate a v-adaptor.
 */
#define MC_CMD_VADAPTOR_ALLOC 0x98

/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* Flags controlling v-adaptor creation */
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12

/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0


/***********************************/
/* MC_CMD_VADAPTOR_FREE
 * de-allocate a v-adaptor.
 */
#define MC_CMD_VADAPTOR_FREE 0x99

/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0

/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
 * assign a port to a PCI function.
 */
#define MC_CMD_EVB_PORT_ASSIGN 0x9a

/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
/* The target function to modify. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16

/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0

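/*
 * Illustrative sketch: EVB_PORT_ASSIGN packs the PF number into bits 0-15
 * and the VF number into bits 16-31 of the FUNCTION dword. Hypothetical
 * helper.
 */
static inline u32 mcdi_evb_port_function(u16 pf, u16 vf)
{
	return ((u32)pf << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
	       ((u32)vf << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN);
}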

/***********************************/
/* MC_CMD_RDWR_A64_REGIONS
 * Assign the 64 bit region addresses.
 */
#define MC_CMD_RDWR_A64_REGIONS 0x9b

/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
/* Write enable bits 0-3, set to write, clear to read. */
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1

/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
 * regardless of the state of the write bits in the request.
 */
#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12

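/*
 * Illustrative sketch (an assumption drawn from the definitions above): the
 * 17th request byte carries the four write-enable bits, bit n set meaning
 * REGIONn is written and clear meaning it is only read back. Hypothetical
 * helper marking one region for write.
 */
static inline void mcdi_a64_mark_write(u8 *inbuf, unsigned int region)
{
	inbuf[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST] |= 1u << region;
}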

/***********************************/
/* MC_CMD_ONLOAD_STACK_ALLOC
 * Allocate an Onload stack ID.
 */
#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c

/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0

/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
/* The handle of the new Onload stack */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0


/***********************************/
/* MC_CMD_ONLOAD_STACK_FREE
 * Free an Onload stack ID.
 */
#define MC_CMD_ONLOAD_STACK_FREE 0x9d

/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0

/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_ALLOC
 * Allocate an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e

/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* The type of context to allocate */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
/* enum: Allocate a context for exclusive use. The key and indirection table
 * must be explicitly configured.
 */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
/* enum: Allocate a context for shared use; this will spread across a range of
 * queues, but the key and indirection table are pre-configured and may not be
 * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
 */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
/* Number of queues spanned by this context, in the range 1-64; valid offsets
 * in the indirection table will be in the range 0 to NUM_QUEUES-1.
 */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8

/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
/* The handle of the new RSS context */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_FREE
 * Free an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_FREE 0x9f

/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0

/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_KEY
 * Set the Toeplitz hash key for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0

/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40

/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0

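/*
 * Illustrative sketch: a SET_KEY request is the 4-byte context handle
 * followed by the 40-byte Toeplitz key. Little-endian handle encoding is
 * assumed; the helper name is hypothetical.
 */
static inline void mcdi_rss_set_key_pack(u8 *inbuf, u32 ctx, const u8 *key)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		inbuf[MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST + i] =
			(ctx >> (8 * i)) & 0xff;
	/* key must point at MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN bytes */
	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN; i++)
		inbuf[MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST + i] = key[i];
}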

/***********************************/
/* MC_CMD_RSS_CONTEXT_GET_KEY
 * Get the Toeplitz hash key for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1

/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0

/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40


/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_TABLE
 * Set the indirection table for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2

/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128

/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0

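/*
 * Illustrative sketch: filling the 128-entry, one-byte-per-entry
 * indirection table so traffic spreads round-robin over num_queues queues
 * (valid entries are 0..NUM_QUEUES-1, so num_queues must be at least 1).
 * The helper name is hypothetical.
 */
static inline void mcdi_rss_table_fill(u8 *inbuf, unsigned int num_queues)
{
	unsigned int i;

	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
		inbuf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST + i] =
			i % num_queues;
}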

/***********************************/
/* MC_CMD_RSS_CONTEXT_GET_TABLE
 * Get the indirection table for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3

/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0

/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128


/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_FLAGS
 * Set various control flags for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1

/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
/* Hash control flags */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1

/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0


/***********************************/
/* MC_CMD_RSS_CONTEXT_GET_FLAGS
 * Get various control flags for an RSS context.
 */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2

/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0

/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
/* Hash control flags */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1


/***********************************/
/* MC_CMD_DOT1P_MAPPING_ALLOC
 * Allocate a .1p mapping.
 */
#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4

/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
 * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
 * referenced RSS contexts must span no more than this number.
 */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4

/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
/* The handle of the new .1p mapping */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0


/***********************************/
/* MC_CMD_DOT1P_MAPPING_FREE
 * Free a .1p mapping.
 */
#define MC_CMD_DOT1P_MAPPING_FREE 0xa5

/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0

/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0


/***********************************/
/* MC_CMD_DOT1P_MAPPING_SET_TABLE
 * Set the mapping table for a .1p mapping.
 */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6

/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
 * handle)
 */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32

/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0


/***********************************/
/* MC_CMD_DOT1P_MAPPING_GET_TABLE
 * Get the mapping table for a .1p mapping.
 */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7

/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0

/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
 * handle)
 */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32


/***********************************/
/* MC_CMD_GET_VECTOR_CFG
 * Get Interrupt Vector config for this PF.
 */
#define MC_CMD_GET_VECTOR_CFG 0xbf

/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0

/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
/* Base absolute interrupt vector number. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8


/***********************************/
/* MC_CMD_SET_VECTOR_CFG
 * Set Interrupt Vector config for this PF.
 */
#define MC_CMD_SET_VECTOR_CFG 0xc0

/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
 * let the system find a suitable base.
 */
#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8

/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0


/***********************************/
/* MC_CMD_RMON_RX_CLASS_STATS
 * Retrieve rmon rx class statistics
 */
#define MC_CMD_RMON_RX_CLASS_STATS 0xc3

/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63

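/*
 * Illustrative sketch: the RMON *_STATS responses are variable length, so
 * the returned byte count implies the number of 4-byte counters (LENMIN/
 * LENMAX bound it to 1..63). Reading counter n from an RX class response,
 * assuming a little-endian payload; the helper name is hypothetical.
 */
static inline u32 mcdi_rmon_stat(const u8 *outbuf, unsigned int n)
{
	const u8 *p = outbuf + MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST +
		      n * MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN;

	return p[0] | (p[1] << 8) | (p[2] << 16) | ((u32)p[3] << 24);
}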

/***********************************/
/* MC_CMD_RMON_TX_CLASS_STATS
 * Retrieve rmon tx class statistics
 */
#define MC_CMD_RMON_TX_CLASS_STATS 0xc4

/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
 * Retrieve rmon rx super_class statistics
 */
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5

/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
 * Retrieve rmon tx super_class statistics
 */
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6

/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
 * Add qid to class for statistics collection
 */
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7

/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
/* class */
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
/* qid */
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
/* flags */
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14

/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0


/***********************************/
/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
 * Add qid to class for statistics collection
 */
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8

/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
/* class */
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
/* qid */
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
/* flags */
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14

/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0


/***********************************/
/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
 * Add qid to class for statistics collection
 */
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9

/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
/* class */
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
/* qid */
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
/* flags */
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14

/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0


/***********************************/
/* MC_CMD_RMON_ALLOC_CLASS
 * Allocate an rmon class
 */
#define MC_CMD_RMON_ALLOC_CLASS 0xca

/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0

/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
/* class */
#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0


/***********************************/
/* MC_CMD_RMON_DEALLOC_CLASS
 * Deallocate an rmon class
 */
#define MC_CMD_RMON_DEALLOC_CLASS 0xcb

/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
/* class */
#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0

/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0


/***********************************/
/* MC_CMD_RMON_ALLOC_SUPER_CLASS
 * Allocate an rmon super_class
 */
#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc

/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0

/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
/* super_class */
#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0


/***********************************/
/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
 * Deallocate an rmon super_class
 */
#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd

/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
/* super_class */
#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0

/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0


/***********************************/
/* MC_CMD_RMON_RX_UP_CONV_STATS
 * Retrieve up converter statistics
 */
#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce

/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_IPI_STATS
 * Retrieve rx ipi stats
 */
#define MC_CMD_RMON_RX_IPI_STATS 0xcf

/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
 * Retrieve rx ipsec cntxt_ptr indexed stats
 */
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0

/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
 * Retrieve rx ipsec port indexed stats
 */
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1

/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
 * Retrieve rx ipsec overflow
 */
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2

/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
 * Add a MAC address to a v-port
 */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8

/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
/* MAC address to add */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6

/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0


/***********************************/
/* MC_CMD_VPORT_DEL_MAC_ADDRESS
 * Delete a MAC address from a v-port
 */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9

/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
/* MAC address to delete */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6

/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0


/***********************************/
/* MC_CMD_VPORT_GET_MAC_ADDRESSES
 * Get the MAC addresses on a v-port
 */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa

/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0

/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
/* The number of MAC addresses returned */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
/* Array of MAC addresses */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41

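/*
 * Illustrative sketch: a GET_MAC_ADDRESSES response is a 4-byte count
 * followed by packed 6-byte MAC addresses. Hypothetical helper returning a
 * pointer to address n; the caller must check n against MACADDR_COUNT.
 */
static inline const u8 *mcdi_vport_mac_addr(const u8 *outbuf, unsigned int n)
{
	return outbuf + MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST +
	       n * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN;
}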

/***********************************/
/* MC_CMD_DUMP_BUFTBL_ENTRIES
 * Dump buffer table entries, mainly for command client debug use. Dumps
 * absolute entries, and does not use chunk handles. All entries must be in
 * range and used for q page mapping, although the latter restriction may be
 * lifted in future.
 */
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab

/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
/* Number of buffer table entries to dump. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4

/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21


/***********************************/
/* MC_CMD_SET_RXDP_CONFIG
 * Set global RXDP configuration settings
 */
#define MC_CMD_SET_RXDP_CONFIG 0xc1

/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1

/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0


/***********************************/
/* MC_CMD_GET_RXDP_CONFIG
 * Get global RXDP configuration settings
 */
#define MC_CMD_GET_RXDP_CONFIG 0xc2

/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0

/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1


/***********************************/
/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
 * Retrieve rx class drop stats
 */
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3

/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
 * Retrieve rx super class drop stats
 */
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4

/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_ERRORS_STATS
 * Retrieve rxdp errors
 */
#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5

/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_RX_OVERFLOW_STATS
 * Retrieve rxdp overflow
 */
#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6

/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_TX_IPI_STATS
 * Retrieve tx ipi stats
 */
#define MC_CMD_RMON_TX_IPI_STATS 0xd7

/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
 * Retrieve tx ipsec counters by cntxt_ptr
 */
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8

/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
 * Retrieve tx ipsec counters by port
 */
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9

/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
 * Retrieve tx ipsec overflow
 */
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda

/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_TX_NOWHERE_STATS
 * Retrieve tx nowhere stats
 */
#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb

/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63


/***********************************/
/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
 * Retrieve tx nowhere qbb stats
 */
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc

/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
/* flags */
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1

/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
/* Array of stats */
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
6519
6520
6521/***********************************/
6522/* MC_CMD_RMON_TX_ERRORS_STATS
6523 * Retrieve txdp errors
6524 */
6525#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
6526
6527/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
6528#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
6529/* flags */
6530#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
6531#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
6532#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
6533#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
6534#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
6535
6536/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
6537#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
6538#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
6539#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
6540/* Array of stats */
6541#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
6542#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
6543#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
6544#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
6545
6546
6547/***********************************/
6548/* MC_CMD_RMON_TX_OVERFLOW_STATS
6549 * Retrieve txdp overflow
6550 */
6551#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
6552
6553/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
6554#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
6555/* flags */
6556#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
6557#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
6558#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
6559#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
6560#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
6561
6562/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
6563#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
6564#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
6565#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
6566/* Array of stats */
6567#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
6568#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
6569#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
6570#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
6571
6572
6573/***********************************/
6574/* MC_CMD_RMON_COLLECT_CLASS_STATS
6575 * Explicitly collect class stats at the specified evb port
6576 */
6577#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
6578
6579/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
6580#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
6581/* The port id associated with the vport/pport at which to collect class stats
6582 */
6583#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
6584
6585/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
6586#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
6587/* class */
6588#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
6589
6590
6591/***********************************/
6592/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
6593 * Explicitly collect super class stats at the specified evb port
6594 */
6595#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
6596
6597/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
6598#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
6599/* The port id associated with the vport/pport at which to collect class stats
6600 */
6601#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
6602
6603/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
6604#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
6605/* super_class */
6606#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
6607
6608
6609/***********************************/
6610/* MC_CMD_GET_CLOCK
6611 * Return the system and DPCPU clock frequencies.
6612 */
6613#define MC_CMD_GET_CLOCK 0xac
6614
6615/* MC_CMD_GET_CLOCK_IN msgrequest */
6616#define MC_CMD_GET_CLOCK_IN_LEN 0
6617
6618/* MC_CMD_GET_CLOCK_OUT msgresponse */
6619#define MC_CMD_GET_CLOCK_OUT_LEN 8
6620/* System frequency, MHz */
6621#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
6622/* DPCPU frequency, MHz */
6623#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
6624
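/* Editor's illustration, not part of the original header: a minimal sketch
 * of issuing MC_CMD_GET_CLOCK with the MCDI helpers this driver uses
 * elsewhere (MCDI_DECLARE_BUF/MCDI_DWORD/efx_mcdi_rpc). The wrapper name
 * is hypothetical.
 */
static int efx_mcdi_get_clock_sketch(struct efx_nic *efx,
				     u32 *sys_mhz, u32 *dpcpu_mhz)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	size_t outlen;
	int rc;

	/* The request carries no payload (IN_LEN is 0) */
	BUILD_BUG_ON(MC_CMD_GET_CLOCK_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CLOCK_OUT_LEN)
		return -EIO;

	*sys_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	*dpcpu_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_DPCPU_FREQ);
	return 0;
}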
6625
6626/***********************************/
6627/* MC_CMD_SET_CLOCK
6628 * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
6629 */
6630#define MC_CMD_SET_CLOCK 0xad
6631
6632/* MC_CMD_SET_CLOCK_IN msgrequest */
6633#define MC_CMD_SET_CLOCK_IN_LEN 12
6634/* Requested system frequency in MHz; 0 leaves unchanged. */
6635#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
6636/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
6637#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
6638/* Requested DPCPU frequency in MHz; 0 leaves unchanged. */
6639#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
6640
6641/* MC_CMD_SET_CLOCK_OUT msgresponse */
6642#define MC_CMD_SET_CLOCK_OUT_LEN 12
6643/* Resulting system frequency in MHz */
6644#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
6645/* Resulting inter-core frequency in MHz */
6646#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
6647/* Resulting DPCPU frequency in MHz */
6648#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
6649
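/* Editor's illustration (hypothetical wrapper): request a new DPCPU
 * frequency while leaving the other clock domains unchanged (a field of 0
 * means "leave unchanged"), and report back the frequency actually set.
 */
static int efx_mcdi_set_dpcpu_clock_sketch(struct efx_nic *efx,
					   u32 req_mhz, u32 *actual_mhz)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLOCK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_CLOCK_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_SYS_FREQ, 0);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_ICORE_FREQ, 0);
	MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_DPCPU_FREQ, req_mhz);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_CLOCK, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SET_CLOCK_OUT_LEN)
		return -EIO;

	/* The response reports the resulting (not requested) frequencies */
	*actual_mhz = MCDI_DWORD(outbuf, SET_CLOCK_OUT_DPCPU_FREQ);
	return 0;
}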
6650
6651/***********************************/
6652/* MC_CMD_DPCPU_RPC
6653 * Send an arbitrary DPCPU message.
6654 */
6655#define MC_CMD_DPCPU_RPC 0xae
6656
6657/* MC_CMD_DPCPU_RPC_IN msgrequest */
6658#define MC_CMD_DPCPU_RPC_IN_LEN 36
6659#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
6660/* enum: RxDPCPU */
6661#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
6662/* enum: TxDPCPU0 */
6663#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
6664/* enum: TxDPCPU1 */
6665#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
6666/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
6667 * initialised to zero
6668 */
6669#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
6670#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
6671#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
6672#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
6673#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
6674#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
6675#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
6676#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
6677#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
6678#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
6679#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
6680#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
6681#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
6682#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
6683#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
6684#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
6685#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
6686#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
6687#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
6688#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
6689#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
6690#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
6691#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
6692#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
6693#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
6694#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
6695#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
6696#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
6697#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
6698#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
6699#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
6700#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
6701#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
6702#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
6703#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
6704#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
6705#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
6706#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
6707#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
6708#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
6709#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
6710#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
6711#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
6712/* Register data to write. Only valid in write/write-read. */
6713#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
6714/* Register address. */
6715#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
6716
6717/* MC_CMD_DPCPU_RPC_OUT msgresponse */
6718#define MC_CMD_DPCPU_RPC_OUT_LEN 36
6719#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
6720/* DATA */
6721#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
6722#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
6723#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
6724#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
6725#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
6726#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
6727#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
6728#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
6729#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
6730#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
6731#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
6732#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
6733
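/* Editor's illustration (hypothetical wrapper): the general shape of a
 * DPCPU RPC. Only the CPU selector is set here; a real caller would also
 * encode a command number and arguments into DATA using the LBN/WIDTH
 * bit positions above. DATA[39:32] belongs to the MC-DPCPU protocol, so
 * the whole request is zeroed first.
 */
static int efx_mcdi_dpcpu_rpc_sketch(struct efx_nic *efx, u32 *rpc_rc)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DPCPU_RPC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DPCPU_RPC_OUT_LEN);
	size_t outlen;
	int rc;

	memset(inbuf, 0, sizeof(inbuf));
	MCDI_SET_DWORD(inbuf, DPCPU_RPC_IN_CPU, MC_CMD_DPCPU_RPC_IN_DPCPU_RX);

	rc = efx_mcdi_rpc(efx, MC_CMD_DPCPU_RPC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_DPCPU_RPC_OUT_LEN)
		return -EIO;

	/* RC is the DPCPU's own result code, distinct from the MCDI rc */
	*rpc_rc = MCDI_DWORD(outbuf, DPCPU_RPC_OUT_RC);
	return 0;
}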
6734
6735/***********************************/
6736/* MC_CMD_TRIGGER_INTERRUPT
6737 * Trigger an interrupt by prodding the BIU.
6738 */
6739#define MC_CMD_TRIGGER_INTERRUPT 0xe3
6740
6741/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
6742#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
6743/* Interrupt level relative to base for function. */
6744#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
6745
6746/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
6747#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
6748
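/* Editor's illustration (hypothetical wrapper): the request is a single
 * dword giving the interrupt level relative to the function's base; the
 * response is empty.
 */
static int efx_mcdi_trigger_interrupt_sketch(struct efx_nic *efx, u32 level)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
	return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}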
6749
6750/***********************************/
6751/* MC_CMD_DUMP_DO
6752 * Take a dump of the DUT state
6753 */
6754#define MC_CMD_DUMP_DO 0xe8
6755
6756/* MC_CMD_DUMP_DO_IN msgrequest */
6757#define MC_CMD_DUMP_DO_IN_LEN 52
6758#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
6759#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
6760#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
6761#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
6762#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
6763#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
6764#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
6765#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
6766#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
6767#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
6768#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
6769#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
6770#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
6771#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
6772#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
6773#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
6774#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
6775#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
6776#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
6777#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
6778#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
6779#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
6780#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
6781#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
6782/* Enum values, see field(s): */
6783/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
6784#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
6785#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
6786#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
6787#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
6788#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
6789#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
6790#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
6791#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
6792#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
6793
6794/* MC_CMD_DUMP_DO_OUT msgresponse */
6795#define MC_CMD_DUMP_DO_OUT_LEN 4
6796#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
6797
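/* Editor's illustration (hypothetical wrapper): the simplest use of
 * MC_CMD_DUMP_DO, taking a dump with the default dumpspec and writing it
 * to the NVRAM dump partition; the custom SRC/DST fields above are only
 * needed when either selector is CUSTOM.
 */
static int efx_mcdi_dump_default_sketch(struct efx_nic *efx, u32 *dump_size)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DUMP_DO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DUMP_DO_OUT_LEN);
	size_t outlen;
	int rc;

	memset(inbuf, 0, sizeof(inbuf));
	MCDI_SET_DWORD(inbuf, DUMP_DO_IN_DUMPSPEC_SRC,
		       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT);
	MCDI_SET_DWORD(inbuf, DUMP_DO_IN_DUMPFILE_DST,
		       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION);

	rc = efx_mcdi_rpc(efx, MC_CMD_DUMP_DO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_DUMP_DO_OUT_LEN)
		return -EIO;

	*dump_size = MCDI_DWORD(outbuf, DUMP_DO_OUT_DUMPFILE_SIZE);
	return 0;
}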
6798
6799/***********************************/
6800/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
6801 * Configure unsolicited dumps
6802 */
6803#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
6804
6805/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
6806#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
6807#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
6808#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
6809/* Enum values, see field(s): */
6810/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
6811#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
6812/* Enum values, see field(s): */
6813/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
6814#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
6815#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
6816#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
6817#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
6818#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
6819#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
6820#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
6821#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
6822#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
6823#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
6824/* Enum values, see field(s): */
6825/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
6826#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
6827/* Enum values, see field(s): */
6828/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
6829#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
6830#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
6831#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
6832#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
6833#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
6834#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
6835#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
6836#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
6837#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
6838
6839
6840/***********************************/
6841/* MC_CMD_SET_PSU
6842 * Adjusts power supply parameters. This is a warranty-voiding operation.
6843 * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
6844 * the parameter is out of range.
6845 */
6846#define MC_CMD_SET_PSU 0xea
6847
6848/* MC_CMD_SET_PSU_IN msgrequest */
6849#define MC_CMD_SET_PSU_IN_LEN 12
6850#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
6851#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
6852#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
6853#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
6854#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
6855/* desired value, e.g. voltage in mV */
6856#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
6857
6858/* MC_CMD_SET_PSU_OUT msgresponse */
6859#define MC_CMD_SET_PSU_OUT_LEN 0
6860
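/* Editor's illustration (hypothetical wrapper): adjust the 0V9 rail's
 * supply voltage. As noted above this is warranty-voiding; the MC returns
 * ENOENT for an unknown rail/parameter and EINVAL for an out-of-range
 * value.
 */
static int efx_mcdi_set_psu_voltage_sketch(struct efx_nic *efx, u32 mv)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PSU_IN_LEN);

	BUILD_BUG_ON(MC_CMD_SET_PSU_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_PSU_IN_PARAM,
		       MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE);
	MCDI_SET_DWORD(inbuf, SET_PSU_IN_RAIL, MC_CMD_SET_PSU_IN_RAIL_0V9);
	MCDI_SET_DWORD(inbuf, SET_PSU_IN_VALUE, mv);	/* voltage in mV */

	return efx_mcdi_rpc(efx, MC_CMD_SET_PSU, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}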
6861
6862/***********************************/
6863/* MC_CMD_GET_FUNCTION_INFO
6864 * Get function information: PF and VF number.
6865 */
6866#define MC_CMD_GET_FUNCTION_INFO 0xec
6867
6868/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
6869#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
6870
6871/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
6872#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
6873#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
6874#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
6875
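/* Editor's illustration (hypothetical wrapper): zero-length request, two
 * dwords back giving the PF and VF numbers of the calling function.
 */
static int efx_mcdi_get_function_info_sketch(struct efx_nic *efx,
					     unsigned int *pf, unsigned int *vf)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_FUNCTION_INFO_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
		return -EIO;

	*pf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	*vf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}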
6876
6877/***********************************/
6878/* MC_CMD_ENABLE_OFFLINE_BIST
6879 * Enters offline BIST mode. All queues are torn down, chip enters quiescent
6880 * mode, calling function gets exclusive MCDI ownership. The only way out is
6881 * reboot.
6882 */
6883#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
6884
6885/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
6886#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
6887
6888/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
6889#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
6890
6891
6892/***********************************/
6893/* MC_CMD_START_KR_EYE_PLOT
6894 * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
6895 * signal.
6896 */
6897#define MC_CMD_START_KR_EYE_PLOT 0xee
6898
6899/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
6900#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
6901#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
6902
6903/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
6904#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
6905
6906
6907/***********************************/
6908/* MC_CMD_POLL_KR_EYE_PLOT
6909 * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
6910 * should call this command repeatedly after starting eye plot, until no more
6911 * data is returned.
6912 */
6913#define MC_CMD_POLL_KR_EYE_PLOT 0xef
6914
6915/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
6916#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
6917
6918/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
6919#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
6920#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
6921#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
6922#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
6923#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
6924#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
6925#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
6926
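/* Editor's illustration (hypothetical wrapper): start an eye plot on one
 * lane, then poll repeatedly, as the comment above prescribes, until a
 * zero-length response signals that all rows of BER samples have been
 * consumed. Each response carries up to 126 16-bit samples.
 */
static int efx_mcdi_kr_eye_plot_sketch(struct efx_nic *efx, u32 lane)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_KR_EYE_PLOT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, START_KR_EYE_PLOT_IN_LANE, lane);
	rc = efx_mcdi_rpc(efx, MC_CMD_START_KR_EYE_PLOT,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);
	if (rc)
		return rc;

	do {
		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_KR_EYE_PLOT, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		/* outlen / 2 samples in this row; a real caller would copy
		 * them out of outbuf here.
		 */
	} while (outlen != 0);

	return 0;
}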
6927
6928/***********************************/
6929/* MC_CMD_READ_FUSES
6930 * Read data programmed into the device One-Time-Programmable (OTP) fuses.
6931 */
6932#define MC_CMD_READ_FUSES 0xf0
6933
6934/* MC_CMD_READ_FUSES_IN msgrequest */
6935#define MC_CMD_READ_FUSES_IN_LEN 8
6936/* Offset in OTP to read */
6937#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
6938/* Length of data to read in bytes */
6939#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
6940
6941/* MC_CMD_READ_FUSES_OUT msgresponse */
6942#define MC_CMD_READ_FUSES_OUT_LENMIN 4
6943#define MC_CMD_READ_FUSES_OUT_LENMAX 252
6944#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
6945/* Length of returned OTP data in bytes */
6946#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
6947/* Returned data */
6948#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
6949#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
6950#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
6951#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
6952
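/* Editor's illustration (hypothetical wrapper): read a span of OTP data,
 * splitting it into chunks because one response carries at most
 * MC_CMD_READ_FUSES_OUT_DATA_MAXNUM (248) bytes.
 */
static int efx_mcdi_read_fuses_sketch(struct efx_nic *efx, u32 offset,
				      u8 *buf, size_t len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_READ_FUSES_OUT_LENMAX);
	size_t outlen;
	int rc;

	while (len) {
		u32 chunk = min_t(size_t, len,
				  MC_CMD_READ_FUSES_OUT_DATA_MAXNUM);

		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, offset);
		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, chunk);

		rc = efx_mcdi_rpc(efx, MC_CMD_READ_FUSES,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_READ_FUSES_OUT_LEN(chunk) ||
		    MCDI_DWORD(outbuf, READ_FUSES_OUT_LENGTH) < chunk)
			return -EIO;

		memcpy(buf, MCDI_PTR(outbuf, READ_FUSES_OUT_DATA), chunk);
		buf += chunk;
		offset += chunk;
		len -= chunk;
	}
	return 0;
}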
6953
6954/***********************************/
6955/* MC_CMD_KR_TUNE
6956 * Get or set KR Serdes RXEQ and TX Driver settings
6957 */
6958#define MC_CMD_KR_TUNE 0xf1
6959
6960/* MC_CMD_KR_TUNE_IN msgrequest */
6961#define MC_CMD_KR_TUNE_IN_LENMIN 4
6962#define MC_CMD_KR_TUNE_IN_LENMAX 252
6963#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
6964/* Requested operation */
6965#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
6966#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
6967/* enum: Get current RXEQ settings */
6968#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
6969/* enum: Override RXEQ settings */
6970#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
6971/* enum: Get current TX Driver settings */
6972#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
6973/* enum: Override TX Driver settings */
6974#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
6975/* enum: Force KR Serdes reset / recalibration */
6976#define MC_CMD_KR_TUNE_IN_RECAL 0x4
6977/* Align the arguments to 32 bits */
6978#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
6979#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
6980/* Arguments specific to the operation */
6981#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
6982#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
6983#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
6984#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
6985
6986/* MC_CMD_KR_TUNE_OUT msgresponse */
6987#define MC_CMD_KR_TUNE_OUT_LEN 0
6988
6989/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
6990#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
6991/* Requested operation */
6992#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
6993#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
6994/* Align the arguments to 32 bits */
6995#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
6996#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
6997
6998/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
6999#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
7000#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
7001#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
7002/* RXEQ Parameter */
7003#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
7004#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
7005#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
7006#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
7007#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
7008#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
7009/* enum: Attenuation (0-15) */
7010#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
7011/* enum: CTLE Boost (0-15) */
7012#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
7013/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
7014#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
7015/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
7016#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
7017/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
7018#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
7019/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
7020#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
7021/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
7022#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
7023#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
7024#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
7025#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
7026#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
7027#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
7028#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
7029#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
7030#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
7031#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
7032#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
7033#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
7034#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
7035#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
7036#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
7037#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
7038
7039/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
7040#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
7041#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
7042#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
7043/* Requested operation */
7044#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
7045#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
7046/* Align the arguments to 32 bits */
7047#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
7048#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
7049/* RXEQ Parameter */
7050#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
7051#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
7052#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
7053#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
7054#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
7055#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
7056/* Enum values, see field(s): */
7057/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
7058#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
7059#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
7060/* Enum values, see field(s): */
7061/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
7062#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
7063#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
7064#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
7065#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
7066#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
7067#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
7068#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
7069#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
7070
7071/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
7072#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
7073
7074/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
7075#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
7076/* Requested operation */
7077#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
7078#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
7079/* Align the arguments to 32 bits */
7080#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
7081#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
7082
7083/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
7084#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
7085
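/* Editor's illustration (hypothetical wrapper): fetch the current RXEQ
 * parameters and unpack one with plain shifts and masks from the LBN/WIDTH
 * values above: ID in bits 7:0, lane in bits 10:8, current value in bits
 * 31:24 of each little-endian dword. The op code occupies the low byte of
 * the first request dword, so a dword write of the enum value also clears
 * the reserved padding.
 */
static int efx_mcdi_kr_rxeq_get_sketch(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX);
	size_t outlen;
	unsigned int i;
	int rc;

	MCDI_SET_DWORD(inbuf, KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP,
		       MC_CMD_KR_TUNE_IN_RXEQ_GET);

	rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	for (i = 0; i * 4 < outlen; i++) {
		u32 param = le32_to_cpup((__le32 *)MCDI_PTR(outbuf,
				KR_TUNE_RXEQ_GET_OUT_PARAM) + i);
		u8 id = (param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN) &
			((1 << MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH) - 1);
		u8 cur = param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN;

		netif_dbg(efx, hw, efx->net_dev, "RXEQ param %#x = %#x\n",
			  id, cur);
	}
	return 0;
}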
7086
7087/***********************************/
7088/* MC_CMD_PCIE_TUNE
7089 * Get or set PCIE Serdes RXEQ and TX Driver settings
7090 */
7091#define MC_CMD_PCIE_TUNE 0xf2
7092
7093/* MC_CMD_PCIE_TUNE_IN msgrequest */
7094#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
7095#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
7096#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
7097/* Requested operation */
7098#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
7099#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
7100/* enum: Get current RXEQ settings */
7101#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
7102/* enum: Override RXEQ settings */
7103#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
7104/* enum: Get current TX Driver settings */
7105#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
7106/* enum: Override TX Driver settings */
7107#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
7108/* Align the arguments to 32 bits */
7109#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
7110#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
7111/* Arguments specific to the operation */
7112#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
7113#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
7114#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
7115#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
7116
7117/* MC_CMD_PCIE_TUNE_OUT msgresponse */
7118#define MC_CMD_PCIE_TUNE_OUT_LEN 0
7119
7120/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
7121#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
7122/* Requested operation */
7123#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
7124#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
7125/* Align the arguments to 32 bits */
7126#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
7127#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
7128
7129/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
7130#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
7131#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
7132#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
7133/* RXEQ Parameter */
7134#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
7135#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
7136#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
7137#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
7138#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
7139#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
7140/* enum: Attenuation (0-15) */
7141#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
7142/* enum: CTLE Boost (0-15) */
7143#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
7144/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
7145#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
7146/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
7147#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
7148/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
7149#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
7150/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
7151#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
7152/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
7153#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
7154#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
7155#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
7156#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
7157#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
7158#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
7159#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
7160#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
7161#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
7162#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
7163#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
7164#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
7165#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
7166#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
7167#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
7168#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
7169
7170/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
7171#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
7172/* Requested operation */
7173#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
7174#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
7175/* Align the arguments to 32 bits */
7176#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
7177#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
7178
7179/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
7180#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
7181#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
7182#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
7183/* TXEQ Parameter */
7184#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
7185#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
7186#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
7187#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
7188#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
7189#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
7190/* enum: TxMargin (PIPE) */
7191#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
7192/* enum: TxSwing (PIPE) */
7193#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
7194/* enum: De-emphasis coefficient C(-1) (PIPE) */
7195#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
7196/* enum: De-emphasis coefficient C(0) (PIPE) */
7197#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
7198/* enum: De-emphasis coefficient C(+1) (PIPE) */
7199#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
7200#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
7201#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
7202/* Enum values, see field(s): */
7203/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
7204#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
7205#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
7206#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
7207#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
7208
7209
7210/***********************************/
7211/* MC_CMD_LICENSING
7212 * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
7213 */
7214#define MC_CMD_LICENSING 0xf3
7215
7216/* MC_CMD_LICENSING_IN msgrequest */
7217#define MC_CMD_LICENSING_IN_LEN 4
7218/* identifies the type of operation requested */
7219#define MC_CMD_LICENSING_IN_OP_OFST 0
7220/* enum: re-read and apply licenses after a license key partition update; note
7221 * that this operation returns a zero-length response
7222 */
7223#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
7224/* enum: report counts of installed licenses */
7225#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
7226
7227/* MC_CMD_LICENSING_OUT msgresponse */
7228#define MC_CMD_LICENSING_OUT_LEN 28
7229/* count of application keys which are valid */
7230#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
7231/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
7232 * MC_CMD_FC_OP_LICENSE)
7233 */
7234#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
7235/* count of application keys which are invalid due to being blacklisted */
7236#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
7237/* count of application keys which are invalid due to being unverifiable */
7238#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
7239/* count of application keys which are invalid due to being for the wrong node
7240 */
7241#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
7242/* licensing state (for diagnostics; the exact meaning of the bits in this
7243 * field are private to the firmware)
7244 */
7245#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
7246/* licensing subsystem self-test report (for manftest) */
7247#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
7248/* enum: licensing subsystem self-test failed */
7249#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
7250/* enum: licensing subsystem self-test passed */
7251#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
7252
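/* Editor's illustration (hypothetical wrapper): query the installed-key
 * counts with OP_GET_KEY_STATS. (OP_UPDATE_LICENSE would use the same
 * request shape but returns a zero-length response.)
 */
static int efx_mcdi_licensing_stats_sketch(struct efx_nic *efx,
					   u32 *valid, u32 *invalid)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_IN_OP,
		       MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);

	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LICENSING_OUT_LEN)
		return -EIO;

	*valid = MCDI_DWORD(outbuf, LICENSING_OUT_VALID_APP_KEYS);
	*invalid = MCDI_DWORD(outbuf, LICENSING_OUT_INVALID_APP_KEYS);
	return 0;
}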
7253
7254/***********************************/
7255/* MC_CMD_MC2MC_PROXY
7256 * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
7257 * This will fail on a single-core system.
7258 */
7259#define MC_CMD_MC2MC_PROXY 0xf4
7260
7261
7262#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 13cb40fe90c1..8d33da6697fb 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1,6 +1,6 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -36,7 +36,7 @@ struct efx_mcdi_phy_data {
 static int
 efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
 {
-	u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
 	size_t outlen;
 	int rc;
 
@@ -78,7 +78,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
			     u32 flags, u32 loopback_mode,
			     u32 loopback_speed)
 {
-	u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
 	int rc;
 
 	BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
@@ -102,7 +102,7 @@ fail:
 
 static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
 {
-	u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
 	size_t outlen;
 	int rc;
 
@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
 	if (rc)
 		goto fail;
 
-	if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+	if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+		      MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
 		rc = -EIO;
 		goto fail;
 	}
@@ -125,16 +126,16 @@ fail:
 	return rc;
 }
 
-int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
-		       unsigned int prtad, unsigned int devad, u16 addr,
-		       u16 *value_out, u32 *status_out)
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+			      int prtad, int devad, u16 addr)
 {
-	u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
-	u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
+	struct efx_nic *efx = netdev_priv(net_dev);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
 	size_t outlen;
 	int rc;
 
-	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
 	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
 	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
 	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
@@ -144,25 +145,27 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
 	if (rc)
 		goto fail;
 
-	*value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-	*status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
-	return 0;
+	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+	    MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+
+	return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
 
 fail:
 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
-int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
-			unsigned int prtad, unsigned int devad, u16 addr,
-			u16 value, u32 *status_out)
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+			       int prtad, int devad, u16 addr, u16 value)
 {
-	u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
-	u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
+	struct efx_nic *efx = netdev_priv(net_dev);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
 	size_t outlen;
 	int rc;
 
-	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
 	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
 	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
 	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
@@ -173,7 +176,10 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
 	if (rc)
 		goto fail;
 
-	*status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
+	if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+	    MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+
 	return 0;
 
 fail:
@@ -304,10 +310,37 @@ static u32 mcdi_to_ethtool_media(u32 media)
 	}
 }
 
+static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+				     struct efx_link_state *link_state,
+				     u32 speed, u32 flags, u32 fcntl)
+{
+	switch (fcntl) {
+	case MC_CMD_FCNTL_AUTO:
+		WARN_ON(1); /* This is not a link mode */
+		link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+		break;
+	case MC_CMD_FCNTL_BIDIR:
+		link_state->fc = EFX_FC_TX | EFX_FC_RX;
+		break;
+	case MC_CMD_FCNTL_RESPOND:
+		link_state->fc = EFX_FC_RX;
+		break;
+	default:
+		WARN_ON(1);
+	case MC_CMD_FCNTL_OFF:
+		link_state->fc = 0;
+		break;
+	}
+
+	link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+	link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+	link_state->speed = speed;
+}
+
 static int efx_mcdi_phy_probe(struct efx_nic *efx)
 {
 	struct efx_mcdi_phy_data *phy_data;
-	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
 	u32 caps;
 	int rc;
 
@@ -403,7 +436,7 @@ fail:
 	return rc;
 }
 
-int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
+int efx_mcdi_port_reconfigure(struct efx_nic *efx)
 {
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 caps = (efx->link_advertising ?
@@ -414,37 +447,10 @@ int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
				 efx->loopback_mode, 0);
 }
 
-void efx_mcdi_phy_decode_link(struct efx_nic *efx,
-			      struct efx_link_state *link_state,
-			      u32 speed, u32 flags, u32 fcntl)
-{
-	switch (fcntl) {
-	case MC_CMD_FCNTL_AUTO:
-		WARN_ON(1); /* This is not a link mode */
-		link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
-		break;
-	case MC_CMD_FCNTL_BIDIR:
-		link_state->fc = EFX_FC_TX | EFX_FC_RX;
-		break;
-	case MC_CMD_FCNTL_RESPOND:
-		link_state->fc = EFX_FC_RX;
-		break;
-	default:
-		WARN_ON(1);
-	case MC_CMD_FCNTL_OFF:
-		link_state->fc = 0;
-		break;
-	}
-
-	link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
-	link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
-	link_state->speed = speed;
-}
-
 /* Verify that the forced flow control settings (!EFX_FC_AUTO) are
  * supported by the link partner. Warn the user if this isn't the case
  */
-void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
 {
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 rmtadv;
@@ -472,7 +478,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
 static bool efx_mcdi_phy_poll(struct efx_nic *efx)
 {
 	struct efx_link_state old_state = efx->link_state;
-	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
 	int rc;
 
 	WARN_ON(!mutex_is_locked(&efx->mac_lock));
@@ -507,7 +513,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
 static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
-	u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
 	int rc;
 
 	ecmd->supported =
@@ -579,7 +585,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
 
 static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
 {
-	u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
 	size_t outlen;
 	int rc;
 
@@ -615,17 +621,15 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
 	unsigned int retry, i, count = 0;
 	size_t outlen;
 	u32 status;
-	u8 *buf, *ptr;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+	u8 *ptr;
 	int rc;
 
-	buf = kzalloc(0x100, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
-
 	BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
-	MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
-	rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
-			  NULL, 0, NULL);
+	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+	rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+			  inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
 	if (rc)
 		goto out;
 
@@ -633,11 +637,11 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
 	for (retry = 0; retry < 100; ++retry) {
 		BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
 		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
-				  buf, 0x100, &outlen);
+				  outbuf, sizeof(outbuf), &outlen);
 		if (rc)
 			goto out;
 
-		status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+		status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
 		if (status != MC_CMD_POLL_BIST_RUNNING)
 			goto finished;
 
@@ -654,7 +658,7 @@ finished:
 	if (efx->phy_type == PHY_TYPE_SFT9001B &&
 	    (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
 	     bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
-		ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+		ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
 		if (status == MC_CMD_POLL_BIST_PASSED &&
 		    outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
 			for (i = 0; i < 8; i++) {
@@ -668,8 +672,6 @@ finished:
 	rc = count;
 
 out:
-	kfree(buf);
-
 	return rc;
 }
 
@@ -744,8 +746,8 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
 static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
					  struct ethtool_eeprom *ee, u8 *data)
 {
-	u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
-	u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
 	size_t outlen;
 	int rc;
 	unsigned int payload_len;
@@ -785,8 +787,7 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
			space_remaining : payload_len;
 
 		memcpy(user_data,
-		       outbuf + page_off +
-		       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
+		       MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off,
 		       to_copy);
 
 		space_remaining -= to_copy;
@@ -813,10 +814,10 @@ static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
 	}
 }
 
-const struct efx_phy_operations efx_mcdi_phy_ops = {
+static const struct efx_phy_operations efx_mcdi_phy_ops = {
 	.probe = efx_mcdi_phy_probe,
 	.init = efx_port_dummy_op_int,
-	.reconfigure = efx_mcdi_phy_reconfigure,
+	.reconfigure = efx_mcdi_port_reconfigure,
 	.poll = efx_mcdi_phy_poll,
 	.fini = efx_port_dummy_op_void,
 	.remove = efx_mcdi_phy_remove,
@@ -828,3 +829,199 @@ const struct efx_phy_operations efx_mcdi_phy_ops = {
828 .get_module_eeprom = efx_mcdi_phy_get_module_eeprom, 829 .get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
829 .get_module_info = efx_mcdi_phy_get_module_info, 830 .get_module_info = efx_mcdi_phy_get_module_info,
830}; 831};
832
833u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
834{
835 struct efx_mcdi_phy_data *phy_data = efx->phy_data;
836
837 return phy_data->supported_cap;
838}
839
840static unsigned int efx_mcdi_event_link_speed[] = {
841 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
842 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
843 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
844};
845
846void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
847{
848 u32 flags, fcntl, speed, lpa;
849
850 speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
851 EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
852 speed = efx_mcdi_event_link_speed[speed];
853
854 flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
855 fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
856 lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
857
858 /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
859 * which is only run after flushing the event queues. Therefore, it
860 * is safe to modify the link state outside of the mac_lock here.
861 */
862 efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
863
864 efx_mcdi_phy_check_fcntl(efx, lpa);
865
866 efx_link_status_changed(efx);
867}
868
869int efx_mcdi_set_mac(struct efx_nic *efx)
870{
871 u32 fcntl;
872 MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
873
874 BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
875
876 memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
877 efx->net_dev->dev_addr, ETH_ALEN);
878
879 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
880 EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
881 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
882
883 /* Set simple MAC filter for Siena */
884 MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
885 SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
886
887 switch (efx->wanted_fc) {
888 case EFX_FC_RX | EFX_FC_TX:
889 fcntl = MC_CMD_FCNTL_BIDIR;
890 break;
891 case EFX_FC_RX:
892 fcntl = MC_CMD_FCNTL_RESPOND;
893 break;
894 default:
895 fcntl = MC_CMD_FCNTL_OFF;
896 break;
897 }
898 if (efx->wanted_fc & EFX_FC_AUTO)
899 fcntl = MC_CMD_FCNTL_AUTO;
900 if (efx->fc_disable)
901 fcntl = MC_CMD_FCNTL_OFF;
902
903 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
904
905 return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
906 NULL, 0, NULL);
907}
908
909bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
910{
911 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
912 size_t outlength;
913 int rc;
914
915 BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
916
917 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
918 outbuf, sizeof(outbuf), &outlength);
919 if (rc) {
920 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
921 __func__, rc);
922 return true;
923 }
924
925 return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
926}
927
928static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
929 u32 dma_len, int enable, int clear)
930{
931 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
932 int rc;
933 int period = enable ? 1000 : 0;
934
935 BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
936
937 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
938 MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
939 MAC_STATS_IN_DMA, !!enable,
940 MAC_STATS_IN_CLEAR, clear,
941 MAC_STATS_IN_PERIODIC_CHANGE, 1,
942 MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
943 MAC_STATS_IN_PERIODIC_CLEAR, 0,
944 MAC_STATS_IN_PERIODIC_NOEVENT, 1,
945 MAC_STATS_IN_PERIOD_MS, period);
946 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
947
948 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
949 NULL, 0, NULL);
950 if (rc)
951 goto fail;
952
953 return 0;
954
955fail:
956 netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
957 __func__, enable ? "enable" : "disable", rc);
958 return rc;
959}
960
961void efx_mcdi_mac_start_stats(struct efx_nic *efx)
962{
963 __le64 *dma_stats = efx->stats_buffer.addr;
964
965 dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
966
967 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
968 MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
969}
970
971void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
972{
973 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
974}
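efx_mcdi_mac_start_stats() and efx_mcdi_mac_stop_stats() bracket the MC's periodic statistics DMA. One plausible pairing (a sketch only; the helper name and the exact sequencing are assumptions, not taken from this patch) halts the DMA before reconfiguring the MAC so the MC does not write into the buffer mid-update, then restarts it:

static int example_reconfigure_with_stats(struct efx_nic *efx)
{
	int rc;

	efx_mcdi_mac_stop_stats(efx);	/* halt periodic DMA */
	rc = efx_mcdi_set_mac(efx);	/* push new settings */
	efx_mcdi_mac_start_stats(efx);	/* resume periodic DMA */
	return rc;
}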
975
976int efx_mcdi_port_probe(struct efx_nic *efx)
977{
978 int rc;
979
980 /* Hook in PHY operations table */
981 efx->phy_op = &efx_mcdi_phy_ops;
982
983 /* Set up MDIO structure for PHY */
984 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
985 efx->mdio.mdio_read = efx_mcdi_mdio_read;
986 efx->mdio.mdio_write = efx_mcdi_mdio_write;
987
988 /* Fill out MDIO structure, loopback modes, and initial link state */
989 rc = efx->phy_op->probe(efx);
990 if (rc != 0)
991 return rc;
992
993 /* Allocate buffer for stats */
994 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
995 MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
996 if (rc)
997 return rc;
998 netif_dbg(efx, probe, efx->net_dev,
999 "stats buffer at %llx (virt %p phys %llx)\n",
1000 (u64)efx->stats_buffer.dma_addr,
1001 efx->stats_buffer.addr,
1002 (u64)virt_to_phys(efx->stats_buffer.addr));
1003
1004 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
1005
1006 return 0;
1007}
1008
1009void efx_mcdi_port_remove(struct efx_nic *efx)
1010{
1011 efx->phy_op->remove(efx);
1012 efx_nic_free_buffer(efx, &efx->stats_buffer);
1013}
1014
1015/* Get physical port number (EF10 only; on Siena it is the same as the PF number) */
1016int efx_mcdi_port_get_number(struct efx_nic *efx)
1017{
1018 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
1019 int rc;
1020
1021 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
1022 outbuf, sizeof(outbuf), NULL);
1023 if (rc)
1024 return rc;
1025
1026 return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
1027}
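The non-negative return value doubles as the port number, with negative values carrying the MCDI error, so a probe path would plausibly consume it as below (a sketch; the helper is invented for illustration):

static int example_probe_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_port_get_number(efx);

	if (rc < 0)
		return rc;
	efx->port_num = rc;	/* non-negative result is the port */
	return 0;
}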
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 9acfd6696ffb..8ff954c59efa 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index a97dbbd2de99..16824fecc5ee 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2011 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 08f825b71ac8..a77a8bd2dd70 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -1,194 +1,32 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference. 8 * by the Free Software Foundation, incorporated herein by reference.
9 */ 9 */
10 10
11#include <linux/bitops.h>
12#include <linux/module.h> 11#include <linux/module.h>
13#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
14#include <linux/delay.h>
15#include <linux/slab.h> 13#include <linux/slab.h>
16#include <linux/rtnetlink.h> 14#include <linux/rtnetlink.h>
17 15
18#include "net_driver.h" 16#include "net_driver.h"
19#include "spi.h"
20#include "efx.h" 17#include "efx.h"
21#include "nic.h"
22#include "mcdi.h"
23#include "mcdi_pcol.h"
24
25#define EFX_SPI_VERIFY_BUF_LEN 16
26
27struct efx_mtd_partition {
28 struct mtd_info mtd;
29 union {
30 struct {
31 bool updating;
32 u8 nvram_type;
33 u16 fw_subtype;
34 } mcdi;
35 size_t offset;
36 };
37 const char *type_name;
38 char name[IFNAMSIZ + 20];
39};
40
41struct efx_mtd_ops {
42 int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
43 size_t *retlen, u8 *buffer);
44 int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
45 int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
46 size_t *retlen, const u8 *buffer);
47 int (*sync)(struct mtd_info *mtd);
48};
49
50struct efx_mtd {
51 struct list_head node;
52 struct efx_nic *efx;
53 const struct efx_spi_device *spi;
54 const char *name;
55 const struct efx_mtd_ops *ops;
56 size_t n_parts;
57 struct efx_mtd_partition part[0];
58};
59
60#define efx_for_each_partition(part, efx_mtd) \
61 for ((part) = &(efx_mtd)->part[0]; \
62 (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
63 (part)++)
64 18
65#define to_efx_mtd_partition(mtd) \ 19#define to_efx_mtd_partition(mtd) \
66 container_of(mtd, struct efx_mtd_partition, mtd) 20 container_of(mtd, struct efx_mtd_partition, mtd)
67 21
68static int falcon_mtd_probe(struct efx_nic *efx);
69static int siena_mtd_probe(struct efx_nic *efx);
70
71/* SPI utilities */
72
73static int
74efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
75{
76 struct efx_mtd *efx_mtd = part->mtd.priv;
77 const struct efx_spi_device *spi = efx_mtd->spi;
78 struct efx_nic *efx = efx_mtd->efx;
79 u8 status;
80 int rc, i;
81
82 /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
83 for (i = 0; i < 40; i++) {
84 __set_current_state(uninterruptible ?
85 TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
86 schedule_timeout(HZ / 10);
87 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
88 &status, sizeof(status));
89 if (rc)
90 return rc;
91 if (!(status & SPI_STATUS_NRDY))
92 return 0;
93 if (signal_pending(current))
94 return -EINTR;
95 }
96 pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
97 return -ETIMEDOUT;
98}
99
100static int
101efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
102{
103 const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
104 SPI_STATUS_BP0);
105 u8 status;
106 int rc;
107
108 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
109 &status, sizeof(status));
110 if (rc)
111 return rc;
112
113 if (!(status & unlock_mask))
114 return 0; /* already unlocked */
115
116 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
117 if (rc)
118 return rc;
119 rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
120 if (rc)
121 return rc;
122
123 status &= ~unlock_mask;
124 rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
125 NULL, sizeof(status));
126 if (rc)
127 return rc;
128 rc = falcon_spi_wait_write(efx, spi);
129 if (rc)
130 return rc;
131
132 return 0;
133}
134
135static int
136efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
137{
138 struct efx_mtd *efx_mtd = part->mtd.priv;
139 const struct efx_spi_device *spi = efx_mtd->spi;
140 struct efx_nic *efx = efx_mtd->efx;
141 unsigned pos, block_len;
142 u8 empty[EFX_SPI_VERIFY_BUF_LEN];
143 u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
144 int rc;
145
146 if (len != spi->erase_size)
147 return -EINVAL;
148
149 if (spi->erase_command == 0)
150 return -EOPNOTSUPP;
151
152 rc = efx_spi_unlock(efx, spi);
153 if (rc)
154 return rc;
155 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
156 if (rc)
157 return rc;
158 rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
159 NULL, 0);
160 if (rc)
161 return rc;
162 rc = efx_spi_slow_wait(part, false);
163
164 /* Verify the entire region has been wiped */
165 memset(empty, 0xff, sizeof(empty));
166 for (pos = 0; pos < len; pos += block_len) {
167 block_len = min(len - pos, sizeof(buffer));
168 rc = falcon_spi_read(efx, spi, start + pos, block_len,
169 NULL, buffer);
170 if (rc)
171 return rc;
172 if (memcmp(empty, buffer, block_len))
173 return -EIO;
174
175 /* Avoid locking up the system */
176 cond_resched();
177 if (signal_pending(current))
178 return -EINTR;
179 }
180
181 return rc;
182}
183
184/* MTD interface */ 22/* MTD interface */
185 23
186static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) 24static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
187{ 25{
188 struct efx_mtd *efx_mtd = mtd->priv; 26 struct efx_nic *efx = mtd->priv;
189 int rc; 27 int rc;
190 28
191 rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len); 29 rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
192 if (rc == 0) { 30 if (rc == 0) {
193 erase->state = MTD_ERASE_DONE; 31 erase->state = MTD_ERASE_DONE;
194 } else { 32 } else {
@@ -202,13 +40,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
202static void efx_mtd_sync(struct mtd_info *mtd) 40static void efx_mtd_sync(struct mtd_info *mtd)
203{ 41{
204 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 42 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
205 struct efx_mtd *efx_mtd = mtd->priv; 43 struct efx_nic *efx = mtd->priv;
206 int rc; 44 int rc;
207 45
208 rc = efx_mtd->ops->sync(mtd); 46 rc = efx->type->mtd_sync(mtd);
209 if (rc) 47 if (rc)
210 pr_err("%s: %s sync failed (%d)\n", 48 pr_err("%s: %s sync failed (%d)\n",
211 part->name, efx_mtd->name, rc); 49 part->name, part->dev_type_name, rc);
212} 50}
213 51
214static void efx_mtd_remove_partition(struct efx_mtd_partition *part) 52static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -222,62 +60,44 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
222 ssleep(1); 60 ssleep(1);
223 } 61 }
224 WARN_ON(rc); 62 WARN_ON(rc);
63 list_del(&part->node);
225} 64}
226 65
227static void efx_mtd_remove_device(struct efx_mtd *efx_mtd) 66int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
228{ 67 size_t n_parts, size_t sizeof_part)
229 struct efx_mtd_partition *part;
230
231 efx_for_each_partition(part, efx_mtd)
232 efx_mtd_remove_partition(part);
233 list_del(&efx_mtd->node);
234 kfree(efx_mtd);
235}
236
237static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
238{
239 struct efx_mtd_partition *part;
240
241 efx_for_each_partition(part, efx_mtd)
242 if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
243 snprintf(part->name, sizeof(part->name),
244 "%s %s:%02x", efx_mtd->efx->name,
245 part->type_name, part->mcdi.fw_subtype);
246 else
247 snprintf(part->name, sizeof(part->name),
248 "%s %s", efx_mtd->efx->name,
249 part->type_name);
250}
251
252static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
253{ 68{
254 struct efx_mtd_partition *part; 69 struct efx_mtd_partition *part;
70 size_t i;
255 71
256 efx_mtd->efx = efx; 72 for (i = 0; i < n_parts; i++) {
73 part = (struct efx_mtd_partition *)((char *)parts +
74 i * sizeof_part);
257 75
258 efx_mtd_rename_device(efx_mtd);
259
260 efx_for_each_partition(part, efx_mtd) {
261 part->mtd.writesize = 1; 76 part->mtd.writesize = 1;
262 77
263 part->mtd.owner = THIS_MODULE; 78 part->mtd.owner = THIS_MODULE;
264 part->mtd.priv = efx_mtd; 79 part->mtd.priv = efx;
265 part->mtd.name = part->name; 80 part->mtd.name = part->name;
266 part->mtd._erase = efx_mtd_erase; 81 part->mtd._erase = efx_mtd_erase;
267 part->mtd._read = efx_mtd->ops->read; 82 part->mtd._read = efx->type->mtd_read;
268 part->mtd._write = efx_mtd->ops->write; 83 part->mtd._write = efx->type->mtd_write;
269 part->mtd._sync = efx_mtd_sync; 84 part->mtd._sync = efx_mtd_sync;
270 85
86 efx->type->mtd_rename(part);
87
271 if (mtd_device_register(&part->mtd, NULL, 0)) 88 if (mtd_device_register(&part->mtd, NULL, 0))
272 goto fail; 89 goto fail;
90
91 /* Add to list in order - efx_mtd_remove() depends on this */
92 list_add_tail(&part->node, &efx->mtd_list);
273 } 93 }
274 94
275 list_add(&efx_mtd->node, &efx->mtd_list);
276 return 0; 95 return 0;
277 96
278fail: 97fail:
279 while (part != &efx_mtd->part[0]) { 98 while (i--) {
280 --part; 99 part = (struct efx_mtd_partition *)((char *)parts +
100 i * sizeof_part);
281 efx_mtd_remove_partition(part); 101 efx_mtd_remove_partition(part);
282 } 102 }
283 /* Failure is unlikely here, but probably means we're out of memory */ 103 /* Failure is unlikely here, but probably means we're out of memory */
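efx_mtd_add() deliberately walks the caller's array with an explicit sizeof_part stride instead of sizeof(struct efx_mtd_partition), which lets NIC-type code embed the common partition at the start of a larger, type-specific struct. A hedged sketch of such a caller follows (the wrapper struct and names are invented for illustration); note that the single kcalloc() matches efx_mtd_remove() below, which releases the whole array with one kfree() of its first entry:

struct example_mtd_partition {
	struct efx_mtd_partition common;	/* must come first */
	unsigned int nvram_type;		/* type-specific state */
};

static int example_mtd_probe(struct efx_nic *efx, size_t n_parts)
{
	struct example_mtd_partition *parts;
	int rc;

	parts = kcalloc(n_parts, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	/* ... fill in parts[i].common.mtd and parts[i].nvram_type ... */

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}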
@@ -286,410 +106,28 @@ fail:
286 106
287void efx_mtd_remove(struct efx_nic *efx) 107void efx_mtd_remove(struct efx_nic *efx)
288{ 108{
289 struct efx_mtd *efx_mtd, *next; 109 struct efx_mtd_partition *parts, *part, *next;
290 110
291 WARN_ON(efx_dev_registered(efx)); 111 WARN_ON(efx_dev_registered(efx));
292 112
293 list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node) 113 if (list_empty(&efx->mtd_list))
294 efx_mtd_remove_device(efx_mtd); 114 return;
295}
296
297void efx_mtd_rename(struct efx_nic *efx)
298{
299 struct efx_mtd *efx_mtd;
300
301 ASSERT_RTNL();
302
303 list_for_each_entry(efx_mtd, &efx->mtd_list, node)
304 efx_mtd_rename_device(efx_mtd);
305}
306
307int efx_mtd_probe(struct efx_nic *efx)
308{
309 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
310 return siena_mtd_probe(efx);
311 else
312 return falcon_mtd_probe(efx);
313}
314
315/* Implementation of MTD operations for Falcon */
316
317static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
318 size_t len, size_t *retlen, u8 *buffer)
319{
320 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
321 struct efx_mtd *efx_mtd = mtd->priv;
322 const struct efx_spi_device *spi = efx_mtd->spi;
323 struct efx_nic *efx = efx_mtd->efx;
324 struct falcon_nic_data *nic_data = efx->nic_data;
325 int rc;
326
327 rc = mutex_lock_interruptible(&nic_data->spi_lock);
328 if (rc)
329 return rc;
330 rc = falcon_spi_read(efx, spi, part->offset + start, len,
331 retlen, buffer);
332 mutex_unlock(&nic_data->spi_lock);
333 return rc;
334}
335
336static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
337{
338 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
339 struct efx_mtd *efx_mtd = mtd->priv;
340 struct efx_nic *efx = efx_mtd->efx;
341 struct falcon_nic_data *nic_data = efx->nic_data;
342 int rc;
343
344 rc = mutex_lock_interruptible(&nic_data->spi_lock);
345 if (rc)
346 return rc;
347 rc = efx_spi_erase(part, part->offset + start, len);
348 mutex_unlock(&nic_data->spi_lock);
349 return rc;
350}
351
352static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
353 size_t len, size_t *retlen, const u8 *buffer)
354{
355 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
356 struct efx_mtd *efx_mtd = mtd->priv;
357 const struct efx_spi_device *spi = efx_mtd->spi;
358 struct efx_nic *efx = efx_mtd->efx;
359 struct falcon_nic_data *nic_data = efx->nic_data;
360 int rc;
361
362 rc = mutex_lock_interruptible(&nic_data->spi_lock);
363 if (rc)
364 return rc;
365 rc = falcon_spi_write(efx, spi, part->offset + start, len,
366 retlen, buffer);
367 mutex_unlock(&nic_data->spi_lock);
368 return rc;
369}
370
371static int falcon_mtd_sync(struct mtd_info *mtd)
372{
373 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
374 struct efx_mtd *efx_mtd = mtd->priv;
375 struct efx_nic *efx = efx_mtd->efx;
376 struct falcon_nic_data *nic_data = efx->nic_data;
377 int rc;
378
379 mutex_lock(&nic_data->spi_lock);
380 rc = efx_spi_slow_wait(part, true);
381 mutex_unlock(&nic_data->spi_lock);
382 return rc;
383}
384
385static const struct efx_mtd_ops falcon_mtd_ops = {
386 .read = falcon_mtd_read,
387 .erase = falcon_mtd_erase,
388 .write = falcon_mtd_write,
389 .sync = falcon_mtd_sync,
390};
391
392static int falcon_mtd_probe(struct efx_nic *efx)
393{
394 struct falcon_nic_data *nic_data = efx->nic_data;
395 struct efx_spi_device *spi;
396 struct efx_mtd *efx_mtd;
397 int rc = -ENODEV;
398
399 ASSERT_RTNL();
400
401 spi = &nic_data->spi_flash;
402 if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
403 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
404 GFP_KERNEL);
405 if (!efx_mtd)
406 return -ENOMEM;
407
408 efx_mtd->spi = spi;
409 efx_mtd->name = "flash";
410 efx_mtd->ops = &falcon_mtd_ops;
411
412 efx_mtd->n_parts = 1;
413 efx_mtd->part[0].mtd.type = MTD_NORFLASH;
414 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
415 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
416 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
417 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
418 efx_mtd->part[0].type_name = "sfc_flash_bootrom";
419
420 rc = efx_mtd_probe_device(efx, efx_mtd);
421 if (rc) {
422 kfree(efx_mtd);
423 return rc;
424 }
425 }
426
427 spi = &nic_data->spi_eeprom;
428 if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
429 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
430 GFP_KERNEL);
431 if (!efx_mtd)
432 return -ENOMEM;
433
434 efx_mtd->spi = spi;
435 efx_mtd->name = "EEPROM";
436 efx_mtd->ops = &falcon_mtd_ops;
437
438 efx_mtd->n_parts = 1;
439 efx_mtd->part[0].mtd.type = MTD_RAM;
440 efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
441 efx_mtd->part[0].mtd.size =
442 min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
443 EFX_EEPROM_BOOTCONFIG_START;
444 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
445 efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
446 efx_mtd->part[0].type_name = "sfc_bootconfig";
447
448 rc = efx_mtd_probe_device(efx, efx_mtd);
449 if (rc) {
450 kfree(efx_mtd);
451 return rc;
452 }
453 }
454
455 return rc;
456}
457
458/* Implementation of MTD operations for Siena */
459
460static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
461 size_t len, size_t *retlen, u8 *buffer)
462{
463 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
464 struct efx_mtd *efx_mtd = mtd->priv;
465 struct efx_nic *efx = efx_mtd->efx;
466 loff_t offset = start;
467 loff_t end = min_t(loff_t, start + len, mtd->size);
468 size_t chunk;
469 int rc = 0;
470
471 while (offset < end) {
472 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
473 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
474 buffer, chunk);
475 if (rc)
476 goto out;
477 offset += chunk;
478 buffer += chunk;
479 }
480out:
481 *retlen = offset - start;
482 return rc;
483}
484
485static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
486{
487 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
488 struct efx_mtd *efx_mtd = mtd->priv;
489 struct efx_nic *efx = efx_mtd->efx;
490 loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
491 loff_t end = min_t(loff_t, start + len, mtd->size);
492 size_t chunk = part->mtd.erasesize;
493 int rc = 0;
494
495 if (!part->mcdi.updating) {
496 rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
497 if (rc)
498 goto out;
499 part->mcdi.updating = true;
500 }
501
502 /* The MCDI interface can in fact erase multiple blocks at once;
503 * but erasing may be slow, so we make multiple calls here to avoid
504 * tripping the MCDI RPC timeout. */
505 while (offset < end) {
506 rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
507 chunk);
508 if (rc)
509 goto out;
510 offset += chunk;
511 }
512out:
513 return rc;
514}
515
516static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
517 size_t len, size_t *retlen, const u8 *buffer)
518{
519 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
520 struct efx_mtd *efx_mtd = mtd->priv;
521 struct efx_nic *efx = efx_mtd->efx;
522 loff_t offset = start;
523 loff_t end = min_t(loff_t, start + len, mtd->size);
524 size_t chunk;
525 int rc = 0;
526
527 if (!part->mcdi.updating) {
528 rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
529 if (rc)
530 goto out;
531 part->mcdi.updating = true;
532 }
533
534 while (offset < end) {
535 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
536 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
537 buffer, chunk);
538 if (rc)
539 goto out;
540 offset += chunk;
541 buffer += chunk;
542 }
543out:
544 *retlen = offset - start;
545 return rc;
546}
547
548static int siena_mtd_sync(struct mtd_info *mtd)
549{
550 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
551 struct efx_mtd *efx_mtd = mtd->priv;
552 struct efx_nic *efx = efx_mtd->efx;
553 int rc = 0;
554
555 if (part->mcdi.updating) {
556 part->mcdi.updating = false;
557 rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
558 }
559
560 return rc;
561}
562
563static const struct efx_mtd_ops siena_mtd_ops = {
564 .read = siena_mtd_read,
565 .erase = siena_mtd_erase,
566 .write = siena_mtd_write,
567 .sync = siena_mtd_sync,
568};
569
570struct siena_nvram_type_info {
571 int port;
572 const char *name;
573};
574 115
575static const struct siena_nvram_type_info siena_nvram_types[] = { 116 parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
576 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, 117 node);
577 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
578 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
579 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
580 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
581 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
582 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
583 [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
584 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
585 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
586 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
587 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
588 [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
589};
590 118
591static int siena_mtd_probe_partition(struct efx_nic *efx, 119 list_for_each_entry_safe(part, next, &efx->mtd_list, node)
592 struct efx_mtd *efx_mtd, 120 efx_mtd_remove_partition(part);
593 unsigned int part_id,
594 unsigned int type)
595{
596 struct efx_mtd_partition *part = &efx_mtd->part[part_id];
597 const struct siena_nvram_type_info *info;
598 size_t size, erase_size;
599 bool protected;
600 int rc;
601
602 if (type >= ARRAY_SIZE(siena_nvram_types) ||
603 siena_nvram_types[type].name == NULL)
604 return -ENODEV;
605
606 info = &siena_nvram_types[type];
607
608 if (info->port != efx_port_num(efx))
609 return -ENODEV;
610
611 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
612 if (rc)
613 return rc;
614 if (protected)
615 return -ENODEV; /* hide it */
616
617 part->mcdi.nvram_type = type;
618 part->type_name = info->name;
619
620 part->mtd.type = MTD_NORFLASH;
621 part->mtd.flags = MTD_CAP_NORFLASH;
622 part->mtd.size = size;
623 part->mtd.erasesize = erase_size;
624 121
625 return 0; 122 kfree(parts);
626} 123}
627 124
628static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, 125void efx_mtd_rename(struct efx_nic *efx)
629 struct efx_mtd *efx_mtd)
630{ 126{
631 struct efx_mtd_partition *part; 127 struct efx_mtd_partition *part;
632 uint16_t fw_subtype_list[
633 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
634 int rc;
635
636 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
637 if (rc)
638 return rc;
639
640 efx_for_each_partition(part, efx_mtd)
641 part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
642
643 return 0;
644}
645
646static int siena_mtd_probe(struct efx_nic *efx)
647{
648 struct efx_mtd *efx_mtd;
649 int rc = -ENODEV;
650 u32 nvram_types;
651 unsigned int type;
652 128
653 ASSERT_RTNL(); 129 ASSERT_RTNL();
654 130
655 rc = efx_mcdi_nvram_types(efx, &nvram_types); 131 list_for_each_entry(part, &efx->mtd_list, node)
656 if (rc) 132 efx->type->mtd_rename(part);
657 return rc;
658
659 efx_mtd = kzalloc(sizeof(*efx_mtd) +
660 hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
661 GFP_KERNEL);
662 if (!efx_mtd)
663 return -ENOMEM;
664
665 efx_mtd->name = "Siena NVRAM manager";
666
667 efx_mtd->ops = &siena_mtd_ops;
668
669 type = 0;
670 efx_mtd->n_parts = 0;
671
672 while (nvram_types != 0) {
673 if (nvram_types & 1) {
674 rc = siena_mtd_probe_partition(efx, efx_mtd,
675 efx_mtd->n_parts, type);
676 if (rc == 0)
677 efx_mtd->n_parts++;
678 else if (rc != -ENODEV)
679 goto fail;
680 }
681 type++;
682 nvram_types >>= 1;
683 }
684
685 rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
686 if (rc)
687 goto fail;
688
689 rc = efx_mtd_probe_device(efx, efx_mtd);
690fail:
691 if (rc)
692 kfree(efx_mtd);
693 return rc;
694} 133}
695
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f4c7e6b67743..b172ed133055 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -27,9 +27,11 @@
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/mtd/mtd.h>
30 31
31#include "enum.h" 32#include "enum.h"
32#include "bitfield.h" 33#include "bitfield.h"
34#include "filter.h"
33 35
34/************************************************************************** 36/**************************************************************************
35 * 37 *
@@ -37,7 +39,7 @@
37 * 39 *
38 **************************************************************************/ 40 **************************************************************************/
39 41
40#define EFX_DRIVER_VERSION "3.2" 42#define EFX_DRIVER_VERSION "4.0"
41 43
42#ifdef DEBUG 44#ifdef DEBUG
43#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 45#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -93,21 +95,36 @@ struct efx_ptp_data;
93struct efx_self_tests; 95struct efx_self_tests;
94 96
95/** 97/**
96 * struct efx_special_buffer - An Efx special buffer 98 * struct efx_buffer - A general-purpose DMA buffer
97 * @addr: CPU base address of the buffer 99 * @addr: host base address of the buffer
98 * @dma_addr: DMA base address of the buffer 100 * @dma_addr: DMA base address of the buffer
99 * @len: Buffer length, in bytes 101 * @len: Buffer length, in bytes
100 * @index: Buffer index within controller's buffer table
101 * @entries: Number of buffer table entries
102 * 102 *
103 * Special buffers are used for the event queues and the TX and RX 103 * The NIC uses these buffers for its interrupt status registers and
104 * descriptor queues for each channel. They are *not* used for the 104 * MAC stats dumps.
105 * actual transmit and receive buffers.
106 */ 105 */
107struct efx_special_buffer { 106struct efx_buffer {
108 void *addr; 107 void *addr;
109 dma_addr_t dma_addr; 108 dma_addr_t dma_addr;
110 unsigned int len; 109 unsigned int len;
110};
111
112/**
113 * struct efx_special_buffer - DMA buffer entered into buffer table
114 * @buf: Standard &struct efx_buffer
115 * @index: Buffer index within controller's buffer table
116 * @entries: Number of buffer table entries
117 *
118 * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
119 * Event and descriptor rings are addressed via one or more buffer
120 * table entries (and so can be physically non-contiguous, although we
121 * currently do not take advantage of that). On Falcon and Siena we
122 * have to take care of allocating and initialising the entries
123 * ourselves. On later hardware this is managed by the firmware and
124 * @index and @entries are left as 0.
125 */
126struct efx_special_buffer {
127 struct efx_buffer buf;
111 unsigned int index; 128 unsigned int index;
112 unsigned int entries; 129 unsigned int entries;
113}; 130};
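With the DMA fields moved into the embedded efx_buffer, call sites presumably gain one level of indirection; an illustrative accessor (assumed, not taken from this hunk):

static inline void *example_eventq_base(struct efx_channel *channel)
{
	/* addr/dma_addr now live in the nested efx_buffer member */
	return channel->eventq.buf.addr;
}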
@@ -118,6 +135,7 @@ struct efx_special_buffer {
118 * freed when descriptor completes 135 * freed when descriptor completes
119 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be 136 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
120 * freed when descriptor completes. 137 * freed when descriptor completes.
138 * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
121 * @dma_addr: DMA address of the fragment. 139 * @dma_addr: DMA address of the fragment.
122 * @flags: Flags for allocation and DMA mapping type 140 * @flags: Flags for allocation and DMA mapping type
123 * @len: Length of this fragment. 141 * @len: Length of this fragment.
@@ -129,7 +147,10 @@ struct efx_tx_buffer {
129 const struct sk_buff *skb; 147 const struct sk_buff *skb;
130 void *heap_buf; 148 void *heap_buf;
131 }; 149 };
132 dma_addr_t dma_addr; 150 union {
151 efx_qword_t option;
152 dma_addr_t dma_addr;
153 };
133 unsigned short flags; 154 unsigned short flags;
134 unsigned short len; 155 unsigned short len;
135 unsigned short unmap_len; 156 unsigned short unmap_len;
@@ -138,6 +159,7 @@ struct efx_tx_buffer {
138#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ 159#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
139#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */ 160#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
140#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ 161#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
162#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
141 163
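The new flag pairs with the dma_addr/option union above: an option descriptor occupies the union's option member and was never DMA-mapped, so completion handling must skip the unmap step. A hedged sketch of that check (the helper is invented for illustration):

static inline bool example_tx_buf_needs_unmap(const struct efx_tx_buffer *buf)
{
	/* Option descriptors were never mapped; nothing to unmap */
	return buf->unmap_len != 0 && !(buf->flags & EFX_TX_BUF_OPTION);
}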
142/** 164/**
143 * struct efx_tx_queue - An Efx TX queue 165 * struct efx_tx_queue - An Efx TX queue
@@ -169,6 +191,7 @@ struct efx_tx_buffer {
169 * variable indicates that the queue is empty. This is to 191 * variable indicates that the queue is empty. This is to
170 * avoid cache-line ping-pong between the xmit path and the 192 * avoid cache-line ping-pong between the xmit path and the
171 * completion path. 193 * completion path.
194 * @merge_events: Number of TX merged completion events
172 * @insert_count: Current insert pointer 195 * @insert_count: Current insert pointer
173 * This is the number of buffers that have been added to the 196 * This is the number of buffers that have been added to the
174 * software ring. 197 * software ring.
@@ -205,6 +228,7 @@ struct efx_tx_queue {
205 /* Members used mainly on the completion path */ 228 /* Members used mainly on the completion path */
206 unsigned int read_count ____cacheline_aligned_in_smp; 229 unsigned int read_count ____cacheline_aligned_in_smp;
207 unsigned int old_write_count; 230 unsigned int old_write_count;
231 unsigned int merge_events;
208 232
209 /* Members used only on the xmit path */ 233 /* Members used only on the xmit path */
210 unsigned int insert_count ____cacheline_aligned_in_smp; 234 unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -244,6 +268,7 @@ struct efx_rx_buffer {
244#define EFX_RX_PKT_CSUMMED 0x0002 268#define EFX_RX_PKT_CSUMMED 0x0002
245#define EFX_RX_PKT_DISCARD 0x0004 269#define EFX_RX_PKT_DISCARD 0x0004
246#define EFX_RX_PKT_TCP 0x0040 270#define EFX_RX_PKT_TCP 0x0040
271#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
247 272
248/** 273/**
249 * struct efx_rx_page_state - Page-based rx buffer state 274 * struct efx_rx_page_state - Page-based rx buffer state
@@ -271,13 +296,14 @@ struct efx_rx_page_state {
271 * @buffer: The software buffer ring 296 * @buffer: The software buffer ring
272 * @rxd: The hardware descriptor ring 297 * @rxd: The hardware descriptor ring
273 * @ptr_mask: The size of the ring minus 1. 298 * @ptr_mask: The size of the ring minus 1.
274 * @enabled: Receive queue enabled indicator. 299 * @refill_enabled: Enable refill whenever fill level is low
275 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as 300 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
276 * @rxq_flush_pending. 301 * @rxq_flush_pending.
277 * @added_count: Number of buffers added to the receive queue. 302 * @added_count: Number of buffers added to the receive queue.
278 * @notified_count: Number of buffers given to NIC (<= @added_count). 303 * @notified_count: Number of buffers given to NIC (<= @added_count).
279 * @removed_count: Number of buffers removed from the receive queue. 304 * @removed_count: Number of buffers removed from the receive queue.
280 * @scatter_n: Number of buffers used by current packet 305 * @scatter_n: Used by NIC-specific receive code.
 306 * @scatter_len: Used by NIC-specific receive code.
281 * @page_ring: The ring to store DMA mapped pages for reuse. 307 * @page_ring: The ring to store DMA mapped pages for reuse.
282 * @page_add: Counter to calculate the write pointer for the recycle ring. 308 * @page_add: Counter to calculate the write pointer for the recycle ring.
283 * @page_remove: Counter to calculate the read pointer for the recycle ring. 309 * @page_remove: Counter to calculate the read pointer for the recycle ring.
@@ -302,13 +328,14 @@ struct efx_rx_queue {
302 struct efx_rx_buffer *buffer; 328 struct efx_rx_buffer *buffer;
303 struct efx_special_buffer rxd; 329 struct efx_special_buffer rxd;
304 unsigned int ptr_mask; 330 unsigned int ptr_mask;
305 bool enabled; 331 bool refill_enabled;
306 bool flush_pending; 332 bool flush_pending;
307 333
308 unsigned int added_count; 334 unsigned int added_count;
309 unsigned int notified_count; 335 unsigned int notified_count;
310 unsigned int removed_count; 336 unsigned int removed_count;
311 unsigned int scatter_n; 337 unsigned int scatter_n;
338 unsigned int scatter_len;
312 struct page **page_ring; 339 struct page **page_ring;
313 unsigned int page_add; 340 unsigned int page_add;
314 unsigned int page_remove; 341 unsigned int page_remove;
@@ -325,22 +352,6 @@ struct efx_rx_queue {
325 unsigned int slow_fill_count; 352 unsigned int slow_fill_count;
326}; 353};
327 354
328/**
329 * struct efx_buffer - An Efx general-purpose buffer
330 * @addr: host base address of the buffer
331 * @dma_addr: DMA base address of the buffer
332 * @len: Buffer length, in bytes
333 *
334 * The NIC uses these buffers for its interrupt status registers and
335 * MAC stats dumps.
336 */
337struct efx_buffer {
338 void *addr;
339 dma_addr_t dma_addr;
340 unsigned int len;
341};
342
343
344enum efx_rx_alloc_method { 355enum efx_rx_alloc_method {
345 RX_ALLOC_METHOD_AUTO = 0, 356 RX_ALLOC_METHOD_AUTO = 0,
346 RX_ALLOC_METHOD_SKB = 1, 357 RX_ALLOC_METHOD_SKB = 1,
@@ -357,12 +368,12 @@ enum efx_rx_alloc_method {
357 * @efx: Associated Efx NIC 368 * @efx: Associated Efx NIC
358 * @channel: Channel instance number 369 * @channel: Channel instance number
359 * @type: Channel type definition 370 * @type: Channel type definition
371 * @eventq_init: Event queue initialised flag
360 * @enabled: Channel enabled indicator 372 * @enabled: Channel enabled indicator
361 * @irq: IRQ number (MSI and MSI-X only) 373 * @irq: IRQ number (MSI and MSI-X only)
362 * @irq_moderation: IRQ moderation value (in hardware ticks) 374 * @irq_moderation: IRQ moderation value (in hardware ticks)
363 * @napi_dev: Net device used with NAPI 375 * @napi_dev: Net device used with NAPI
364 * @napi_str: NAPI control structure 376 * @napi_str: NAPI control structure
365 * @work_pending: Is work pending via NAPI?
366 * @eventq: Event queue buffer 377 * @eventq: Event queue buffer
367 * @eventq_mask: Event queue pointer mask 378 * @eventq_mask: Event queue pointer mask
368 * @eventq_read_ptr: Event queue read pointer 379 * @eventq_read_ptr: Event queue read pointer
@@ -378,6 +389,8 @@ enum efx_rx_alloc_method {
378 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 389 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
379 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to 390 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
380 * lack of descriptors 391 * lack of descriptors
392 * @n_rx_merge_events: Number of RX merged completion events
393 * @n_rx_merge_packets: Number of RX packets completed by merged events
381 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by 394 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
382 * __efx_rx_packet(), or zero if there is none 395 * __efx_rx_packet(), or zero if there is none
383 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered 396 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -389,12 +402,12 @@ struct efx_channel {
389 struct efx_nic *efx; 402 struct efx_nic *efx;
390 int channel; 403 int channel;
391 const struct efx_channel_type *type; 404 const struct efx_channel_type *type;
405 bool eventq_init;
392 bool enabled; 406 bool enabled;
393 int irq; 407 int irq;
394 unsigned int irq_moderation; 408 unsigned int irq_moderation;
395 struct net_device *napi_dev; 409 struct net_device *napi_dev;
396 struct napi_struct napi_str; 410 struct napi_struct napi_str;
397 bool work_pending;
398 struct efx_special_buffer eventq; 411 struct efx_special_buffer eventq;
399 unsigned int eventq_mask; 412 unsigned int eventq_mask;
400 unsigned int eventq_read_ptr; 413 unsigned int eventq_read_ptr;
@@ -414,6 +427,8 @@ struct efx_channel {
414 unsigned n_rx_overlength; 427 unsigned n_rx_overlength;
415 unsigned n_skbuff_leaks; 428 unsigned n_skbuff_leaks;
416 unsigned int n_rx_nodesc_trunc; 429 unsigned int n_rx_nodesc_trunc;
430 unsigned int n_rx_merge_events;
431 unsigned int n_rx_merge_packets;
417 432
418 unsigned int rx_pkt_n_frags; 433 unsigned int rx_pkt_n_frags;
419 unsigned int rx_pkt_index; 434 unsigned int rx_pkt_index;
@@ -423,6 +438,21 @@ struct efx_channel {
423}; 438};
424 439
425/** 440/**
441 * struct efx_msi_context - Context for each MSI
442 * @efx: The associated NIC
443 * @index: Index of the channel/IRQ
444 * @name: Name of the channel/IRQ
445 *
446 * Unlike &struct efx_channel, this is never reallocated and is always
447 * safe for the IRQ handler to access.
448 */
449struct efx_msi_context {
450 struct efx_nic *efx;
451 unsigned int index;
452 char name[IFNAMSIZ + 6];
453};
454
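Because the context is never reallocated, an MSI handler can dereference its dev_id without racing against channel reallocation. A minimal handler sketch, assuming the @irq_handle_msi contract described in the efx_nic_type kerneldoc further down:

static irqreturn_t example_msi_handler(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	/* Safe even while channels are being reshaped: the context
	 * outlives any particular struct efx_channel. */
	netif_vdbg(efx, intr, efx->net_dev, "IRQ %d (%s)\n",
		   irq, context->name);
	return IRQ_HANDLED;
}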
455/**
426 * struct efx_channel_type - distinguishes traffic and extra channels 456 * struct efx_channel_type - distinguishes traffic and extra channels
427 * @handle_no_channel: Handle failure to allocate an extra channel 457 * @handle_no_channel: Handle failure to allocate an extra channel
428 * @pre_probe: Set up extra state prior to initialisation 458 * @pre_probe: Set up extra state prior to initialisation
@@ -579,75 +609,17 @@ static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
579 return !!(mode & ~PHY_MODE_TX_DISABLED); 609 return !!(mode & ~PHY_MODE_TX_DISABLED);
580} 610}
581 611
582/* 612/**
583 * Efx extended statistics 613 * struct efx_hw_stat_desc - Description of a hardware statistic
584 * 614 * @name: Name of the statistic as visible through ethtool, or %NULL if
585 * Not all statistics are provided by all supported MACs. The purpose 615 * it should not be exposed
586 * of this structure is to contain the raw statistics provided by each 616 * @dma_width: Width in bits (0 for non-DMA statistics)
587 * MAC. 617 * @offset: Offset within stats (ignored for non-DMA statistics)
588 */ 618 */
589struct efx_mac_stats { 619struct efx_hw_stat_desc {
590 u64 tx_bytes; 620 const char *name;
591 u64 tx_good_bytes; 621 u16 dma_width;
592 u64 tx_bad_bytes; 622 u16 offset;
593 u64 tx_packets;
594 u64 tx_bad;
595 u64 tx_pause;
596 u64 tx_control;
597 u64 tx_unicast;
598 u64 tx_multicast;
599 u64 tx_broadcast;
600 u64 tx_lt64;
601 u64 tx_64;
602 u64 tx_65_to_127;
603 u64 tx_128_to_255;
604 u64 tx_256_to_511;
605 u64 tx_512_to_1023;
606 u64 tx_1024_to_15xx;
607 u64 tx_15xx_to_jumbo;
608 u64 tx_gtjumbo;
609 u64 tx_collision;
610 u64 tx_single_collision;
611 u64 tx_multiple_collision;
612 u64 tx_excessive_collision;
613 u64 tx_deferred;
614 u64 tx_late_collision;
615 u64 tx_excessive_deferred;
616 u64 tx_non_tcpudp;
617 u64 tx_mac_src_error;
618 u64 tx_ip_src_error;
619 u64 rx_bytes;
620 u64 rx_good_bytes;
621 u64 rx_bad_bytes;
622 u64 rx_packets;
623 u64 rx_good;
624 u64 rx_bad;
625 u64 rx_pause;
626 u64 rx_control;
627 u64 rx_unicast;
628 u64 rx_multicast;
629 u64 rx_broadcast;
630 u64 rx_lt64;
631 u64 rx_64;
632 u64 rx_65_to_127;
633 u64 rx_128_to_255;
634 u64 rx_256_to_511;
635 u64 rx_512_to_1023;
636 u64 rx_1024_to_15xx;
637 u64 rx_15xx_to_jumbo;
638 u64 rx_gtjumbo;
639 u64 rx_bad_lt64;
640 u64 rx_bad_64_to_15xx;
641 u64 rx_bad_15xx_to_jumbo;
642 u64 rx_bad_gtjumbo;
643 u64 rx_overflow;
644 u64 rx_missed;
645 u64 rx_false_carrier;
646 u64 rx_symbol_error;
647 u64 rx_align_error;
648 u64 rx_length_error;
649 u64 rx_internal_error;
650 u64 rx_good_lt64;
651}; 623};
652 624
653/* Number of bits used in a multicast filter hash address */ 625/* Number of bits used in a multicast filter hash address */
@@ -662,7 +634,6 @@ union efx_multicast_hash {
662 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; 634 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
663}; 635};
664 636
665struct efx_filter_state;
666struct efx_vf; 637struct efx_vf;
667struct vfdi_status; 638struct vfdi_status;
668 639
@@ -672,7 +643,6 @@ struct vfdi_status;
672 * @pci_dev: The PCI device 643 * @pci_dev: The PCI device
673 * @type: Controller type attributes 644 * @type: Controller type attributes
674 * @legacy_irq: IRQ number 645 * @legacy_irq: IRQ number
675 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
676 * @workqueue: Workqueue for port reconfigures and the HW monitor. 646 * @workqueue: Workqueue for port reconfigures and the HW monitor.
677 * Work items do not hold and must not acquire RTNL. 647 * Work items do not hold and must not acquire RTNL.
678 * @workqueue_name: Name of workqueue 648 * @workqueue_name: Name of workqueue
@@ -689,7 +659,7 @@ struct vfdi_status;
689 * @tx_queue: TX DMA queues 659 * @tx_queue: TX DMA queues
690 * @rx_queue: RX DMA queues 660 * @rx_queue: RX DMA queues
691 * @channel: Channels 661 * @channel: Channels
692 * @channel_name: Names for channels and their IRQs 662 * @msi_context: Context for each MSI
693 * @extra_channel_types: Types of extra (non-traffic) channels that 663 * @extra_channel_types: Types of extra (non-traffic) channels that
694 * should be allocated for this NIC 664 * should be allocated for this NIC
695 * @rxq_entries: Size of receive queues requested by user. 665 * @rxq_entries: Size of receive queues requested by user.
@@ -707,17 +677,25 @@ struct vfdi_status;
707 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 677 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
708 * @rx_buffer_truesize: Amortised allocation size of an RX buffer, 678 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
709 * for use in sk_buff::truesize 679 * for use in sk_buff::truesize
680 * @rx_prefix_size: Size of RX prefix before packet data
681 * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
682 * (valid only if @rx_prefix_size != 0; always negative)
683 * @rx_packet_len_offset: Offset of RX packet length from start of packet data
684 * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
710 * @rx_hash_key: Toeplitz hash key for RSS 685 * @rx_hash_key: Toeplitz hash key for RSS
711 * @rx_indir_table: Indirection table for RSS 686 * @rx_indir_table: Indirection table for RSS
712 * @rx_scatter: Scatter mode enabled for receives 687 * @rx_scatter: Scatter mode enabled for receives
713 * @int_error_count: Number of internal errors seen recently 688 * @int_error_count: Number of internal errors seen recently
714 * @int_error_expire: Time at which error count will be expired 689 * @int_error_expire: Time at which error count will be expired
690 * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
691 * acknowledge but do nothing else.
715 * @irq_status: Interrupt status buffer 692 * @irq_status: Interrupt status buffer
716 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 693 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
717 * @irq_level: IRQ level/index for IRQs not triggered by an event queue 694 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
718 * @selftest_work: Work item for asynchronous self-test 695 * @selftest_work: Work item for asynchronous self-test
719 * @mtd_list: List of MTDs attached to the NIC 696 * @mtd_list: List of MTDs attached to the NIC
720 * @nic_data: Hardware dependent state 697 * @nic_data: Hardware dependent state
698 * @mcdi: Management-Controller-to-Driver Interface state
721 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 699 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
722 * efx_monitor() and efx_reconfigure_port() 700 * efx_monitor() and efx_reconfigure_port()
723 * @port_enabled: Port enabled indicator. 701 * @port_enabled: Port enabled indicator.
@@ -737,8 +715,10 @@ struct vfdi_status;
737 * @link_advertising: Autonegotiation advertising flags 715 * @link_advertising: Autonegotiation advertising flags
738 * @link_state: Current state of the link 716 * @link_state: Current state of the link
739 * @n_link_state_changes: Number of times the link has changed state 717 * @n_link_state_changes: Number of times the link has changed state
740 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 718 * @unicast_filter: Flag for Falcon-arch simple unicast filter.
741 * @multicast_hash: Multicast hash table 719 * Protected by @mac_lock.
720 * @multicast_hash: Multicast hash table for Falcon-arch.
721 * Protected by @mac_lock.
742 * @wanted_fc: Wanted flow control flags 722 * @wanted_fc: Wanted flow control flags
743 * @fc_disable: When non-zero flow control is disabled. Typically used to 723 * @fc_disable: When non-zero flow control is disabled. Typically used to
744 * ensure that network back pressure doesn't delay dma queue flushes. 724 * ensure that network back pressure doesn't delay dma queue flushes.
@@ -747,7 +727,12 @@ struct vfdi_status;
747 * @loopback_mode: Loopback status 727 * @loopback_mode: Loopback status
748 * @loopback_modes: Supported loopback mode bitmask 728 * @loopback_modes: Supported loopback mode bitmask
749 * @loopback_selftest: Offline self-test private state 729 * @loopback_selftest: Offline self-test private state
750 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained. 730 * @filter_lock: Filter table lock
731 * @filter_state: Architecture-dependent filter table state
732 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
733 * indexed by filter ID
734 * @rps_expire_index: Next index to check for expiry in @rps_flow_id
735 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
751 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 736 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
752 * Decremented when the efx_flush_rx_queue() is called. 737 * Decremented when the efx_flush_rx_queue() is called.
753 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet 738 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
@@ -771,12 +756,8 @@ struct vfdi_status;
771 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This 756 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
772 * field is used by efx_test_interrupts() to verify that an 757 * field is used by efx_test_interrupts() to verify that an
773 * interrupt has occurred. 758 * interrupt has occurred.
774 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 759 * @stats_lock: Statistics update lock. Must be held when calling
775 * @mac_stats: MAC statistics. These include all statistics the MACs 760 * efx_nic_type::{update,start,stop}_stats.
776 * can provide. Generic code converts these into a standard
777 * &struct net_device_stats.
778 * @stats_lock: Statistics update lock. Serialises statistics fetches
779 * and access to @mac_stats.
780 * 761 *
781 * This is stored in the private area of the &struct net_device. 762 * This is stored in the private area of the &struct net_device.
782 */ 763 */
@@ -788,7 +769,6 @@ struct efx_nic {
788 unsigned int port_num; 769 unsigned int port_num;
789 const struct efx_nic_type *type; 770 const struct efx_nic_type *type;
790 int legacy_irq; 771 int legacy_irq;
791 bool legacy_irq_enabled;
792 bool eeh_disabled_legacy_irq; 772 bool eeh_disabled_legacy_irq;
793 struct workqueue_struct *workqueue; 773 struct workqueue_struct *workqueue;
794 char workqueue_name[16]; 774 char workqueue_name[16];
@@ -806,7 +786,7 @@ struct efx_nic {
806 unsigned long reset_pending; 786 unsigned long reset_pending;
807 787
808 struct efx_channel *channel[EFX_MAX_CHANNELS]; 788 struct efx_channel *channel[EFX_MAX_CHANNELS];
809 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; 789 struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
810 const struct efx_channel_type * 790 const struct efx_channel_type *
811 extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; 791 extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
812 792
@@ -819,6 +799,8 @@ struct efx_nic {
819 unsigned rx_dc_base; 799 unsigned rx_dc_base;
820 unsigned sram_lim_qw; 800 unsigned sram_lim_qw;
821 unsigned next_buffer_table; 801 unsigned next_buffer_table;
802
803 unsigned int max_channels;
822 unsigned n_channels; 804 unsigned n_channels;
823 unsigned n_rx_channels; 805 unsigned n_rx_channels;
824 unsigned rss_spread; 806 unsigned rss_spread;
@@ -830,6 +812,9 @@ struct efx_nic {
830 unsigned int rx_page_buf_step; 812 unsigned int rx_page_buf_step;
831 unsigned int rx_bufs_per_page; 813 unsigned int rx_bufs_per_page;
832 unsigned int rx_pages_per_batch; 814 unsigned int rx_pages_per_batch;
815 unsigned int rx_prefix_size;
816 int rx_packet_hash_offset;
817 int rx_packet_len_offset;
833 u8 rx_hash_key[40]; 818 u8 rx_hash_key[40];
834 u32 rx_indir_table[128]; 819 u32 rx_indir_table[128];
835 bool rx_scatter; 820 bool rx_scatter;
@@ -837,6 +822,7 @@ struct efx_nic {
837 unsigned int_error_count; 822 unsigned int_error_count;
838 unsigned long int_error_expire; 823 unsigned long int_error_expire;
839 824
825 bool irq_soft_enabled;
840 struct efx_buffer irq_status; 826 struct efx_buffer irq_status;
841 unsigned irq_zero_count; 827 unsigned irq_zero_count;
842 unsigned irq_level; 828 unsigned irq_level;
@@ -847,6 +833,7 @@ struct efx_nic {
847#endif 833#endif
848 834
849 void *nic_data; 835 void *nic_data;
836 struct efx_mcdi_data *mcdi;
850 837
851 struct mutex mac_lock; 838 struct mutex mac_lock;
852 struct work_struct mac_work; 839 struct work_struct mac_work;
@@ -868,7 +855,7 @@ struct efx_nic {
868 struct efx_link_state link_state; 855 struct efx_link_state link_state;
869 unsigned int n_link_state_changes; 856 unsigned int n_link_state_changes;
870 857
871 bool promiscuous; 858 bool unicast_filter;
872 union efx_multicast_hash multicast_hash; 859 union efx_multicast_hash multicast_hash;
873 u8 wanted_fc; 860 u8 wanted_fc;
874 unsigned fc_disable; 861 unsigned fc_disable;
@@ -879,9 +866,14 @@ struct efx_nic {
879 866
880 void *loopback_selftest; 867 void *loopback_selftest;
881 868
882 struct efx_filter_state *filter_state; 869 spinlock_t filter_lock;
870 void *filter_state;
871#ifdef CONFIG_RFS_ACCEL
872 u32 *rps_flow_id;
873 unsigned int rps_expire_index;
874#endif
883 875
884 atomic_t drain_pending; 876 atomic_t active_queues;
885 atomic_t rxq_flush_pending; 877 atomic_t rxq_flush_pending;
886 atomic_t rxq_flush_outstanding; 878 atomic_t rxq_flush_outstanding;
887 wait_queue_head_t flush_wq; 879 wait_queue_head_t flush_wq;
@@ -907,8 +899,6 @@ struct efx_nic {
907 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 899 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
908 spinlock_t biu_lock; 900 spinlock_t biu_lock;
909 int last_irq_cpu; 901 int last_irq_cpu;
910 unsigned n_rx_nodesc_drop_cnt;
911 struct efx_mac_stats mac_stats;
912 spinlock_t stats_lock; 902 spinlock_t stats_lock;
913}; 903};
914 904
@@ -922,8 +912,17 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
922 return efx->port_num; 912 return efx->port_num;
923} 913}
924 914
915struct efx_mtd_partition {
916 struct list_head node;
917 struct mtd_info mtd;
918 const char *dev_type_name;
919 const char *type_name;
920 char name[IFNAMSIZ + 20];
921};
922
925/** 923/**
926 * struct efx_nic_type - Efx device type definition 924 * struct efx_nic_type - Efx device type definition
925 * @mem_map_size: Get memory BAR mapped size
927 * @probe: Probe the controller 926 * @probe: Probe the controller
928 * @remove: Free resources allocated by probe() 927 * @remove: Free resources allocated by probe()
929 * @init: Initialise the controller 928 * @init: Initialise the controller
@@ -938,47 +937,118 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
938 * @probe_port: Probe the MAC and PHY 937 * @probe_port: Probe the MAC and PHY
939 * @remove_port: Free resources allocated by probe_port() 938 * @remove_port: Free resources allocated by probe_port()
940 * @handle_global_event: Handle a "global" event (may be %NULL) 939 * @handle_global_event: Handle a "global" event (may be %NULL)
940 * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
941 * @prepare_flush: Prepare the hardware for flushing the DMA queues 941 * @prepare_flush: Prepare the hardware for flushing the DMA queues
942 * @finish_flush: Clean up after flushing the DMA queues 942 * (for Falcon architecture)
943 * @update_stats: Update statistics not provided by event handling 943 * @finish_flush: Clean up after flushing the DMA queues (for Falcon
944 * architecture)
945 * @describe_stats: Describe statistics for ethtool
946 * @update_stats: Update statistics not provided by event handling.
947 * Either argument may be %NULL.
944 * @start_stats: Start the regular fetching of statistics 948 * @start_stats: Start the regular fetching of statistics
945 * @stop_stats: Stop the regular fetching of statistics 949 * @stop_stats: Stop the regular fetching of statistics
946 * @set_id_led: Set state of identifying LED or revert to automatic function 950 * @set_id_led: Set state of identifying LED or revert to automatic function
947 * @push_irq_moderation: Apply interrupt moderation value 951 * @push_irq_moderation: Apply interrupt moderation value
948 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY 952 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
953 * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
949 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings 954 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
950 * to the hardware. Serialised by the mac_lock. 955 * to the hardware. Serialised by the mac_lock.
951 * @check_mac_fault: Check MAC fault state. True if fault present. 956 * @check_mac_fault: Check MAC fault state. True if fault present.
952 * @get_wol: Get WoL configuration from driver state 957 * @get_wol: Get WoL configuration from driver state
953 * @set_wol: Push WoL configuration to the NIC 958 * @set_wol: Push WoL configuration to the NIC
954 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) 959 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
955 * @test_chip: Test registers. Should use efx_nic_test_registers(), and is 960 * @test_chip: Test registers. May use efx_farch_test_registers(), and is
956 * expected to reset the NIC. 961 * expected to reset the NIC.
957 * @test_nvram: Test validity of NVRAM contents 962 * @test_nvram: Test validity of NVRAM contents
963 * @mcdi_request: Send an MCDI request with the given header and SDU.
964 * The SDU length may be any value from 0 up to the protocol-
965 * defined maximum, but its buffer will be padded to a multiple
966 * of 4 bytes.
967 * @mcdi_poll_response: Test whether an MCDI response is available.
968 * @mcdi_read_response: Read the MCDI response PDU. The offset will
969 * be a multiple of 4. The length may not be, but the buffer
970 * will be padded so it is safe to round up.
971 * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
972 * return an appropriate error code for aborting any current
973 * request; otherwise return 0.
974 * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
975 * be separately enabled after this.
976 * @irq_test_generate: Generate a test IRQ
977 * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
978 * queue must be separately disabled before this.
979 * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
980 * a pointer to the &struct efx_msi_context for the channel.
981 * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
982 * is a pointer to the &struct efx_nic.
983 * @tx_probe: Allocate resources for TX queue
984 * @tx_init: Initialise TX queue on the NIC
985 * @tx_remove: Free resources for TX queue
986 * @tx_write: Write TX descriptors and doorbell
987 * @rx_push_indir_table: Write RSS indirection table to the NIC
988 * @rx_probe: Allocate resources for RX queue
989 * @rx_init: Initialise RX queue on the NIC
990 * @rx_remove: Free resources for RX queue
991 * @rx_write: Write RX descriptors and doorbell
992 * @rx_defer_refill: Generate a refill reminder event
993 * @ev_probe: Allocate resources for event queue
994 * @ev_init: Initialise event queue on the NIC
995 * @ev_fini: Deinitialise event queue on the NIC
996 * @ev_remove: Free resources for event queue
997 * @ev_process: Process events for a queue, up to the given NAPI quota
998 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
999 * @ev_test_generate: Generate a test event
1000 * @filter_table_probe: Probe filter capabilities and set up filter software state
1001 * @filter_table_restore: Restore filters removed from hardware
1002 * @filter_table_remove: Remove filters from hardware and tear down software state
1003 * @filter_update_rx_scatter: Update filters after change to rx scatter setting
 1004 * @filter_insert: Add or replace a filter
 1005 * @filter_remove_safe: Remove a filter by ID, carefully
 1006 * @filter_get_safe: Retrieve a filter by ID, carefully
 1007 * @filter_clear_rx: Remove RX filters by priority
1008 * @filter_count_rx_used: Get the number of filters in use at a given priority
1009 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
1010 * @filter_get_rx_ids: Get list of RX filters at a given priority
1011 * @filter_rfs_insert: Add or replace a filter for RFS. This must be
1012 * atomic. The hardware change may be asynchronous but should
1013 * not be delayed for long. It may fail if this can't be done
1014 * atomically.
1015 * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
1016 * This must check whether the specified table entry is used by RFS
1017 * and that rps_may_expire_flow() returns true for it.
1018 * @mtd_probe: Probe and add MTD partitions associated with this net device,
1019 * using efx_mtd_add()
1020 * @mtd_rename: Set an MTD partition name using the net device name
1021 * @mtd_read: Read from an MTD partition
1022 * @mtd_erase: Erase part of an MTD partition
1023 * @mtd_write: Write to an MTD partition
1024 * @mtd_sync: Wait for write-back to complete on MTD partition. This
1025 * also notifies the driver that a writer has finished using this
1026 * partition.
958 * @revision: Hardware architecture revision 1027 * @revision: Hardware architecture revision
959 * @mem_map_size: Memory BAR mapped size
960 * @txd_ptr_tbl_base: TX descriptor ring base address 1028 * @txd_ptr_tbl_base: TX descriptor ring base address
961 * @rxd_ptr_tbl_base: RX descriptor ring base address 1029 * @rxd_ptr_tbl_base: RX descriptor ring base address
962 * @buf_tbl_base: Buffer table base address 1030 * @buf_tbl_base: Buffer table base address
963 * @evq_ptr_tbl_base: Event queue pointer table base address 1031 * @evq_ptr_tbl_base: Event queue pointer table base address
964 * @evq_rptr_tbl_base: Event queue read-pointer table base address 1032 * @evq_rptr_tbl_base: Event queue read-pointer table base address
965 * @max_dma_mask: Maximum possible DMA mask 1033 * @max_dma_mask: Maximum possible DMA mask
966 * @rx_buffer_hash_size: Size of hash at start of RX packet 1034 * @rx_prefix_size: Size of RX prefix before packet data
1035 * @rx_hash_offset: Offset of RX flow hash within prefix
967 * @rx_buffer_padding: Size of padding at end of RX packet 1036 * @rx_buffer_padding: Size of padding at end of RX packet
968 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers 1037 * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
1038 * @always_rx_scatter: NIC will always scatter packets to multiple buffers
969 * @max_interrupt_mode: Highest capability interrupt mode supported 1039 * @max_interrupt_mode: Highest capability interrupt mode supported
970 * from &enum efx_init_mode. 1040 * from &enum efx_init_mode.
971 * @phys_addr_channels: Number of channels with physically addressed
972 * descriptors
973 * @timer_period_max: Maximum period of interrupt timer (in ticks) 1041 * @timer_period_max: Maximum period of interrupt timer (in ticks)
974 * @offload_features: net_device feature flags for protocol offload 1042 * @offload_features: net_device feature flags for protocol offload
975 * features implemented in hardware 1043 * features implemented in hardware
1044 * @mcdi_max_ver: Maximum MCDI version supported
976 */ 1045 */
977struct efx_nic_type { 1046struct efx_nic_type {
1047 unsigned int (*mem_map_size)(struct efx_nic *efx);
978 int (*probe)(struct efx_nic *efx); 1048 int (*probe)(struct efx_nic *efx);
979 void (*remove)(struct efx_nic *efx); 1049 void (*remove)(struct efx_nic *efx);
980 int (*init)(struct efx_nic *efx); 1050 int (*init)(struct efx_nic *efx);
981 void (*dimension_resources)(struct efx_nic *efx); 1051 int (*dimension_resources)(struct efx_nic *efx);
982 void (*fini)(struct efx_nic *efx); 1052 void (*fini)(struct efx_nic *efx);
983 void (*monitor)(struct efx_nic *efx); 1053 void (*monitor)(struct efx_nic *efx);
984 enum reset_type (*map_reset_reason)(enum reset_type reason); 1054 enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -987,14 +1057,18 @@ struct efx_nic_type {
987 int (*probe_port)(struct efx_nic *efx); 1057 int (*probe_port)(struct efx_nic *efx);
988 void (*remove_port)(struct efx_nic *efx); 1058 void (*remove_port)(struct efx_nic *efx);
989 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); 1059 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
1060 int (*fini_dmaq)(struct efx_nic *efx);
990 void (*prepare_flush)(struct efx_nic *efx); 1061 void (*prepare_flush)(struct efx_nic *efx);
991 void (*finish_flush)(struct efx_nic *efx); 1062 void (*finish_flush)(struct efx_nic *efx);
992 void (*update_stats)(struct efx_nic *efx); 1063 size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
1064 size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
1065 struct rtnl_link_stats64 *core_stats);
993 void (*start_stats)(struct efx_nic *efx); 1066 void (*start_stats)(struct efx_nic *efx);
994 void (*stop_stats)(struct efx_nic *efx); 1067 void (*stop_stats)(struct efx_nic *efx);
995 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); 1068 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
996 void (*push_irq_moderation)(struct efx_channel *channel); 1069 void (*push_irq_moderation)(struct efx_channel *channel);
997 int (*reconfigure_port)(struct efx_nic *efx); 1070 int (*reconfigure_port)(struct efx_nic *efx);
1071 void (*prepare_enable_fc_tx)(struct efx_nic *efx);
998 int (*reconfigure_mac)(struct efx_nic *efx); 1072 int (*reconfigure_mac)(struct efx_nic *efx);
999 bool (*check_mac_fault)(struct efx_nic *efx); 1073 bool (*check_mac_fault)(struct efx_nic *efx);
1000 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); 1074 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
@@ -1002,22 +1076,90 @@ struct efx_nic_type {
1002 void (*resume_wol)(struct efx_nic *efx); 1076 void (*resume_wol)(struct efx_nic *efx);
1003 int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); 1077 int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
1004 int (*test_nvram)(struct efx_nic *efx); 1078 int (*test_nvram)(struct efx_nic *efx);
1079 void (*mcdi_request)(struct efx_nic *efx,
1080 const efx_dword_t *hdr, size_t hdr_len,
1081 const efx_dword_t *sdu, size_t sdu_len);
1082 bool (*mcdi_poll_response)(struct efx_nic *efx);
1083 void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
1084 size_t pdu_offset, size_t pdu_len);
1085 int (*mcdi_poll_reboot)(struct efx_nic *efx);
1086 void (*irq_enable_master)(struct efx_nic *efx);
1087 void (*irq_test_generate)(struct efx_nic *efx);
1088 void (*irq_disable_non_ev)(struct efx_nic *efx);
1089 irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
1090 irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
1091 int (*tx_probe)(struct efx_tx_queue *tx_queue);
1092 void (*tx_init)(struct efx_tx_queue *tx_queue);
1093 void (*tx_remove)(struct efx_tx_queue *tx_queue);
1094 void (*tx_write)(struct efx_tx_queue *tx_queue);
1095 void (*rx_push_indir_table)(struct efx_nic *efx);
1096 int (*rx_probe)(struct efx_rx_queue *rx_queue);
1097 void (*rx_init)(struct efx_rx_queue *rx_queue);
1098 void (*rx_remove)(struct efx_rx_queue *rx_queue);
1099 void (*rx_write)(struct efx_rx_queue *rx_queue);
1100 void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
1101 int (*ev_probe)(struct efx_channel *channel);
1102 int (*ev_init)(struct efx_channel *channel);
1103 void (*ev_fini)(struct efx_channel *channel);
1104 void (*ev_remove)(struct efx_channel *channel);
1105 int (*ev_process)(struct efx_channel *channel, int quota);
1106 void (*ev_read_ack)(struct efx_channel *channel);
1107 void (*ev_test_generate)(struct efx_channel *channel);
1108 int (*filter_table_probe)(struct efx_nic *efx);
1109 void (*filter_table_restore)(struct efx_nic *efx);
1110 void (*filter_table_remove)(struct efx_nic *efx);
1111 void (*filter_update_rx_scatter)(struct efx_nic *efx);
1112 s32 (*filter_insert)(struct efx_nic *efx,
1113 struct efx_filter_spec *spec, bool replace);
1114 int (*filter_remove_safe)(struct efx_nic *efx,
1115 enum efx_filter_priority priority,
1116 u32 filter_id);
1117 int (*filter_get_safe)(struct efx_nic *efx,
1118 enum efx_filter_priority priority,
1119 u32 filter_id, struct efx_filter_spec *);
1120 void (*filter_clear_rx)(struct efx_nic *efx,
1121 enum efx_filter_priority priority);
1122 u32 (*filter_count_rx_used)(struct efx_nic *efx,
1123 enum efx_filter_priority priority);
1124 u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
1125 s32 (*filter_get_rx_ids)(struct efx_nic *efx,
1126 enum efx_filter_priority priority,
1127 u32 *buf, u32 size);
1128#ifdef CONFIG_RFS_ACCEL
1129 s32 (*filter_rfs_insert)(struct efx_nic *efx,
1130 struct efx_filter_spec *spec);
1131 bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
1132 unsigned int index);
1133#endif
1134#ifdef CONFIG_SFC_MTD
1135 int (*mtd_probe)(struct efx_nic *efx);
1136 void (*mtd_rename)(struct efx_mtd_partition *part);
1137 int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
1138 size_t *retlen, u8 *buffer);
1139 int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
1140 int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
1141 size_t *retlen, const u8 *buffer);
1142 int (*mtd_sync)(struct mtd_info *mtd);
1143#endif
1144 void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
1005 1145
1006 int revision; 1146 int revision;
1007 unsigned int mem_map_size;
1008 unsigned int txd_ptr_tbl_base; 1147 unsigned int txd_ptr_tbl_base;
1009 unsigned int rxd_ptr_tbl_base; 1148 unsigned int rxd_ptr_tbl_base;
1010 unsigned int buf_tbl_base; 1149 unsigned int buf_tbl_base;
1011 unsigned int evq_ptr_tbl_base; 1150 unsigned int evq_ptr_tbl_base;
1012 unsigned int evq_rptr_tbl_base; 1151 unsigned int evq_rptr_tbl_base;
1013 u64 max_dma_mask; 1152 u64 max_dma_mask;
1014 unsigned int rx_buffer_hash_size; 1153 unsigned int rx_prefix_size;
1154 unsigned int rx_hash_offset;
1015 unsigned int rx_buffer_padding; 1155 unsigned int rx_buffer_padding;
1016 bool can_rx_scatter; 1156 bool can_rx_scatter;
1157 bool always_rx_scatter;
1017 unsigned int max_interrupt_mode; 1158 unsigned int max_interrupt_mode;
1018 unsigned int phys_addr_channels;
1019 unsigned int timer_period_max; 1159 unsigned int timer_period_max;
1020 netdev_features_t offload_features; 1160 netdev_features_t offload_features;
1161 int mcdi_max_ver;
1162 unsigned int max_rx_ip_filters;
1021}; 1163};
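For orientation, each controller generation implements this contract by filling in a static efx_nic_type. A minimal sketch, assuming hypothetical my_nic_* hooks rather than the real Falcon/Siena/EF10 implementations:

static unsigned int my_nic_mem_map_size(struct efx_nic *efx)
{
	return 1 << 20;	/* map only the BAR region this controller decodes */
}

static const struct efx_nic_type my_nic_type = {
	.mem_map_size	= my_nic_mem_map_size,
	.max_dma_mask	= DMA_BIT_MASK(46),
	.can_rx_scatter	= true,
	.mcdi_max_ver	= 1,
	/* hooks documented above as "may be %NULL" can be left unset */
};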
1022 1164
1023/************************************************************************** 1165/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 56ed3bc71e00..e7dbd2dd202e 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -19,295 +19,22 @@
19#include "bitfield.h" 19#include "bitfield.h"
20#include "efx.h" 20#include "efx.h"
21#include "nic.h" 21#include "nic.h"
22#include "regs.h" 22#include "farch_regs.h"
23#include "io.h" 23#include "io.h"
24#include "workarounds.h" 24#include "workarounds.h"
25 25
26/************************************************************************** 26/**************************************************************************
27 * 27 *
28 * Configurable values
29 *
30 **************************************************************************
31 */
32
33/* This is set to 16 for a good reason. In summary, if larger than
34 * 16, the descriptor cache holds more than a default socket
 35 * buffer's worth of packets (for UDP we can have at most one
 36 * socket buffer's worth outstanding). Combined with the fact
 37 * that we get only one TX event per descriptor cache, this means
 38 * the NIC goes idle.
39 */
40#define TX_DC_ENTRIES 16
41#define TX_DC_ENTRIES_ORDER 1
42
43#define RX_DC_ENTRIES 64
44#define RX_DC_ENTRIES_ORDER 3
45
46/* If EFX_MAX_INT_ERRORS internal errors occur within
47 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
48 * disable it.
49 */
50#define EFX_INT_ERROR_EXPIRE 3600
51#define EFX_MAX_INT_ERRORS 5
52
53/* Depth of RX flush request fifo */
54#define EFX_RX_FLUSH_COUNT 4
55
56/* Driver generated events */
57#define _EFX_CHANNEL_MAGIC_TEST 0x000101
58#define _EFX_CHANNEL_MAGIC_FILL 0x000102
59#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
60#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
61
62#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
63#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
64
65#define EFX_CHANNEL_MAGIC_TEST(_channel) \
66 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
67#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
68 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
69 efx_rx_queue_index(_rx_queue))
70#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
71 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
72 efx_rx_queue_index(_rx_queue))
73#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
74 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
75 (_tx_queue)->queue)
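/* Worked example (illustrative): for channel 3, EFX_CHANNEL_MAGIC_TEST()
 * expands to _EFX_CHANNEL_MAGIC(0x000101, 3), i.e. (0x000101 << 8) | 3
 * == 0x00010103, and _EFX_CHANNEL_MAGIC_CODE(0x00010103) recovers the
 * code 0x000101.
 */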
76
77static void efx_magic_event(struct efx_channel *channel, u32 magic);
78
79/**************************************************************************
80 *
81 * Solarstorm hardware access
82 *
83 **************************************************************************/
84
85static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
86 unsigned int index)
87{
88 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
89 value, index);
90}
91
92/* Read the current event from the event queue */
93static inline efx_qword_t *efx_event(struct efx_channel *channel,
94 unsigned int index)
95{
96 return ((efx_qword_t *) (channel->eventq.addr)) +
97 (index & channel->eventq_mask);
98}
99
100/* See if an event is present
101 *
102 * We check both the high and low dword of the event for all ones. We
103 * wrote all ones when we cleared the event, and no valid event can
104 * have all ones in either its high or low dwords. This approach is
105 * robust against reordering.
106 *
107 * Note that using a single 64-bit comparison is incorrect; even
108 * though the CPU read will be atomic, the DMA write may not be.
109 */
110static inline int efx_event_present(efx_qword_t *event)
111{
112 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
113 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
114}
115
116static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
117 const efx_oword_t *mask)
118{
119 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
120 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
121}
122
123int efx_nic_test_registers(struct efx_nic *efx,
124 const struct efx_nic_register_test *regs,
125 size_t n_regs)
126{
127 unsigned address = 0, i, j;
128 efx_oword_t mask, imask, original, reg, buf;
129
130 for (i = 0; i < n_regs; ++i) {
131 address = regs[i].address;
132 mask = imask = regs[i].mask;
133 EFX_INVERT_OWORD(imask);
134
135 efx_reado(efx, &original, address);
136
137 /* bit sweep on and off */
138 for (j = 0; j < 128; j++) {
139 if (!EFX_EXTRACT_OWORD32(mask, j, j))
140 continue;
141
142 /* Test this testable bit can be set in isolation */
143 EFX_AND_OWORD(reg, original, mask);
144 EFX_SET_OWORD32(reg, j, j, 1);
145
146 efx_writeo(efx, &reg, address);
147 efx_reado(efx, &buf, address);
148
149 if (efx_masked_compare_oword(&reg, &buf, &mask))
150 goto fail;
151
152 /* Test this testable bit can be cleared in isolation */
153 EFX_OR_OWORD(reg, original, mask);
154 EFX_SET_OWORD32(reg, j, j, 0);
155
156 efx_writeo(efx, &reg, address);
157 efx_reado(efx, &buf, address);
158
159 if (efx_masked_compare_oword(&reg, &buf, &mask))
160 goto fail;
161 }
162
163 efx_writeo(efx, &original, address);
164 }
165
166 return 0;
167
168fail:
169 netif_err(efx, hw, efx->net_dev,
170 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
171 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
172 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
173 return -EIO;
174}
175
176/**************************************************************************
177 *
178 * Special buffer handling
179 * Special buffers are used for event queues and the TX and RX
180 * descriptor rings.
181 *
182 *************************************************************************/
183
184/*
185 * Initialise a special buffer
186 *
187 * This will define a buffer (previously allocated via
188 * efx_alloc_special_buffer()) in the buffer table, allowing
189 * it to be used for event queues, descriptor rings etc.
190 */
191static void
192efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
193{
194 efx_qword_t buf_desc;
195 unsigned int index;
196 dma_addr_t dma_addr;
197 int i;
198
199 EFX_BUG_ON_PARANOID(!buffer->addr);
200
201 /* Write buffer descriptors to NIC */
202 for (i = 0; i < buffer->entries; i++) {
203 index = buffer->index + i;
204 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
205 netif_dbg(efx, probe, efx->net_dev,
206 "mapping special buffer %d at %llx\n",
207 index, (unsigned long long)dma_addr);
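		/* Each buffer table entry holds a 4K-aligned page address,
		 * so the low 12 bits of dma_addr are dropped by the >> 12
		 * below.
		 */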
208 EFX_POPULATE_QWORD_3(buf_desc,
209 FRF_AZ_BUF_ADR_REGION, 0,
210 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
211 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
212 efx_write_buf_tbl(efx, &buf_desc, index);
213 }
214}
215
216/* Unmaps a buffer and clears the buffer table entries */
217static void
218efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
219{
220 efx_oword_t buf_tbl_upd;
221 unsigned int start = buffer->index;
222 unsigned int end = (buffer->index + buffer->entries - 1);
223
224 if (!buffer->entries)
225 return;
226
227 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
228 buffer->index, buffer->index + buffer->entries - 1);
229
230 EFX_POPULATE_OWORD_4(buf_tbl_upd,
231 FRF_AZ_BUF_UPD_CMD, 0,
232 FRF_AZ_BUF_CLR_CMD, 1,
233 FRF_AZ_BUF_CLR_END_ID, end,
234 FRF_AZ_BUF_CLR_START_ID, start);
235 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
236}
237
238/*
239 * Allocate a new special buffer
240 *
241 * This allocates memory for a new buffer, clears it and allocates a
242 * new buffer ID range. It does not write into the buffer table.
243 *
244 * This call will allocate 4KB buffers, since 8KB buffers can't be
245 * used for event queues and descriptor rings.
246 */
247static int efx_alloc_special_buffer(struct efx_nic *efx,
248 struct efx_special_buffer *buffer,
249 unsigned int len)
250{
251 len = ALIGN(len, EFX_BUF_SIZE);
252
253 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
254 &buffer->dma_addr, GFP_KERNEL);
255 if (!buffer->addr)
256 return -ENOMEM;
257 buffer->len = len;
258 buffer->entries = len / EFX_BUF_SIZE;
259 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
260
261 /* Select new buffer ID */
262 buffer->index = efx->next_buffer_table;
263 efx->next_buffer_table += buffer->entries;
264#ifdef CONFIG_SFC_SRIOV
265 BUG_ON(efx_sriov_enabled(efx) &&
266 efx->vf_buftbl_base < efx->next_buffer_table);
267#endif
268
269 netif_dbg(efx, probe, efx->net_dev,
270 "allocating special buffers %d-%d at %llx+%x "
271 "(virt %p phys %llx)\n", buffer->index,
272 buffer->index + buffer->entries - 1,
273 (u64)buffer->dma_addr, len,
274 buffer->addr, (u64)virt_to_phys(buffer->addr));
275
276 return 0;
277}
278
279static void
280efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
281{
282 if (!buffer->addr)
283 return;
284
285 netif_dbg(efx, hw, efx->net_dev,
286 "deallocating special buffers %d-%d at %llx+%x "
287 "(virt %p phys %llx)\n", buffer->index,
288 buffer->index + buffer->entries - 1,
289 (u64)buffer->dma_addr, buffer->len,
290 buffer->addr, (u64)virt_to_phys(buffer->addr));
291
292 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
293 buffer->dma_addr);
294 buffer->addr = NULL;
295 buffer->entries = 0;
296}
297
298/**************************************************************************
299 *
300 * Generic buffer handling 28 * Generic buffer handling
301 * These buffers are used for interrupt status, MAC stats, etc. 29 * These buffers are used for interrupt status, MAC stats, etc.
302 * 30 *
303 **************************************************************************/ 31 **************************************************************************/
304 32
305int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 33int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
306 unsigned int len) 34 unsigned int len, gfp_t gfp_flags)
307{ 35{
308 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 36 buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
309 &buffer->dma_addr, 37 &buffer->dma_addr, gfp_flags);
310 GFP_ATOMIC | __GFP_ZERO);
311 if (!buffer->addr) 38 if (!buffer->addr)
312 return -ENOMEM; 39 return -ENOMEM;
313 buffer->len = len; 40 buffer->len = len;
@@ -323,1057 +50,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
323 } 50 }
324} 51}
325 52
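Callers of the reworked allocator choose the allocation context explicitly through the new gfp_flags argument. A hedged usage sketch (the buffer and length are illustrative):

	struct efx_buffer buf;
	int rc;

	rc = efx_nic_alloc_buffer(efx, &buf, 1024, GFP_KERNEL);
	if (rc)
		return rc;	/* pass GFP_ATOMIC instead where sleeping is not allowed */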
326/**************************************************************************
327 *
328 * TX path
329 *
330 **************************************************************************/
331
332/* Returns a pointer to the specified transmit descriptor in the TX
333 * descriptor queue belonging to the specified channel.
334 */
335static inline efx_qword_t *
336efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
337{
338 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
339}
340
341/* This writes to TX_DESC_WPTR, the write pointer for the TX descriptor ring */
342static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
343{
344 unsigned write_ptr;
345 efx_dword_t reg;
346
347 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
348 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
349 efx_writed_page(tx_queue->efx, &reg,
350 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
351}
352
353/* Write pointer and first descriptor for TX descriptor ring */
354static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
355 const efx_qword_t *txd)
356{
357 unsigned write_ptr;
358 efx_oword_t reg;
359
360 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
361 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
362
363 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
364 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
365 FRF_AZ_TX_DESC_WPTR, write_ptr);
366 reg.qword[0] = *txd;
367 efx_writeo_page(tx_queue->efx, &reg,
368 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
369}
370
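/* Decide whether to use a TX push for this doorbell.  Pushing (writing
 * the first descriptor together with the write pointer) only pays off
 * when the queue was last seen empty (empty_read_count still carries
 * EFX_EMPTY_COUNT_VALID and matches write_count) and exactly one new
 * descriptor has been inserted since then.
 */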
371static inline bool
372efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
373{
374 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
375
376 if (empty_read_count == 0)
377 return false;
378
379 tx_queue->empty_read_count = 0;
380 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
381 && tx_queue->write_count - write_count == 1;
382}
383
384/* For each entry inserted into the software descriptor ring, create a
385 * descriptor in the hardware TX descriptor ring (in host memory), and
386 * write a doorbell.
387 */
388void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
389{
390
391 struct efx_tx_buffer *buffer;
392 efx_qword_t *txd;
393 unsigned write_ptr;
394 unsigned old_write_count = tx_queue->write_count;
395
396 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
397
398 do {
399 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
400 buffer = &tx_queue->buffer[write_ptr];
401 txd = efx_tx_desc(tx_queue, write_ptr);
402 ++tx_queue->write_count;
403
404 /* Create TX descriptor ring entry */
405 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
406 EFX_POPULATE_QWORD_4(*txd,
407 FSF_AZ_TX_KER_CONT,
408 buffer->flags & EFX_TX_BUF_CONT,
409 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
410 FSF_AZ_TX_KER_BUF_REGION, 0,
411 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
412 } while (tx_queue->write_count != tx_queue->insert_count);
413
414 wmb(); /* Ensure descriptors are written before they are fetched */
415
416 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
417 txd = efx_tx_desc(tx_queue,
418 old_write_count & tx_queue->ptr_mask);
419 efx_push_tx_desc(tx_queue, txd);
420 ++tx_queue->pushes;
421 } else {
422 efx_notify_tx_desc(tx_queue);
423 }
424}
425
426/* Allocate hardware resources for a TX queue */
427int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
428{
429 struct efx_nic *efx = tx_queue->efx;
430 unsigned entries;
431
432 entries = tx_queue->ptr_mask + 1;
433 return efx_alloc_special_buffer(efx, &tx_queue->txd,
434 entries * sizeof(efx_qword_t));
435}
436
437void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
438{
439 struct efx_nic *efx = tx_queue->efx;
440 efx_oword_t reg;
441
442 /* Pin TX descriptor ring */
443 efx_init_special_buffer(efx, &tx_queue->txd);
444
445 /* Push TX descriptor ring to card */
446 EFX_POPULATE_OWORD_10(reg,
447 FRF_AZ_TX_DESCQ_EN, 1,
448 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
449 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
450 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
451 FRF_AZ_TX_DESCQ_EVQ_ID,
452 tx_queue->channel->channel,
453 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
454 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
455 FRF_AZ_TX_DESCQ_SIZE,
456 __ffs(tx_queue->txd.entries),
457 FRF_AZ_TX_DESCQ_TYPE, 0,
458 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
459
460 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
461 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
462 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
463 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
464 !csum);
465 }
466
467 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
468 tx_queue->queue);
469
470 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
471 /* Only 128 bits in this register */
472 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
473
474 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
475 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
476 __clear_bit_le(tx_queue->queue, &reg);
477 else
478 __set_bit_le(tx_queue->queue, &reg);
479 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
480 }
481
482 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
483 EFX_POPULATE_OWORD_1(reg,
484 FRF_BZ_TX_PACE,
485 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
486 FFE_BZ_TX_PACE_OFF :
487 FFE_BZ_TX_PACE_RESERVED);
488 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
489 tx_queue->queue);
490 }
491}
492
493static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
494{
495 struct efx_nic *efx = tx_queue->efx;
496 efx_oword_t tx_flush_descq;
497
498 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
499 atomic_set(&tx_queue->flush_outstanding, 1);
500
501 EFX_POPULATE_OWORD_2(tx_flush_descq,
502 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
503 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
504 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
505}
506
507void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
508{
509 struct efx_nic *efx = tx_queue->efx;
510 efx_oword_t tx_desc_ptr;
511
512 /* Remove TX descriptor ring from card */
513 EFX_ZERO_OWORD(tx_desc_ptr);
514 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
515 tx_queue->queue);
516
517 /* Unpin TX descriptor ring */
518 efx_fini_special_buffer(efx, &tx_queue->txd);
519}
520
521/* Free buffers backing TX queue */
522void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
523{
524 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
525}
526
527/**************************************************************************
528 *
529 * RX path
530 *
531 **************************************************************************/
532
533/* Returns a pointer to the specified descriptor in the RX descriptor queue */
534static inline efx_qword_t *
535efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
536{
537 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
538}
539
540/* This creates an entry in the RX descriptor queue */
541static inline void
542efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
543{
544 struct efx_rx_buffer *rx_buf;
545 efx_qword_t *rxd;
546
547 rxd = efx_rx_desc(rx_queue, index);
548 rx_buf = efx_rx_buffer(rx_queue, index);
549 EFX_POPULATE_QWORD_3(*rxd,
550 FSF_AZ_RX_KER_BUF_SIZE,
551 rx_buf->len -
552 rx_queue->efx->type->rx_buffer_padding,
553 FSF_AZ_RX_KER_BUF_REGION, 0,
554 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
555}
556
557/* This writes to the RX_DESC_WPTR register for the specified receive
558 * descriptor ring.
559 */
560void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
561{
562 struct efx_nic *efx = rx_queue->efx;
563 efx_dword_t reg;
564 unsigned write_ptr;
565
566 while (rx_queue->notified_count != rx_queue->added_count) {
567 efx_build_rx_desc(
568 rx_queue,
569 rx_queue->notified_count & rx_queue->ptr_mask);
570 ++rx_queue->notified_count;
571 }
572
573 wmb();
574 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
575 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
576 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
577 efx_rx_queue_index(rx_queue));
578}
579
580int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
581{
582 struct efx_nic *efx = rx_queue->efx;
583 unsigned entries;
584
585 entries = rx_queue->ptr_mask + 1;
586 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
587 entries * sizeof(efx_qword_t));
588}
589
590void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
591{
592 efx_oword_t rx_desc_ptr;
593 struct efx_nic *efx = rx_queue->efx;
594 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
595 bool iscsi_digest_en = is_b0;
596 bool jumbo_en;
597
598 /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
599 * DMA to continue after a PCIe page boundary (and scattering
600 * is not possible). In Falcon B0 and Siena, it enables
601 * scatter.
602 */
603 jumbo_en = !is_b0 || efx->rx_scatter;
604
605 netif_dbg(efx, hw, efx->net_dev,
606 "RX queue %d ring in special buffers %d-%d\n",
607 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
608 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
609
610 rx_queue->scatter_n = 0;
611
612 /* Pin RX descriptor ring */
613 efx_init_special_buffer(efx, &rx_queue->rxd);
614
615 /* Push RX descriptor ring to card */
616 EFX_POPULATE_OWORD_10(rx_desc_ptr,
617 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
618 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
619 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
620 FRF_AZ_RX_DESCQ_EVQ_ID,
621 efx_rx_queue_channel(rx_queue)->channel,
622 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
623 FRF_AZ_RX_DESCQ_LABEL,
624 efx_rx_queue_index(rx_queue),
625 FRF_AZ_RX_DESCQ_SIZE,
626 __ffs(rx_queue->rxd.entries),
627 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
628 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
629 FRF_AZ_RX_DESCQ_EN, 1);
630 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
631 efx_rx_queue_index(rx_queue));
632}
633
634static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
635{
636 struct efx_nic *efx = rx_queue->efx;
637 efx_oword_t rx_flush_descq;
638
639 EFX_POPULATE_OWORD_2(rx_flush_descq,
640 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
641 FRF_AZ_RX_FLUSH_DESCQ,
642 efx_rx_queue_index(rx_queue));
643 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
644}
645
646void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
647{
648 efx_oword_t rx_desc_ptr;
649 struct efx_nic *efx = rx_queue->efx;
650
651 /* Remove RX descriptor ring from card */
652 EFX_ZERO_OWORD(rx_desc_ptr);
653 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
654 efx_rx_queue_index(rx_queue));
655
656 /* Unpin RX descriptor ring */
657 efx_fini_special_buffer(efx, &rx_queue->rxd);
658}
659
660/* Free buffers backing RX queue */
661void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
662{
663 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
664}
665
666/**************************************************************************
667 *
668 * Flush handling
669 *
670 **************************************************************************/
671
672/* The wait in efx_nic_flush_queues() must be woken when all flushes have
673 * completed, or when more RX flushes can be kicked off.
674 */
675static bool efx_flush_wake(struct efx_nic *efx)
676{
677 /* Ensure that all updates are visible to efx_nic_flush_queues() */
678 smp_mb();
679
680 return (atomic_read(&efx->drain_pending) == 0 ||
681 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
682 && atomic_read(&efx->rxq_flush_pending) > 0));
683}
684
685static bool efx_check_tx_flush_complete(struct efx_nic *efx)
686{
687	bool complete = true;
688 efx_oword_t txd_ptr_tbl;
689 struct efx_channel *channel;
690 struct efx_tx_queue *tx_queue;
691
692 efx_for_each_channel(channel, efx) {
693 efx_for_each_channel_tx_queue(tx_queue, channel) {
694 efx_reado_table(efx, &txd_ptr_tbl,
695 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
696 if (EFX_OWORD_FIELD(txd_ptr_tbl,
697 FRF_AZ_TX_DESCQ_FLUSH) ||
698 EFX_OWORD_FIELD(txd_ptr_tbl,
699 FRF_AZ_TX_DESCQ_EN)) {
700 netif_dbg(efx, hw, efx->net_dev,
701 "flush did not complete on TXQ %d\n",
702 tx_queue->queue);
703				complete = false;
704 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
705 1, 0)) {
706 /* The flush is complete, but we didn't
707 * receive a flush completion event
708 */
709 netif_dbg(efx, hw, efx->net_dev,
710 "flush complete on TXQ %d, so drain "
711 "the queue\n", tx_queue->queue);
712 /* Don't need to increment drain_pending as it
713 * has already been incremented for the queues
714 * which did not drain
715 */
716 efx_magic_event(channel,
717 EFX_CHANNEL_MAGIC_TX_DRAIN(
718 tx_queue));
719 }
720 }
721 }
722
723	return complete;
724}
725
726/* Flush all the transmit queues, and continue flushing receive queues until
727 * they're all flushed. Wait for the DRAIN events to be received so that there
728 * are no more RX and TX events left on any channel. */
729int efx_nic_flush_queues(struct efx_nic *efx)
730{
731 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
732 struct efx_channel *channel;
733 struct efx_rx_queue *rx_queue;
734 struct efx_tx_queue *tx_queue;
735 int rc = 0;
736
737 efx->type->prepare_flush(efx);
738
739 efx_for_each_channel(channel, efx) {
740 efx_for_each_channel_tx_queue(tx_queue, channel) {
741 atomic_inc(&efx->drain_pending);
742 efx_flush_tx_queue(tx_queue);
743 }
744 efx_for_each_channel_rx_queue(rx_queue, channel) {
745 atomic_inc(&efx->drain_pending);
746 rx_queue->flush_pending = true;
747 atomic_inc(&efx->rxq_flush_pending);
748 }
749 }
750
751 while (timeout && atomic_read(&efx->drain_pending) > 0) {
752 /* If SRIOV is enabled, then offload receive queue flushing to
753 * the firmware (though we will still have to poll for
754 * completion). If that fails, fall back to the old scheme.
755 */
756 if (efx_sriov_enabled(efx)) {
757 rc = efx_mcdi_flush_rxqs(efx);
758 if (!rc)
759 goto wait;
760 }
761
762 /* The hardware supports four concurrent rx flushes, each of
763 * which may need to be retried if there is an outstanding
764 * descriptor fetch
765 */
766 efx_for_each_channel(channel, efx) {
767 efx_for_each_channel_rx_queue(rx_queue, channel) {
768 if (atomic_read(&efx->rxq_flush_outstanding) >=
769 EFX_RX_FLUSH_COUNT)
770 break;
771
772 if (rx_queue->flush_pending) {
773 rx_queue->flush_pending = false;
774 atomic_dec(&efx->rxq_flush_pending);
775 atomic_inc(&efx->rxq_flush_outstanding);
776 efx_flush_rx_queue(rx_queue);
777 }
778 }
779 }
780
781 wait:
782 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
783 timeout);
784 }
785
786 if (atomic_read(&efx->drain_pending) &&
787 !efx_check_tx_flush_complete(efx)) {
788 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
789 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
790 atomic_read(&efx->rxq_flush_outstanding),
791 atomic_read(&efx->rxq_flush_pending));
792 rc = -ETIMEDOUT;
793
794 atomic_set(&efx->drain_pending, 0);
795 atomic_set(&efx->rxq_flush_pending, 0);
796 atomic_set(&efx->rxq_flush_outstanding, 0);
797 }
798
799 efx->type->finish_flush(efx);
800
801 return rc;
802}
803
804/**************************************************************************
805 *
806 * Event queue processing
807 * Event queues are processed by per-channel tasklets.
808 *
809 **************************************************************************/
810
811/* Update a channel's event queue's read pointer (RPTR) register
812 *
813 * This writes the EVQ_RPTR_REG register for the specified channel's
814 * event queue.
815 */
816void efx_nic_eventq_read_ack(struct efx_channel *channel)
817{
818 efx_dword_t reg;
819 struct efx_nic *efx = channel->efx;
820
821 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
822 channel->eventq_read_ptr & channel->eventq_mask);
823
824 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
825 * of 4 bytes, but it is really 16 bytes just like later revisions.
826 */
827 efx_writed(efx, &reg,
828 efx->type->evq_rptr_tbl_base +
829 FR_BZ_EVQ_RPTR_STEP * channel->channel);
830}
831
832/* Use HW to insert a SW defined event */
833void efx_generate_event(struct efx_nic *efx, unsigned int evq,
834 efx_qword_t *event)
835{
836 efx_oword_t drv_ev_reg;
837
838 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
839 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
840 drv_ev_reg.u32[0] = event->u32[0];
841 drv_ev_reg.u32[1] = event->u32[1];
842 drv_ev_reg.u32[2] = 0;
843 drv_ev_reg.u32[3] = 0;
844 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
845 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
846}
847
848static void efx_magic_event(struct efx_channel *channel, u32 magic)
849{
850 efx_qword_t event;
851
852 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
853 FSE_AZ_EV_CODE_DRV_GEN_EV,
854 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
855 efx_generate_event(channel->efx, channel->channel, &event);
856}
857
858/* Handle a transmit completion event
859 *
860 * The NIC batches TX completion events; the message we receive is of
861 * the form "complete all TX events up to this index".
862 */
863static int
864efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
865{
866 unsigned int tx_ev_desc_ptr;
867 unsigned int tx_ev_q_label;
868 struct efx_tx_queue *tx_queue;
869 struct efx_nic *efx = channel->efx;
870 int tx_packets = 0;
871
872 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
873 return 0;
874
875 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
876 /* Transmit completion */
877 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
878 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
879 tx_queue = efx_channel_get_tx_queue(
880 channel, tx_ev_q_label % EFX_TXQ_TYPES);
881 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
882 tx_queue->ptr_mask);
883 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
884 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
885 /* Rewrite the FIFO write pointer */
886 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
887 tx_queue = efx_channel_get_tx_queue(
888 channel, tx_ev_q_label % EFX_TXQ_TYPES);
889
890 netif_tx_lock(efx->net_dev);
891 efx_notify_tx_desc(tx_queue);
892 netif_tx_unlock(efx->net_dev);
893 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
894 EFX_WORKAROUND_10727(efx)) {
895 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
896 } else {
897 netif_err(efx, tx_err, efx->net_dev,
898 "channel %d unexpected TX event "
899 EFX_QWORD_FMT"\n", channel->channel,
900 EFX_QWORD_VAL(*event));
901 }
902
903 return tx_packets;
904}
905
906/* Detect errors included in the rx_ev_pkt_ok bit. */
907static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
908 const efx_qword_t *event)
909{
910 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
911 struct efx_nic *efx = rx_queue->efx;
912 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
913 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
914 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
915 bool rx_ev_other_err, rx_ev_pause_frm;
916 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
917 unsigned rx_ev_pkt_type;
918
919 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
920 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
921 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
922 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
923 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
924 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
925 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
926 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
927 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
928 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
929 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
930 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
931 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
932 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
933 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
934
935 /* Every error apart from tobe_disc and pause_frm */
936 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
937 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
938 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
939
940 /* Count errors that are not in MAC stats. Ignore expected
941 * checksum errors during self-test. */
942 if (rx_ev_frm_trunc)
943 ++channel->n_rx_frm_trunc;
944 else if (rx_ev_tobe_disc)
945 ++channel->n_rx_tobe_disc;
946 else if (!efx->loopback_selftest) {
947 if (rx_ev_ip_hdr_chksum_err)
948 ++channel->n_rx_ip_hdr_chksum_err;
949 else if (rx_ev_tcp_udp_chksum_err)
950 ++channel->n_rx_tcp_udp_chksum_err;
951 }
952
953 /* TOBE_DISC is expected on unicast mismatches; don't print out an
954 * error message. FRM_TRUNC indicates RXDP dropped the packet due
955 * to a FIFO overflow.
956 */
957#ifdef DEBUG
958 if (rx_ev_other_err && net_ratelimit()) {
959 netif_dbg(efx, rx_err, efx->net_dev,
960 " RX queue %d unexpected RX event "
961 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
962 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
963 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
964 rx_ev_ip_hdr_chksum_err ?
965 " [IP_HDR_CHKSUM_ERR]" : "",
966 rx_ev_tcp_udp_chksum_err ?
967 " [TCP_UDP_CHKSUM_ERR]" : "",
968 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
969 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
970 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
971 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
972 rx_ev_pause_frm ? " [PAUSE]" : "");
973 }
974#endif
975
976 /* The frame must be discarded if any of these are true. */
977 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
978 rx_ev_tobe_disc | rx_ev_pause_frm) ?
979 EFX_RX_PKT_DISCARD : 0;
980}
981
982/* Handle receive events that are not in order. Return true if this
983 * can be handled as a partial packet discard, false if it's more
984 * serious.
985 */
986static bool
987efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
988{
989 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
990 struct efx_nic *efx = rx_queue->efx;
991 unsigned expected, dropped;
992
993 if (rx_queue->scatter_n &&
994 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
995 rx_queue->ptr_mask)) {
996 ++channel->n_rx_nodesc_trunc;
997 return true;
998 }
999
1000 expected = rx_queue->removed_count & rx_queue->ptr_mask;
1001 dropped = (index - expected) & rx_queue->ptr_mask;
1002 netif_info(efx, rx_err, efx->net_dev,
1003 "dropped %d events (index=%d expected=%d)\n",
1004 dropped, index, expected);
1005
1006 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
1007 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1008 return false;
1009}
1010
1011/* Handle a packet received event
1012 *
1013 * The NIC gives a "discard" flag if it's a unicast packet with the
1014 * wrong destination address.
1015 * The "is multicast" and "matches multicast filter" flags can also be used to
1016 * discard non-matching multicast packets.
1017 */
1018static void
1019efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
1020{
1021 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
1022 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
1023 unsigned expected_ptr;
1024 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
1025 u16 flags;
1026 struct efx_rx_queue *rx_queue;
1027 struct efx_nic *efx = channel->efx;
1028
1029 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1030 return;
1031
1032 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
1033 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
1034 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
1035 channel->channel);
1036
1037 rx_queue = efx_channel_get_rx_queue(channel);
1038
1039 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
1040 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
1041 rx_queue->ptr_mask);
1042
1043 /* Check for partial drops and other errors */
1044 if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
1045 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
1046 if (rx_ev_desc_ptr != expected_ptr &&
1047 !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
1048 return;
1049
1050 /* Discard all pending fragments */
1051 if (rx_queue->scatter_n) {
1052 efx_rx_packet(
1053 rx_queue,
1054 rx_queue->removed_count & rx_queue->ptr_mask,
1055 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
1056 rx_queue->removed_count += rx_queue->scatter_n;
1057 rx_queue->scatter_n = 0;
1058 }
1059
1060 /* Return if there is no new fragment */
1061 if (rx_ev_desc_ptr != expected_ptr)
1062 return;
1063
1064 /* Discard new fragment if not SOP */
1065 if (!rx_ev_sop) {
1066 efx_rx_packet(
1067 rx_queue,
1068 rx_queue->removed_count & rx_queue->ptr_mask,
1069 1, 0, EFX_RX_PKT_DISCARD);
1070 ++rx_queue->removed_count;
1071 return;
1072 }
1073 }
1074
1075 ++rx_queue->scatter_n;
1076 if (rx_ev_cont)
1077 return;
1078
1079 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1080 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1081 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1082
1083 if (likely(rx_ev_pkt_ok)) {
1084 /* If packet is marked as OK then we can rely on the
1085 * hardware checksum and classification.
1086 */
1087 flags = 0;
1088 switch (rx_ev_hdr_type) {
1089 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1090 flags |= EFX_RX_PKT_TCP;
1091 /* fall through */
1092 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1093 flags |= EFX_RX_PKT_CSUMMED;
1094 /* fall through */
1095 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1096 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1097 break;
1098 }
1099 } else {
1100 flags = efx_handle_rx_not_ok(rx_queue, event);
1101 }
1102
1103 /* Detect multicast packets that didn't match the filter */
1104 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1105 if (rx_ev_mcast_pkt) {
1106 unsigned int rx_ev_mcast_hash_match =
1107 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1108
1109 if (unlikely(!rx_ev_mcast_hash_match)) {
1110 ++channel->n_rx_mcast_mismatch;
1111 flags |= EFX_RX_PKT_DISCARD;
1112 }
1113 }
1114
1115 channel->irq_mod_score += 2;
1116
1117 /* Handle received packet */
1118 efx_rx_packet(rx_queue,
1119 rx_queue->removed_count & rx_queue->ptr_mask,
1120 rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1121 rx_queue->removed_count += rx_queue->scatter_n;
1122 rx_queue->scatter_n = 0;
1123}
1124
1125/* If this flush done event corresponds to a &struct efx_tx_queue, then
1126 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1127 * of all transmit completions.
1128 */
1129static void
1130efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1131{
1132 struct efx_tx_queue *tx_queue;
1133 int qid;
1134
1135 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1136 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1137 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1138 qid % EFX_TXQ_TYPES);
1139 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1140 efx_magic_event(tx_queue->channel,
1141 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1142 }
1143 }
1144}
1145
1146/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1147 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1148 * the RX queue back to the mask of RX queues in need of flushing.
1149 */
1150static void
1151efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1152{
1153 struct efx_channel *channel;
1154 struct efx_rx_queue *rx_queue;
1155 int qid;
1156 bool failed;
1157
1158 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1159 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1160 if (qid >= efx->n_channels)
1161 return;
1162 channel = efx_get_channel(efx, qid);
1163 if (!efx_channel_has_rx_queue(channel))
1164 return;
1165 rx_queue = efx_channel_get_rx_queue(channel);
1166
1167 if (failed) {
1168 netif_info(efx, hw, efx->net_dev,
1169 "RXQ %d flush retry\n", qid);
1170 rx_queue->flush_pending = true;
1171 atomic_inc(&efx->rxq_flush_pending);
1172 } else {
1173 efx_magic_event(efx_rx_queue_channel(rx_queue),
1174 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1175 }
1176 atomic_dec(&efx->rxq_flush_outstanding);
1177 if (efx_flush_wake(efx))
1178 wake_up(&efx->flush_wq);
1179}
1180
1181static void
1182efx_handle_drain_event(struct efx_channel *channel)
1183{
1184 struct efx_nic *efx = channel->efx;
1185
1186 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1187 atomic_dec(&efx->drain_pending);
1188 if (efx_flush_wake(efx))
1189 wake_up(&efx->flush_wq);
1190}
1191
1192static void
1193efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
1194{
1195 struct efx_nic *efx = channel->efx;
1196 struct efx_rx_queue *rx_queue =
1197 efx_channel_has_rx_queue(channel) ?
1198 efx_channel_get_rx_queue(channel) : NULL;
1199 unsigned magic, code;
1200
1201 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1202 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1203
1204 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1205 channel->event_test_cpu = raw_smp_processor_id();
1206 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1207 /* The queue must be empty, so we won't receive any rx
1208 * events, so efx_process_channel() won't refill the
1209 * queue. Refill it here. */
1210 efx_fast_push_rx_descriptors(rx_queue);
1211 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1212 rx_queue->enabled = false;
1213 efx_handle_drain_event(channel);
1214 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1215 efx_handle_drain_event(channel);
1216 } else {
1217 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1218 "generated event "EFX_QWORD_FMT"\n",
1219 channel->channel, EFX_QWORD_VAL(*event));
1220 }
1221}
1222
1223static void
1224efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1225{
1226 struct efx_nic *efx = channel->efx;
1227 unsigned int ev_sub_code;
1228 unsigned int ev_sub_data;
1229
1230 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1231 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1232
1233 switch (ev_sub_code) {
1234 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1235 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1236 channel->channel, ev_sub_data);
1237 efx_handle_tx_flush_done(efx, event);
1238 efx_sriov_tx_flush_done(efx, event);
1239 break;
1240 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1241 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1242 channel->channel, ev_sub_data);
1243 efx_handle_rx_flush_done(efx, event);
1244 efx_sriov_rx_flush_done(efx, event);
1245 break;
1246 case FSE_AZ_EVQ_INIT_DONE_EV:
1247 netif_dbg(efx, hw, efx->net_dev,
1248 "channel %d EVQ %d initialised\n",
1249 channel->channel, ev_sub_data);
1250 break;
1251 case FSE_AZ_SRM_UPD_DONE_EV:
1252 netif_vdbg(efx, hw, efx->net_dev,
1253 "channel %d SRAM update done\n", channel->channel);
1254 break;
1255 case FSE_AZ_WAKE_UP_EV:
1256 netif_vdbg(efx, hw, efx->net_dev,
1257 "channel %d RXQ %d wakeup event\n",
1258 channel->channel, ev_sub_data);
1259 break;
1260 case FSE_AZ_TIMER_EV:
1261 netif_vdbg(efx, hw, efx->net_dev,
1262 "channel %d RX queue %d timer expired\n",
1263 channel->channel, ev_sub_data);
1264 break;
1265 case FSE_AA_RX_RECOVER_EV:
1266 netif_err(efx, rx_err, efx->net_dev,
1267 "channel %d seen DRIVER RX_RESET event. "
1268 "Resetting.\n", channel->channel);
1269 atomic_inc(&efx->rx_reset);
1270 efx_schedule_reset(efx,
1271 EFX_WORKAROUND_6555(efx) ?
1272 RESET_TYPE_RX_RECOVERY :
1273 RESET_TYPE_DISABLE);
1274 break;
1275 case FSE_BZ_RX_DSC_ERROR_EV:
1276 if (ev_sub_data < EFX_VI_BASE) {
1277 netif_err(efx, rx_err, efx->net_dev,
1278 "RX DMA Q %d reports descriptor fetch error."
1279 " RX Q %d is disabled.\n", ev_sub_data,
1280 ev_sub_data);
1281 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1282 } else
1283 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1284 break;
1285 case FSE_BZ_TX_DSC_ERROR_EV:
1286 if (ev_sub_data < EFX_VI_BASE) {
1287 netif_err(efx, tx_err, efx->net_dev,
1288 "TX DMA Q %d reports descriptor fetch error."
1289 " TX Q %d is disabled.\n", ev_sub_data,
1290 ev_sub_data);
1291 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1292 } else
1293 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1294 break;
1295 default:
1296 netif_vdbg(efx, hw, efx->net_dev,
1297 "channel %d unknown driver event code %d "
1298 "data %04x\n", channel->channel, ev_sub_code,
1299 ev_sub_data);
1300 break;
1301 }
1302}
1303
1304int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1305{
1306 struct efx_nic *efx = channel->efx;
1307 unsigned int read_ptr;
1308 efx_qword_t event, *p_event;
1309 int ev_code;
1310 int tx_packets = 0;
1311 int spent = 0;
1312
1313 read_ptr = channel->eventq_read_ptr;
1314
1315 for (;;) {
1316 p_event = efx_event(channel, read_ptr);
1317 event = *p_event;
1318
1319 if (!efx_event_present(&event))
1320 /* End of events */
1321 break;
1322
1323 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1324 "channel %d event is "EFX_QWORD_FMT"\n",
1325 channel->channel, EFX_QWORD_VAL(event));
1326
1327 /* Clear this event by marking it all ones */
1328 EFX_SET_QWORD(*p_event);
1329
1330 ++read_ptr;
1331
1332 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1333
1334 switch (ev_code) {
1335 case FSE_AZ_EV_CODE_RX_EV:
1336 efx_handle_rx_event(channel, &event);
1337 if (++spent == budget)
1338 goto out;
1339 break;
1340 case FSE_AZ_EV_CODE_TX_EV:
1341 tx_packets += efx_handle_tx_event(channel, &event);
1342 if (tx_packets > efx->txq_entries) {
1343 spent = budget;
1344 goto out;
1345 }
1346 break;
1347 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1348 efx_handle_generated_event(channel, &event);
1349 break;
1350 case FSE_AZ_EV_CODE_DRIVER_EV:
1351 efx_handle_driver_event(channel, &event);
1352 break;
1353 case FSE_CZ_EV_CODE_USER_EV:
1354 efx_sriov_event(channel, &event);
1355 break;
1356 case FSE_CZ_EV_CODE_MCDI_EV:
1357 efx_mcdi_process_event(channel, &event);
1358 break;
1359 case FSE_AZ_EV_CODE_GLOBAL_EV:
1360 if (efx->type->handle_global_event &&
1361 efx->type->handle_global_event(channel, &event))
1362 break;
1363 /* else fall through */
1364 default:
1365 netif_err(channel->efx, hw, channel->efx->net_dev,
1366 "channel %d unknown event type %d (data "
1367 EFX_QWORD_FMT ")\n", channel->channel,
1368 ev_code, EFX_QWORD_VAL(event));
1369 }
1370 }
1371
1372out:
1373 channel->eventq_read_ptr = read_ptr;
1374 return spent;
1375}
1376
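
For reference, efx_nic_process_eventq() is intended to be driven from NAPI poll context; the sketch below shows how a caller might honour its budget contract. This is a hedged illustration only: example_poll() is a hypothetical name, not this driver's actual poll function, although the napi_str member and the helpers it calls do exist in this driver.

	/* Hedged sketch: drive the event loop from a NAPI poll handler.
	 * A return value below budget means the queue drained and
	 * polling can stop.
	 */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct efx_channel *channel =
			container_of(napi, struct efx_channel, napi_str);
		int spent;

		spent = efx_nic_process_eventq(channel, budget);
		if (spent < budget) {
			/* Drained: stop polling and re-prime the IRQ */
			napi_complete(napi);
			efx_nic_eventq_read_ack(channel);
		}
		return spent;
	}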
1377/* Check whether an event is present in the eventq at the current 53/* Check whether an event is present in the eventq at the current
1378 * read pointer. Only useful for self-test. 54 * read pointer. Only useful for self-test.
1379 */ 55 */
@@ -1382,323 +58,18 @@ bool efx_nic_event_present(struct efx_channel *channel)
1382 return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); 58 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1383} 59}
1384 60
1385/* Allocate buffer table entries for event queue */
1386int efx_nic_probe_eventq(struct efx_channel *channel)
1387{
1388 struct efx_nic *efx = channel->efx;
1389 unsigned entries;
1390
1391 entries = channel->eventq_mask + 1;
1392 return efx_alloc_special_buffer(efx, &channel->eventq,
1393 entries * sizeof(efx_qword_t));
1394}
1395
1396void efx_nic_init_eventq(struct efx_channel *channel)
1397{
1398 efx_oword_t reg;
1399 struct efx_nic *efx = channel->efx;
1400
1401 netif_dbg(efx, hw, efx->net_dev,
1402 "channel %d event queue in special buffers %d-%d\n",
1403 channel->channel, channel->eventq.index,
1404 channel->eventq.index + channel->eventq.entries - 1);
1405
1406 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1407 EFX_POPULATE_OWORD_3(reg,
1408 FRF_CZ_TIMER_Q_EN, 1,
1409 FRF_CZ_HOST_NOTIFY_MODE, 0,
1410 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1411 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1412 }
1413
1414 /* Pin event queue buffer */
1415 efx_init_special_buffer(efx, &channel->eventq);
1416
1417 /* Fill event queue with all ones (i.e. empty events) */
1418 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1419
1420 /* Push event queue to card */
1421 EFX_POPULATE_OWORD_3(reg,
1422 FRF_AZ_EVQ_EN, 1,
1423 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1424 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1425 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1426 channel->channel);
1427
1428 efx->type->push_irq_moderation(channel);
1429}
1430
1431void efx_nic_fini_eventq(struct efx_channel *channel)
1432{
1433 efx_oword_t reg;
1434 struct efx_nic *efx = channel->efx;
1435
1436 /* Remove event queue from card */
1437 EFX_ZERO_OWORD(reg);
1438 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1439 channel->channel);
1440 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1441 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1442
1443 /* Unpin event queue */
1444 efx_fini_special_buffer(efx, &channel->eventq);
1445}
1446
1447/* Free buffers backing event queue */
1448void efx_nic_remove_eventq(struct efx_channel *channel)
1449{
1450 efx_free_special_buffer(channel->efx, &channel->eventq);
1451}
1452
1453
1454void efx_nic_event_test_start(struct efx_channel *channel) 61void efx_nic_event_test_start(struct efx_channel *channel)
1455{ 62{
1456 channel->event_test_cpu = -1; 63 channel->event_test_cpu = -1;
1457 smp_wmb(); 64 smp_wmb();
1458 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); 65 channel->efx->type->ev_test_generate(channel);
1459} 66}
1460 67
1461void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
1462{
1463 efx_magic_event(efx_rx_queue_channel(rx_queue),
1464 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1465}
1466
1467/**************************************************************************
1468 *
1469 * Hardware interrupts
1470 * The hardware interrupt handler does very little work; all the event
1471 * queue processing is carried out by per-channel tasklets.
1472 *
1473 **************************************************************************/
1474
1475/* Enable/disable/generate interrupts */
1476static inline void efx_nic_interrupts(struct efx_nic *efx,
1477 bool enabled, bool force)
1478{
1479 efx_oword_t int_en_reg_ker;
1480
1481 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1482 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1483 FRF_AZ_KER_INT_KER, force,
1484 FRF_AZ_DRV_INT_EN_KER, enabled);
1485 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1486}
1487
1488void efx_nic_enable_interrupts(struct efx_nic *efx)
1489{
1490 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1491 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1492
1493 efx_nic_interrupts(efx, true, false);
1494}
1495
1496void efx_nic_disable_interrupts(struct efx_nic *efx)
1497{
1498 /* Disable interrupts */
1499 efx_nic_interrupts(efx, false, false);
1500}
1501
1502/* Generate a test interrupt
1503 * Interrupt must already have been enabled, otherwise nasty things
1504 * may happen.
1505 */
1506void efx_nic_irq_test_start(struct efx_nic *efx) 68void efx_nic_irq_test_start(struct efx_nic *efx)
1507{ 69{
1508 efx->last_irq_cpu = -1; 70 efx->last_irq_cpu = -1;
1509 smp_wmb(); 71 smp_wmb();
1510 efx_nic_interrupts(efx, true, true); 72 efx->type->irq_test_generate(efx);
1511}
1512
1513/* Process a fatal interrupt
1514 * Disable bus mastering ASAP and schedule a reset
1515 */
1516irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1517{
1518 struct falcon_nic_data *nic_data = efx->nic_data;
1519 efx_oword_t *int_ker = efx->irq_status.addr;
1520 efx_oword_t fatal_intr;
1521 int error, mem_perr;
1522
1523 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1524 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1525
1526 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1527 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1528 EFX_OWORD_VAL(fatal_intr),
1529 error ? "disabling bus mastering" : "no recognised error");
1530
1531	/* If this is a memory parity error, dump which blocks are offending */
1532 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1533 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1534 if (mem_perr) {
1535 efx_oword_t reg;
1536 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1537 netif_err(efx, hw, efx->net_dev,
1538 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1539 EFX_OWORD_VAL(reg));
1540 }
1541
1542 /* Disable both devices */
1543 pci_clear_master(efx->pci_dev);
1544 if (efx_nic_is_dual_func(efx))
1545 pci_clear_master(nic_data->pci_dev2);
1546 efx_nic_disable_interrupts(efx);
1547
1548 /* Count errors and reset or disable the NIC accordingly */
1549 if (efx->int_error_count == 0 ||
1550 time_after(jiffies, efx->int_error_expire)) {
1551 efx->int_error_count = 0;
1552 efx->int_error_expire =
1553 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1554 }
1555 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1556 netif_err(efx, hw, efx->net_dev,
1557 "SYSTEM ERROR - reset scheduled\n");
1558 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1559 } else {
1560 netif_err(efx, hw, efx->net_dev,
1561			  "SYSTEM ERROR - max number of errors seen. "
1562			  "NIC will be disabled\n");
1563 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1564 }
1565
1566 return IRQ_HANDLED;
1567}
1568
1569/* Handle a legacy interrupt
1570 * Acknowledges the interrupt and schedules event queue processing.
1571 */
1572static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1573{
1574 struct efx_nic *efx = dev_id;
1575 efx_oword_t *int_ker = efx->irq_status.addr;
1576 irqreturn_t result = IRQ_NONE;
1577 struct efx_channel *channel;
1578 efx_dword_t reg;
1579 u32 queues;
1580 int syserr;
1581
1582 /* Could this be ours? If interrupts are disabled then the
1583 * channel state may not be valid.
1584 */
1585 if (!efx->legacy_irq_enabled)
1586 return result;
1587
1588 /* Read the ISR which also ACKs the interrupts */
1589 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1590 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1591
1592 /* Legacy interrupts are disabled too late by the EEH kernel
1593 * code. Disable them earlier.
1594 * If an EEH error occurred, the read will have returned all ones.
1595 */
1596 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
1597 !efx->eeh_disabled_legacy_irq) {
1598 disable_irq_nosync(efx->legacy_irq);
1599 efx->eeh_disabled_legacy_irq = true;
1600 }
1601
1602 /* Handle non-event-queue sources */
1603 if (queues & (1U << efx->irq_level)) {
1604 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1605 if (unlikely(syserr))
1606 return efx_nic_fatal_interrupt(efx);
1607 efx->last_irq_cpu = raw_smp_processor_id();
1608 }
1609
1610 if (queues != 0) {
1611 if (EFX_WORKAROUND_15783(efx))
1612 efx->irq_zero_count = 0;
1613
1614 /* Schedule processing of any interrupting queues */
1615 efx_for_each_channel(channel, efx) {
1616 if (queues & 1)
1617 efx_schedule_channel_irq(channel);
1618 queues >>= 1;
1619 }
1620 result = IRQ_HANDLED;
1621
1622 } else if (EFX_WORKAROUND_15783(efx)) {
1623 efx_qword_t *event;
1624
1625 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1626 * because this might be a shared interrupt. */
1627 if (efx->irq_zero_count++ == 0)
1628 result = IRQ_HANDLED;
1629
1630 /* Ensure we schedule or rearm all event queues */
1631 efx_for_each_channel(channel, efx) {
1632 event = efx_event(channel, channel->eventq_read_ptr);
1633 if (efx_event_present(event))
1634 efx_schedule_channel_irq(channel);
1635 else
1636 efx_nic_eventq_read_ack(channel);
1637 }
1638 }
1639
1640 if (result == IRQ_HANDLED)
1641 netif_vdbg(efx, intr, efx->net_dev,
1642 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1643 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1644
1645 return result;
1646}
1647
1648/* Handle an MSI interrupt
1649 *
1650 * Handle an MSI hardware interrupt. This routine schedules event
1651 * queue processing. No interrupt acknowledgement cycle is necessary.
1652 * Also, we never need to check that the interrupt is for us, since
1653 * MSI interrupts cannot be shared.
1654 */
1655static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1656{
1657 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1658 struct efx_nic *efx = channel->efx;
1659 efx_oword_t *int_ker = efx->irq_status.addr;
1660 int syserr;
1661
1662 netif_vdbg(efx, intr, efx->net_dev,
1663 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1664 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1665
1666 /* Handle non-event-queue sources */
1667 if (channel->channel == efx->irq_level) {
1668 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1669 if (unlikely(syserr))
1670 return efx_nic_fatal_interrupt(efx);
1671 efx->last_irq_cpu = raw_smp_processor_id();
1672 }
1673
1674 /* Schedule processing of the channel */
1675 efx_schedule_channel_irq(channel);
1676
1677 return IRQ_HANDLED;
1678}
1679
1680
1681/* Set up the RSS indirection table.
1682 * This maps the packet's hash value to an RXQ.
1683 */
1684void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1685{
1686 size_t i = 0;
1687 efx_dword_t dword;
1688
1689 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1690 return;
1691
1692 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1693 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1694
1695 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1696 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1697 efx->rx_indir_table[i]);
1698 efx_writed(efx, &dword,
1699 FR_BZ_RX_INDIRECTION_TBL +
1700 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1701 }
1702} 73}
1703 74
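The indirection table pushed above is conventionally filled by spreading rows evenly over the active RX queues. A hedged sketch of that spreading follows; simple modulo distribution is an assumption for illustration, not necessarily this driver's exact policy.

	/* Illustrative only: distribute indirection-table rows round-robin
	 * across n_rx_channels RX queues before the table is pushed.
	 */
	static void example_fill_indir_table(u32 *table, size_t rows,
					     unsigned int n_rx_channels)
	{
		size_t i;

		for (i = 0; i < rows; i++)
			table[i] = i % n_rx_channels;
	}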
1704/* Hook interrupt handler(s) 75/* Hook interrupt handler(s)
@@ -1711,13 +82,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1711 int rc; 82 int rc;
1712 83
1713 if (!EFX_INT_MODE_USE_MSI(efx)) { 84 if (!EFX_INT_MODE_USE_MSI(efx)) {
1714 irq_handler_t handler; 85 rc = request_irq(efx->legacy_irq,
1715 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 86 efx->type->irq_handle_legacy, IRQF_SHARED,
1716 handler = efx_legacy_interrupt;
1717 else
1718 handler = falcon_legacy_interrupt_a1;
1719
1720 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1721 efx->name, efx); 87 efx->name, efx);
1722 if (rc) { 88 if (rc) {
1723 netif_err(efx, drv, efx->net_dev, 89 netif_err(efx, drv, efx->net_dev,
@@ -1742,10 +108,10 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1742 /* Hook MSI or MSI-X interrupt */ 108 /* Hook MSI or MSI-X interrupt */
1743 n_irqs = 0; 109 n_irqs = 0;
1744 efx_for_each_channel(channel, efx) { 110 efx_for_each_channel(channel, efx) {
1745 rc = request_irq(channel->irq, efx_msi_interrupt, 111 rc = request_irq(channel->irq, efx->type->irq_handle_msi,
1746 IRQF_PROBE_SHARED, /* Not shared */ 112 IRQF_PROBE_SHARED, /* Not shared */
1747 efx->channel_name[channel->channel], 113 efx->msi_context[channel->channel].name,
1748 &efx->channel[channel->channel]); 114 &efx->msi_context[channel->channel]);
1749 if (rc) { 115 if (rc) {
1750 netif_err(efx, drv, efx->net_dev, 116 netif_err(efx, drv, efx->net_dev,
1751 "failed to hook IRQ %d\n", channel->irq); 117 "failed to hook IRQ %d\n", channel->irq);
@@ -1774,7 +140,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1774 efx_for_each_channel(channel, efx) { 140 efx_for_each_channel(channel, efx) {
1775 if (n_irqs-- == 0) 141 if (n_irqs-- == 0)
1776 break; 142 break;
1777 free_irq(channel->irq, &efx->channel[channel->channel]); 143 free_irq(channel->irq, &efx->msi_context[channel->channel]);
1778 } 144 }
1779 fail1: 145 fail1:
1780 return rc; 146 return rc;
@@ -1783,7 +149,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1783void efx_nic_fini_interrupt(struct efx_nic *efx) 149void efx_nic_fini_interrupt(struct efx_nic *efx)
1784{ 150{
1785 struct efx_channel *channel; 151 struct efx_channel *channel;
1786 efx_oword_t reg;
1787 152
1788#ifdef CONFIG_RFS_ACCEL 153#ifdef CONFIG_RFS_ACCEL
1789 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); 154 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
@@ -1792,167 +157,13 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
1792 157
1793 /* Disable MSI/MSI-X interrupts */ 158 /* Disable MSI/MSI-X interrupts */
1794 efx_for_each_channel(channel, efx) 159 efx_for_each_channel(channel, efx)
1795 free_irq(channel->irq, &efx->channel[channel->channel]); 160 free_irq(channel->irq, &efx->msi_context[channel->channel]);
1796
1797 /* ACK legacy interrupt */
1798 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1799 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1800 else
1801 falcon_irq_ack_a1(efx);
1802 161
1803 /* Disable legacy interrupt */ 162 /* Disable legacy interrupt */
1804 if (efx->legacy_irq) 163 if (efx->legacy_irq)
1805 free_irq(efx->legacy_irq, efx); 164 free_irq(efx->legacy_irq, efx);
1806} 165}
1807 166
1808/* Looks at available SRAM resources and works out how many queues we
1809 * can support, and where things like descriptor caches should live.
1810 *
1811 * SRAM is split up as follows:
1812 * 0 buftbl entries for channels
1813 * efx->vf_buftbl_base buftbl entries for SR-IOV
1814 * efx->rx_dc_base RX descriptor caches
1815 * efx->tx_dc_base TX descriptor caches
1816 */
1817void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1818{
1819 unsigned vi_count, buftbl_min;
1820
1821 /* Account for the buffer table entries backing the datapath channels
1822 * and the descriptor caches for those channels.
1823 */
1824 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1825 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1826 efx->n_channels * EFX_MAX_EVQ_SIZE)
1827 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1828 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1829
1830#ifdef CONFIG_SFC_SRIOV
1831 if (efx_sriov_wanted(efx)) {
1832 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1833
1834 efx->vf_buftbl_base = buftbl_min;
1835
1836 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1837 vi_count = max(vi_count, EFX_VI_BASE);
1838 buftbl_free = (sram_lim_qw - buftbl_min -
1839 vi_count * vi_dc_entries);
1840
1841 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1842 efx_vf_size(efx));
1843 vf_limit = min(buftbl_free / entries_per_vf,
1844 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1845
1846 if (efx->vf_count > vf_limit) {
1847 netif_err(efx, probe, efx->net_dev,
1848				  "Reducing VF count from %d to %d\n",
1849 efx->vf_count, vf_limit);
1850 efx->vf_count = vf_limit;
1851 }
1852 vi_count += efx->vf_count * efx_vf_size(efx);
1853 }
1854#endif
1855
1856 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1857 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1858}
1859
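To make the buffer-table arithmetic above concrete, here is a worked example with illustrative values; the constants are assumptions for the sketch, not guaranteed hardware limits.

	/* Worked example (illustrative numbers only): 4 RX channels,
	 * 4 TX channels, EFX_TXQ_TYPES == 4, EFX_MAX_DMAQ_SIZE == 4096,
	 * EFX_MAX_EVQ_SIZE == 32768 and EFX_BUF_SIZE == 4096 give
	 *
	 *   buftbl_min = (4 * 4096 + 4 * 4 * 4096 + 4 * 32768)
	 *                * sizeof(efx_qword_t) / EFX_BUF_SIZE
	 *              = 212992 * 8 / 4096
	 *              = 416 buffer table entries
	 */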
1860u32 efx_nic_fpga_ver(struct efx_nic *efx)
1861{
1862 efx_oword_t altera_build;
1863 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1864 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1865}
1866
1867void efx_nic_init_common(struct efx_nic *efx)
1868{
1869 efx_oword_t temp;
1870
1871 /* Set positions of descriptor caches in SRAM. */
1872 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1873 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1874 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1875 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1876
1877 /* Set TX descriptor cache size. */
1878 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1879 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1880 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1881
1882 /* Set RX descriptor cache size. Set low watermark to size-8, as
1883 * this allows most efficient prefetching.
1884 */
1885 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1886 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1887 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1888 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1889 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1890
1891 /* Program INT_KER address */
1892 EFX_POPULATE_OWORD_2(temp,
1893 FRF_AZ_NORM_INT_VEC_DIS_KER,
1894 EFX_INT_MODE_USE_MSI(efx),
1895 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1896 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1897
1898 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1899 /* Use an interrupt level unused by event queues */
1900 efx->irq_level = 0x1f;
1901 else
1902 /* Use a valid MSI-X vector */
1903 efx->irq_level = 0;
1904
1905 /* Enable all the genuinely fatal interrupts. (They are still
1906 * masked by the overall interrupt mask, controlled by
1907 * falcon_interrupts()).
1908 *
1909 * Note: All other fatal interrupts are enabled
1910 */
1911 EFX_POPULATE_OWORD_3(temp,
1912 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1913 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1914 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1915 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1916 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1917 EFX_INVERT_OWORD(temp);
1918 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1919
1920 efx_nic_push_rx_indir_table(efx);
1921
1922 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1923 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1924 */
1925 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1926 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1927 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1928 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1929 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1930 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1931 /* Enable SW_EV to inherit in char driver - assume harmless here */
1932 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1933 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1934 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1935 /* Disable hardware watchdog which can misfire */
1936 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1937 /* Squash TX of packets of 16 bytes or less */
1938 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1939 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1940 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1941
1942 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1943 EFX_POPULATE_OWORD_4(temp,
1944 /* Default values */
1945 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1946 FRF_BZ_TX_PACE_SB_AF, 0xb,
1947 FRF_BZ_TX_PACE_FB_BASE, 0,
1948 /* Allow large pace values in the
1949 * fast bin. */
1950 FRF_BZ_TX_PACE_BIN_TH,
1951 FFE_BZ_TX_PACE_RESERVED);
1952 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1953 }
1954}
1955
1956/* Register dump */ 167/* Register dump */
1957 168
1958#define REGISTER_REVISION_A 1 169#define REGISTER_REVISION_A 1
@@ -2217,3 +428,86 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
2217 } 428 }
2218 } 429 }
2219} 430}
431
432/**
433 * efx_nic_describe_stats - Describe supported statistics for ethtool
434 * @desc: Array of &struct efx_hw_stat_desc describing the statistics
435 * @count: Length of the @desc array
436 * @mask: Bitmask of which elements of @desc are enabled
437 * @names: Buffer to copy names to, or %NULL. The names are copied
438 * starting at intervals of %ETH_GSTRING_LEN bytes.
439 *
440 * Returns the number of visible statistics, i.e. the number of set
441 * bits in the first @count bits of @mask for which a name is defined.
442 */
443size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
444 const unsigned long *mask, u8 *names)
445{
446 size_t visible = 0;
447 size_t index;
448
449 for_each_set_bit(index, mask, count) {
450 if (desc[index].name) {
451 if (names) {
452 strlcpy(names, desc[index].name,
453 ETH_GSTRING_LEN);
454 names += ETH_GSTRING_LEN;
455 }
456 ++visible;
457 }
458 }
459
460 return visible;
461}
462
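A hedged sketch of a caller follows, in the style of an ethtool hook. The descriptor table and mask below are illustrative stand-ins for whichever per-NIC-type tables exist; only the efx_nic_describe_stats() signature is taken from the code above.

	/* Illustrative only: a two-entry descriptor table and its mask.
	 * Field names follow the kernel-doc above; values are made up.
	 */
	static const struct efx_hw_stat_desc example_stat_desc[] = {
		{ .name = "tx_bytes", .dma_width = 64, .offset = 0 },
		{ .name = "rx_bytes", .dma_width = 64, .offset = 8 },
	};
	static const unsigned long example_stat_mask[] = { 0x3 };

	/* names == NULL returns just the count (suits get_sset_count);
	 * a non-NULL buffer is filled at ETH_GSTRING_LEN intervals.
	 */
	static size_t example_describe_stats(u8 *names)
	{
		return efx_nic_describe_stats(example_stat_desc,
					      ARRAY_SIZE(example_stat_desc),
					      example_stat_mask, names);
	}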
463/**
464 * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
465 * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
466 * layout. DMA widths of 0, 16, 32 and 64 are supported; where
467 * the width is specified as 0 the corresponding element of
468 * @stats is not updated.
469 * @count: Length of the @desc array
470 * @mask: Bitmask of which elements of @desc are enabled
471 * @stats: Buffer to update with the converted statistics. The length
472 * of this array must be at least the number of set bits in the
473 * first @count bits of @mask.
474 * @dma_buf: DMA buffer containing hardware statistics
475 * @accumulate: If set, the converted values will be added rather than
476 * directly stored to the corresponding elements of @stats
477 */
478void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
479 const unsigned long *mask,
480 u64 *stats, const void *dma_buf, bool accumulate)
481{
482 size_t index;
483
484 for_each_set_bit(index, mask, count) {
485 if (desc[index].dma_width) {
486 const void *addr = dma_buf + desc[index].offset;
487 u64 val;
488
489 switch (desc[index].dma_width) {
490 case 16:
491 val = le16_to_cpup((__le16 *)addr);
492 break;
493 case 32:
494 val = le32_to_cpup((__le32 *)addr);
495 break;
496 case 64:
497 val = le64_to_cpup((__le64 *)addr);
498 break;
499 default:
500 WARN_ON(1);
501 val = 0;
502 break;
503 }
504
505 if (accumulate)
506 *stats += val;
507 else
508 *stats = val;
509 }
510
511 ++stats;
512 }
513}
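And the conversion side, reusing the illustrative tables from the previous sketch; dma_buf would be whatever buffer the NIC type snapshots its MAC statistics into, and this caller is hypothetical.

	/* Illustrative only: convert a DMAed statistics snapshot into the
	 * u64 stats array. accumulate == false overwrites in place.
	 */
	static void example_update_stats(const void *dma_buf, u64 stats[2])
	{
		efx_nic_update_stats(example_stat_desc,
				     ARRAY_SIZE(example_stat_desc),
				     example_stat_mask, stats,
				     dma_buf, false);
	}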
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index d63c2991a751..4b1e188f7a2f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -16,17 +16,13 @@
16#include "net_driver.h" 16#include "net_driver.h"
17#include "efx.h" 17#include "efx.h"
18#include "mcdi.h" 18#include "mcdi.h"
19#include "spi.h"
20
21/*
22 * Falcon hardware control
23 */
24 19
25enum { 20enum {
26 EFX_REV_FALCON_A0 = 0, 21 EFX_REV_FALCON_A0 = 0,
27 EFX_REV_FALCON_A1 = 1, 22 EFX_REV_FALCON_A1 = 1,
28 EFX_REV_FALCON_B0 = 2, 23 EFX_REV_FALCON_B0 = 2,
29 EFX_REV_SIENA_A0 = 3, 24 EFX_REV_SIENA_A0 = 3,
25 EFX_REV_HUNT_A0 = 4,
30}; 26};
31 27
32static inline int efx_nic_rev(struct efx_nic *efx) 28static inline int efx_nic_rev(struct efx_nic *efx)
@@ -34,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
34 return efx->type->revision; 30 return efx->type->revision;
35} 31}
36 32
37extern u32 efx_nic_fpga_ver(struct efx_nic *efx); 33extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
38 34
39/* NIC has two interlinked PCI functions for the same port. */ 35/* NIC has two interlinked PCI functions for the same port. */
40static inline bool efx_nic_is_dual_func(struct efx_nic *efx) 36static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -42,6 +38,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
42 return efx_nic_rev(efx) < EFX_REV_FALCON_B0; 38 return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
43} 39}
44 40
41/* Read the current event from the event queue */
42static inline efx_qword_t *efx_event(struct efx_channel *channel,
43 unsigned int index)
44{
45 return ((efx_qword_t *) (channel->eventq.buf.addr)) +
46 (index & channel->eventq_mask);
47}
48
49/* See if an event is present
50 *
51 * We check both the high and low dword of the event for all ones. We
52 * wrote all ones when we cleared the event, and no valid event can
53 * have all ones in either its high or low dwords. This approach is
54 * robust against reordering.
55 *
56 * Note that using a single 64-bit comparison is incorrect; even
57 * though the CPU read will be atomic, the DMA write may not be.
58 */
59static inline int efx_event_present(efx_qword_t *event)
60{
61 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
62 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
63}
64
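As a hedged illustration of the clear/re-check protocol this comment describes (sketch only; example_try_consume() is not a helper in this driver):

	/* Sketch: consume one event slot. A consumed slot is refilled with
	 * all ones via EFX_SET_QWORD(), so efx_event_present() later
	 * reports it empty without needing an atomic 64-bit compare.
	 */
	static inline bool example_try_consume(efx_qword_t *ev,
					       efx_qword_t *out)
	{
		if (!efx_event_present(ev))
			return false;	/* slot still all-ones: no event */
		*out = *ev;		/* copy out the valid event */
		EFX_SET_QWORD(*ev);	/* mark the slot empty again */
		return true;
	}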
65/* Returns a pointer to the specified transmit descriptor in the TX
66 * descriptor queue belonging to the specified channel.
67 */
68static inline efx_qword_t *
69efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
70{
71 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
72}
73
74/* Decide whether to push a TX descriptor to the NIC vs merely writing
75 * the doorbell. This can reduce latency when we are adding a single
76 * descriptor to an empty queue, but is otherwise pointless. Further,
77 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
78 * triggered if we don't check this.
79 */
80static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
81 unsigned int write_count)
82{
83 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
84
85 if (empty_read_count == 0)
86 return false;
87
88 tx_queue->empty_read_count = 0;
89 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
90 && tx_queue->write_count - write_count == 1;
91}
92
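A hedged sketch of how a TX write path might act on this check; the two helpers called are hypothetical stand-ins for "write descriptor through the push register" versus "ring the doorbell only".

	/* Illustrative only: example_push_tx_desc() and
	 * example_notify_tx_desc() are hypothetical names.
	 */
	static void example_tx_write(struct efx_tx_queue *tx_queue,
				     unsigned int old_write_count)
	{
		if (efx_nic_may_push_tx_desc(tx_queue, old_write_count))
			example_push_tx_desc(tx_queue);	  /* desc + doorbell */
		else
			example_notify_tx_desc(tx_queue); /* doorbell only */
	}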
93/* Returns a pointer to the specified descriptor in the RX descriptor queue */
94static inline efx_qword_t *
95efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
96{
97 return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
98}
99
45enum { 100enum {
46 PHY_TYPE_NONE = 0, 101 PHY_TYPE_NONE = 0,
47 PHY_TYPE_TXC43128 = 1, 102 PHY_TYPE_TXC43128 = 1,
@@ -59,9 +114,6 @@ enum {
59 (1 << LOOPBACK_XGXS) | \ 114 (1 << LOOPBACK_XGXS) | \
60 (1 << LOOPBACK_XAUI)) 115 (1 << LOOPBACK_XAUI))
61 116
62#define FALCON_GMAC_LOOPBACKS \
63 (1 << LOOPBACK_GMAC)
64
65/* Alignment of PCIe DMA boundaries (4KB) */ 117/* Alignment of PCIe DMA boundaries (4KB) */
66#define EFX_PAGE_SIZE 4096 118#define EFX_PAGE_SIZE 4096
67/* Size and alignment of buffer table entries (same) */ 119/* Size and alignment of buffer table entries (same) */
@@ -105,13 +157,96 @@ struct falcon_board {
105}; 157};
106 158
107/** 159/**
160 * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
161 * @device_id: Controller's id for the device
162 * @size: Size (in bytes)
163 * @addr_len: Number of address bytes in read/write commands
164 * @munge_address: Flag whether addresses should be munged.
165 * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
166 * use bit 3 of the command byte as address bit A8, rather
167 * than having a two-byte address. If this flag is set, then
168 * commands should be munged in this way.
169 * @erase_command: Erase command (or 0 if sector erase not needed).
170 * @erase_size: Erase sector size (in bytes)
171 * Erase commands affect sectors with this size and alignment.
172 * This must be a power of two.
173 * @block_size: Write block size (in bytes).
174 * Write commands are limited to blocks with this size and alignment.
175 */
176struct falcon_spi_device {
177 int device_id;
178 unsigned int size;
179 unsigned int addr_len;
180 unsigned int munge_address:1;
181 u8 erase_command;
182 unsigned int erase_size;
183 unsigned int block_size;
184};
185
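The @munge_address convention lends itself to a one-line sketch. The encoding below (address bit A8 carried in bit 3 of the command byte) is inferred from the field comment above, not verified against a device datasheet.

	/* Sketch of 9-bit address munging: carry A8 in command bit 3
	 * instead of sending a second address byte.
	 */
	static inline u8 example_munge_command(u8 command, unsigned int addr)
	{
		return command | ((addr & 0x100) >> 5);	/* A8 -> bit 3 */
	}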
186static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
187{
188 return spi->size != 0;
189}
190
191enum {
192 FALCON_STAT_tx_bytes,
193 FALCON_STAT_tx_packets,
194 FALCON_STAT_tx_pause,
195 FALCON_STAT_tx_control,
196 FALCON_STAT_tx_unicast,
197 FALCON_STAT_tx_multicast,
198 FALCON_STAT_tx_broadcast,
199 FALCON_STAT_tx_lt64,
200 FALCON_STAT_tx_64,
201 FALCON_STAT_tx_65_to_127,
202 FALCON_STAT_tx_128_to_255,
203 FALCON_STAT_tx_256_to_511,
204 FALCON_STAT_tx_512_to_1023,
205 FALCON_STAT_tx_1024_to_15xx,
206 FALCON_STAT_tx_15xx_to_jumbo,
207 FALCON_STAT_tx_gtjumbo,
208 FALCON_STAT_tx_non_tcpudp,
209 FALCON_STAT_tx_mac_src_error,
210 FALCON_STAT_tx_ip_src_error,
211 FALCON_STAT_rx_bytes,
212 FALCON_STAT_rx_good_bytes,
213 FALCON_STAT_rx_bad_bytes,
214 FALCON_STAT_rx_packets,
215 FALCON_STAT_rx_good,
216 FALCON_STAT_rx_bad,
217 FALCON_STAT_rx_pause,
218 FALCON_STAT_rx_control,
219 FALCON_STAT_rx_unicast,
220 FALCON_STAT_rx_multicast,
221 FALCON_STAT_rx_broadcast,
222 FALCON_STAT_rx_lt64,
223 FALCON_STAT_rx_64,
224 FALCON_STAT_rx_65_to_127,
225 FALCON_STAT_rx_128_to_255,
226 FALCON_STAT_rx_256_to_511,
227 FALCON_STAT_rx_512_to_1023,
228 FALCON_STAT_rx_1024_to_15xx,
229 FALCON_STAT_rx_15xx_to_jumbo,
230 FALCON_STAT_rx_gtjumbo,
231 FALCON_STAT_rx_bad_lt64,
232 FALCON_STAT_rx_bad_gtjumbo,
233 FALCON_STAT_rx_overflow,
234 FALCON_STAT_rx_symbol_error,
235 FALCON_STAT_rx_align_error,
236 FALCON_STAT_rx_length_error,
237 FALCON_STAT_rx_internal_error,
238 FALCON_STAT_rx_nodesc_drop_cnt,
239 FALCON_STAT_COUNT
240};
241
242/**
108 * struct falcon_nic_data - Falcon NIC state 243 * struct falcon_nic_data - Falcon NIC state
109 * @pci_dev2: Secondary function of Falcon A 244 * @pci_dev2: Secondary function of Falcon A
110 * @board: Board state and functions 245 * @board: Board state and functions
246 * @stats: Hardware statistics
111 * @stats_disable_count: Nest count for disabling statistics fetches 247 * @stats_disable_count: Nest count for disabling statistics fetches
112 * @stats_pending: Is there a pending DMA of MAC statistics. 248 * @stats_pending: Is there a pending DMA of MAC statistics.
113 * @stats_timer: A timer for regularly fetching MAC statistics. 249 * @stats_timer: A timer for regularly fetching MAC statistics.
114 * @stats_dma_done: Pointer to the flag which indicates DMA completion.
115 * @spi_flash: SPI flash device 250 * @spi_flash: SPI flash device
116 * @spi_eeprom: SPI EEPROM device 251 * @spi_eeprom: SPI EEPROM device
117 * @spi_lock: SPI bus lock 252 * @spi_lock: SPI bus lock
@@ -121,12 +256,12 @@ struct falcon_board {
121struct falcon_nic_data { 256struct falcon_nic_data {
122 struct pci_dev *pci_dev2; 257 struct pci_dev *pci_dev2;
123 struct falcon_board board; 258 struct falcon_board board;
259 u64 stats[FALCON_STAT_COUNT];
124 unsigned int stats_disable_count; 260 unsigned int stats_disable_count;
125 bool stats_pending; 261 bool stats_pending;
126 struct timer_list stats_timer; 262 struct timer_list stats_timer;
127 u32 *stats_dma_done; 263 struct falcon_spi_device spi_flash;
128 struct efx_spi_device spi_flash; 264 struct falcon_spi_device spi_eeprom;
129 struct efx_spi_device spi_eeprom;
130 struct mutex spi_lock; 265 struct mutex spi_lock;
131 struct mutex mdio_lock; 266 struct mutex mdio_lock;
132 bool xmac_poll_required; 267 bool xmac_poll_required;
@@ -138,29 +273,148 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
138 return &data->board; 273 return &data->board;
139} 274}
140 275
276enum {
277 SIENA_STAT_tx_bytes,
278 SIENA_STAT_tx_good_bytes,
279 SIENA_STAT_tx_bad_bytes,
280 SIENA_STAT_tx_packets,
281 SIENA_STAT_tx_bad,
282 SIENA_STAT_tx_pause,
283 SIENA_STAT_tx_control,
284 SIENA_STAT_tx_unicast,
285 SIENA_STAT_tx_multicast,
286 SIENA_STAT_tx_broadcast,
287 SIENA_STAT_tx_lt64,
288 SIENA_STAT_tx_64,
289 SIENA_STAT_tx_65_to_127,
290 SIENA_STAT_tx_128_to_255,
291 SIENA_STAT_tx_256_to_511,
292 SIENA_STAT_tx_512_to_1023,
293 SIENA_STAT_tx_1024_to_15xx,
294 SIENA_STAT_tx_15xx_to_jumbo,
295 SIENA_STAT_tx_gtjumbo,
296 SIENA_STAT_tx_collision,
297 SIENA_STAT_tx_single_collision,
298 SIENA_STAT_tx_multiple_collision,
299 SIENA_STAT_tx_excessive_collision,
300 SIENA_STAT_tx_deferred,
301 SIENA_STAT_tx_late_collision,
302 SIENA_STAT_tx_excessive_deferred,
303 SIENA_STAT_tx_non_tcpudp,
304 SIENA_STAT_tx_mac_src_error,
305 SIENA_STAT_tx_ip_src_error,
306 SIENA_STAT_rx_bytes,
307 SIENA_STAT_rx_good_bytes,
308 SIENA_STAT_rx_bad_bytes,
309 SIENA_STAT_rx_packets,
310 SIENA_STAT_rx_good,
311 SIENA_STAT_rx_bad,
312 SIENA_STAT_rx_pause,
313 SIENA_STAT_rx_control,
314 SIENA_STAT_rx_unicast,
315 SIENA_STAT_rx_multicast,
316 SIENA_STAT_rx_broadcast,
317 SIENA_STAT_rx_lt64,
318 SIENA_STAT_rx_64,
319 SIENA_STAT_rx_65_to_127,
320 SIENA_STAT_rx_128_to_255,
321 SIENA_STAT_rx_256_to_511,
322 SIENA_STAT_rx_512_to_1023,
323 SIENA_STAT_rx_1024_to_15xx,
324 SIENA_STAT_rx_15xx_to_jumbo,
325 SIENA_STAT_rx_gtjumbo,
326 SIENA_STAT_rx_bad_gtjumbo,
327 SIENA_STAT_rx_overflow,
328 SIENA_STAT_rx_false_carrier,
329 SIENA_STAT_rx_symbol_error,
330 SIENA_STAT_rx_align_error,
331 SIENA_STAT_rx_length_error,
332 SIENA_STAT_rx_internal_error,
333 SIENA_STAT_rx_nodesc_drop_cnt,
334 SIENA_STAT_COUNT
335};
336
141/** 337/**
142 * struct siena_nic_data - Siena NIC state 338 * struct siena_nic_data - Siena NIC state
143 * @mcdi: Management-Controller-to-Driver Interface
144 * @wol_filter_id: Wake-on-LAN packet filter id 339 * @wol_filter_id: Wake-on-LAN packet filter id
145 * @hwmon: Hardware monitor state 340 * @stats: Hardware statistics
146 */ 341 */
147struct siena_nic_data { 342struct siena_nic_data {
148 struct efx_mcdi_iface mcdi;
149 int wol_filter_id; 343 int wol_filter_id;
150#ifdef CONFIG_SFC_MCDI_MON 344 u64 stats[SIENA_STAT_COUNT];
151 struct efx_mcdi_mon hwmon;
152#endif
153}; 345};
154 346
155#ifdef CONFIG_SFC_MCDI_MON 347enum {
156static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) 348 EF10_STAT_tx_bytes,
157{ 349 EF10_STAT_tx_packets,
158 struct siena_nic_data *nic_data; 350 EF10_STAT_tx_pause,
159 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 351 EF10_STAT_tx_control,
160 nic_data = efx->nic_data; 352 EF10_STAT_tx_unicast,
161 return &nic_data->hwmon; 353 EF10_STAT_tx_multicast,
162} 354 EF10_STAT_tx_broadcast,
163#endif 355 EF10_STAT_tx_lt64,
356 EF10_STAT_tx_64,
357 EF10_STAT_tx_65_to_127,
358 EF10_STAT_tx_128_to_255,
359 EF10_STAT_tx_256_to_511,
360 EF10_STAT_tx_512_to_1023,
361 EF10_STAT_tx_1024_to_15xx,
362 EF10_STAT_tx_15xx_to_jumbo,
363 EF10_STAT_rx_bytes,
364 EF10_STAT_rx_bytes_minus_good_bytes,
365 EF10_STAT_rx_good_bytes,
366 EF10_STAT_rx_bad_bytes,
367 EF10_STAT_rx_packets,
368 EF10_STAT_rx_good,
369 EF10_STAT_rx_bad,
370 EF10_STAT_rx_pause,
371 EF10_STAT_rx_control,
372 EF10_STAT_rx_unicast,
373 EF10_STAT_rx_multicast,
374 EF10_STAT_rx_broadcast,
375 EF10_STAT_rx_lt64,
376 EF10_STAT_rx_64,
377 EF10_STAT_rx_65_to_127,
378 EF10_STAT_rx_128_to_255,
379 EF10_STAT_rx_256_to_511,
380 EF10_STAT_rx_512_to_1023,
381 EF10_STAT_rx_1024_to_15xx,
382 EF10_STAT_rx_15xx_to_jumbo,
383 EF10_STAT_rx_gtjumbo,
384 EF10_STAT_rx_bad_gtjumbo,
385 EF10_STAT_rx_overflow,
386 EF10_STAT_rx_align_error,
387 EF10_STAT_rx_length_error,
388 EF10_STAT_rx_nodesc_drops,
389 EF10_STAT_COUNT
390};
391
392/**
393 * struct efx_ef10_nic_data - EF10 architecture NIC state
394 * @mcdi_buf: DMA buffer for MCDI
395 * @warm_boot_count: Last seen MC warm boot count
396 * @vi_base: Absolute index of first VI in this function
397 * @n_allocated_vis: Number of VIs allocated to this function
398 * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
399 * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
400 * @rx_rss_context: Firmware handle for our RSS context
401 * @stats: Hardware statistics
402 * @workaround_35388: Flag: firmware supports workaround for bug 35388
403 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
404 * %MC_CMD_GET_CAPABILITIES response)
405 */
406struct efx_ef10_nic_data {
407 struct efx_buffer mcdi_buf;
408 u16 warm_boot_count;
409 unsigned int vi_base;
410 unsigned int n_allocated_vis;
411 bool must_realloc_vis;
412 bool must_restore_filters;
413 u32 rx_rss_context;
414 u64 stats[EF10_STAT_COUNT];
415 bool workaround_35388;
416 u32 datapath_caps;
417};
164 418
165/* 419/*
166 * On the SFC9000 family each port is associated with 1 PCI physical 420 * On the SFC9000 family each port is associated with 1 PCI physical
@@ -263,6 +517,7 @@ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
263extern const struct efx_nic_type falcon_a1_nic_type; 517extern const struct efx_nic_type falcon_a1_nic_type;
264extern const struct efx_nic_type falcon_b0_nic_type; 518extern const struct efx_nic_type falcon_b0_nic_type;
265extern const struct efx_nic_type siena_a0_nic_type; 519extern const struct efx_nic_type siena_a0_nic_type;
520extern const struct efx_nic_type efx_hunt_a0_nic_type;
266 521
267/************************************************************************** 522/**************************************************************************
268 * 523 *
@@ -274,35 +529,123 @@ extern const struct efx_nic_type siena_a0_nic_type;
274extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); 529extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
275 530
276/* TX data path */ 531/* TX data path */
277extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); 532static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
278extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); 533{
279extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue); 534 return tx_queue->efx->type->tx_probe(tx_queue);
280extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); 535}
281extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); 536static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
537{
538 tx_queue->efx->type->tx_init(tx_queue);
539}
540static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
541{
542 tx_queue->efx->type->tx_remove(tx_queue);
543}
544static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
545{
546 tx_queue->efx->type->tx_write(tx_queue);
547}
282 548
283/* RX data path */ 549/* RX data path */
284extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); 550static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
285extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); 551{
286extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); 552 return rx_queue->efx->type->rx_probe(rx_queue);
287extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); 553}
288extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); 554static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
289extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue); 555{
556 rx_queue->efx->type->rx_init(rx_queue);
557}
558static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
559{
560 rx_queue->efx->type->rx_remove(rx_queue);
561}
562static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
563{
564 rx_queue->efx->type->rx_write(rx_queue);
565}
566static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
567{
568 rx_queue->efx->type->rx_defer_refill(rx_queue);
569}
290 570
291/* Event data path */ 571/* Event data path */
292extern int efx_nic_probe_eventq(struct efx_channel *channel); 572static inline int efx_nic_probe_eventq(struct efx_channel *channel)
293extern void efx_nic_init_eventq(struct efx_channel *channel); 573{
294extern void efx_nic_fini_eventq(struct efx_channel *channel); 574 return channel->efx->type->ev_probe(channel);
295extern void efx_nic_remove_eventq(struct efx_channel *channel); 575}
296extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); 576static inline int efx_nic_init_eventq(struct efx_channel *channel)
297extern void efx_nic_eventq_read_ack(struct efx_channel *channel); 577{
298extern bool efx_nic_event_present(struct efx_channel *channel); 578 return channel->efx->type->ev_init(channel);
579}
580static inline void efx_nic_fini_eventq(struct efx_channel *channel)
581{
582 channel->efx->type->ev_fini(channel);
583}
584static inline void efx_nic_remove_eventq(struct efx_channel *channel)
585{
586 channel->efx->type->ev_remove(channel);
587}
588static inline int
589efx_nic_process_eventq(struct efx_channel *channel, int quota)
590{
591 return channel->efx->type->ev_process(channel, quota);
592}
593static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
594{
595 channel->efx->type->ev_read_ack(channel);
596}
597extern void efx_nic_event_test_start(struct efx_channel *channel);
299 598
300/* MAC/PHY */ 599/* Falcon/Siena queue operations */
301extern void falcon_drain_tx_fifo(struct efx_nic *efx); 600extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
302extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 601extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
303extern bool falcon_xmac_check_fault(struct efx_nic *efx); 602extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
304extern int falcon_reconfigure_xmac(struct efx_nic *efx); 603extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
305extern void falcon_update_stats_xmac(struct efx_nic *efx); 604extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
605extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
606extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
607extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
608extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
609extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
610extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
611extern int efx_farch_ev_probe(struct efx_channel *channel);
612extern int efx_farch_ev_init(struct efx_channel *channel);
613extern void efx_farch_ev_fini(struct efx_channel *channel);
614extern void efx_farch_ev_remove(struct efx_channel *channel);
615extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
616extern void efx_farch_ev_read_ack(struct efx_channel *channel);
617extern void efx_farch_ev_test_generate(struct efx_channel *channel);
618
619/* Falcon/Siena filter operations */
620extern int efx_farch_filter_table_probe(struct efx_nic *efx);
621extern void efx_farch_filter_table_restore(struct efx_nic *efx);
622extern void efx_farch_filter_table_remove(struct efx_nic *efx);
623extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
624extern s32 efx_farch_filter_insert(struct efx_nic *efx,
625 struct efx_filter_spec *spec, bool replace);
626extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
627 enum efx_filter_priority priority,
628 u32 filter_id);
629extern int efx_farch_filter_get_safe(struct efx_nic *efx,
630 enum efx_filter_priority priority,
631 u32 filter_id, struct efx_filter_spec *);
632extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
633 enum efx_filter_priority priority);
634extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
635 enum efx_filter_priority priority);
636extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
637extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
638 enum efx_filter_priority priority,
639 u32 *buf, u32 size);
640#ifdef CONFIG_RFS_ACCEL
641extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
642 struct efx_filter_spec *spec);
643extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
644 unsigned int index);
645#endif
646extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
647
648extern bool efx_nic_event_present(struct efx_channel *channel);
306 649
307/* Some statistics are computed as A - B where A and B each increase 650/* Some statistics are computed as A - B where A and B each increase
308 * linearly with some hardware counter(s) and the counters are read 651 * linearly with some hardware counter(s) and the counters are read
@@ -322,16 +665,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
322 *stat = diff; 665 *stat = diff;
323} 666}
324 667
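A hedged usage sketch for the A - B pattern described above, using the Falcon statistic indices declared earlier in this header; treating rx_good as rx_packets - rx_bad is illustrative rather than a statement of how the driver derives it.

	/* Illustrative only: derive one statistic as a difference and let
	 * efx_update_diff_stat() keep it monotonic even when the two
	 * source counters were snapshotted at slightly different times.
	 */
	static void example_derive_rx_good(u64 *stats)
	{
		efx_update_diff_stat(&stats[FALCON_STAT_rx_good],
				     stats[FALCON_STAT_rx_packets] -
				     stats[FALCON_STAT_rx_bad]);
	}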
325/* Interrupts and test events */ 668/* Interrupts */
326extern int efx_nic_init_interrupt(struct efx_nic *efx); 669extern int efx_nic_init_interrupt(struct efx_nic *efx);
327extern void efx_nic_enable_interrupts(struct efx_nic *efx);
328extern void efx_nic_event_test_start(struct efx_channel *channel);
329extern void efx_nic_irq_test_start(struct efx_nic *efx); 670extern void efx_nic_irq_test_start(struct efx_nic *efx);
330extern void efx_nic_disable_interrupts(struct efx_nic *efx);
331extern void efx_nic_fini_interrupt(struct efx_nic *efx); 671extern void efx_nic_fini_interrupt(struct efx_nic *efx);
332extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); 672
333extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); 673/* Falcon/Siena interrupts */
334extern void falcon_irq_ack_a1(struct efx_nic *efx); 674extern void efx_farch_irq_enable_master(struct efx_nic *efx);
675extern void efx_farch_irq_test_generate(struct efx_nic *efx);
676extern void efx_farch_irq_disable_master(struct efx_nic *efx);
677extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
678extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
679extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
335 680
336static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) 681static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
337{ 682{
@@ -345,69 +690,47 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
345/* Global Resources */ 690/* Global Resources */
346extern int efx_nic_flush_queues(struct efx_nic *efx); 691extern int efx_nic_flush_queues(struct efx_nic *efx);
347extern void siena_prepare_flush(struct efx_nic *efx); 692extern void siena_prepare_flush(struct efx_nic *efx);
693extern int efx_farch_fini_dmaq(struct efx_nic *efx);
348extern void siena_finish_flush(struct efx_nic *efx); 694extern void siena_finish_flush(struct efx_nic *efx);
349extern void falcon_start_nic_stats(struct efx_nic *efx); 695extern void falcon_start_nic_stats(struct efx_nic *efx);
350extern void falcon_stop_nic_stats(struct efx_nic *efx); 696extern void falcon_stop_nic_stats(struct efx_nic *efx);
351extern void falcon_setup_xaui(struct efx_nic *efx);
352extern int falcon_reset_xaui(struct efx_nic *efx); 697extern int falcon_reset_xaui(struct efx_nic *efx);
353extern void 698extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
354efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); 699extern void efx_farch_init_common(struct efx_nic *efx);
355extern void efx_nic_init_common(struct efx_nic *efx); 700extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
356extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); 701static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
702{
703 efx->type->rx_push_indir_table(efx);
704}
705extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
357 706
358int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 707int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
359 unsigned int len); 708 unsigned int len, gfp_t gfp_flags);
360void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); 709void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
361 710
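The new gfp_t argument lets callers choose an allocation context; a hedged usage sketch follows (hypothetical caller, arbitrary buffer size).

	/* Illustrative only: allocate and free a DMA-coherent scratch
	 * buffer with the new gfp_flags argument. 4096 bytes is arbitrary.
	 */
	static int example_alloc_scratch(struct efx_nic *efx,
					 struct efx_buffer *buf)
	{
		int rc = efx_nic_alloc_buffer(efx, buf, 4096, GFP_KERNEL);

		if (rc)
			return rc;
		/* ... use buf->addr and buf->dma_addr ... */
		efx_nic_free_buffer(efx, buf);
		return 0;
	}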
362/* Tests */ 711/* Tests */
363struct efx_nic_register_test { 712struct efx_farch_register_test {
364 unsigned address; 713 unsigned address;
365 efx_oword_t mask; 714 efx_oword_t mask;
366}; 715};
367extern int efx_nic_test_registers(struct efx_nic *efx, 716extern int efx_farch_test_registers(struct efx_nic *efx,
368 const struct efx_nic_register_test *regs, 717 const struct efx_farch_register_test *regs,
369 size_t n_regs); 718 size_t n_regs);
370 719
371extern size_t efx_nic_get_regs_len(struct efx_nic *efx); 720extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
372extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); 721extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
373 722
374/************************************************************************** 723extern size_t
375 * 724efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
376 * Falcon MAC stats 725 const unsigned long *mask, u8 *names);
377 * 726extern void
378 ************************************************************************** 727efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
379 */ 728 const unsigned long *mask,
729 u64 *stats, const void *dma_buf, bool accumulate);
730
731#define EFX_MAX_FLUSH_TIME 5000
380 732
381#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) 733extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
382#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) 734 efx_qword_t *event);
383
384/* Retrieve statistic from statistics block */
385#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
386 if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
387 (efx)->mac_stats.efx_stat += le16_to_cpu( \
388 *((__force __le16 *) \
389 (efx->stats_buffer.addr + \
390 FALCON_STAT_OFFSET(falcon_stat)))); \
391 else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
392 (efx)->mac_stats.efx_stat += le32_to_cpu( \
393 *((__force __le32 *) \
394 (efx->stats_buffer.addr + \
395 FALCON_STAT_OFFSET(falcon_stat)))); \
396 else \
397 (efx)->mac_stats.efx_stat += le64_to_cpu( \
398 *((__force __le64 *) \
399 (efx->stats_buffer.addr + \
400 FALCON_STAT_OFFSET(falcon_stat)))); \
401 } while (0)
402
403#define FALCON_MAC_STATS_SIZE 0x100
404
405#define MAC_DATA_LBN 0
406#define MAC_DATA_WIDTH 32
407
408extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
409 efx_qword_t *event);
410
411extern void falcon_poll_xmac(struct efx_nic *efx);
412 735
413#endif /* EFX_NIC_H */ 736#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 11d148cd8441..45eeb7075156 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -1,5 +1,5 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2007-2010 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
@@ -47,21 +47,4 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); 47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); 48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
49 49
50/****************************************************************************
51 * Siena managed PHYs
52 */
53extern const struct efx_phy_operations efx_mcdi_phy_ops;
54
55extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
56 unsigned int prtad, unsigned int devad,
57 u16 addr, u16 *value_out, u32 *status_out);
58extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
59 unsigned int prtad, unsigned int devad,
60 u16 addr, u16 value, u32 *status_out);
61extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
62 struct efx_link_state *link_state,
63 u32 speed, u32 flags, u32 fcntl);
64extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
65extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
66
67#endif 50#endif
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index b495394a6dfa..03acf57df045 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc. 3 * Copyright 2011-2013 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -46,7 +46,7 @@
46#include "mcdi.h" 46#include "mcdi.h"
47#include "mcdi_pcol.h" 47#include "mcdi_pcol.h"
48#include "io.h" 48#include "io.h"
49#include "regs.h" 49#include "farch_regs.h"
50#include "nic.h" 50#include "nic.h"
51 51
52/* Maximum number of events expected to make up a PTP event */ 52/* Maximum number of events expected to make up a PTP event */
@@ -294,8 +294,7 @@ struct efx_ptp_data {
294 struct work_struct pps_work; 294 struct work_struct pps_work;
295 struct workqueue_struct *pps_workwq; 295 struct workqueue_struct *pps_workwq;
296 bool nic_ts_enabled; 296 bool nic_ts_enabled;
297 u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN( 297 MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
298 MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
299 struct efx_ptp_timeset 298 struct efx_ptp_timeset
300 timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; 299 timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
301}; 300};
@@ -311,9 +310,10 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
311/* Enable MCDI PTP support. */ 310/* Enable MCDI PTP support. */
312static int efx_ptp_enable(struct efx_nic *efx) 311static int efx_ptp_enable(struct efx_nic *efx)
313{ 312{
314 u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN]; 313 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
315 314
316 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); 315 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
316 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
317 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, 317 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
318 efx->ptp_data->channel->channel); 318 efx->ptp_data->channel->channel);
319 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); 319 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
@@ -329,9 +329,10 @@ static int efx_ptp_enable(struct efx_nic *efx)
329 */ 329 */
330static int efx_ptp_disable(struct efx_nic *efx) 330static int efx_ptp_disable(struct efx_nic *efx)
331{ 331{
332 u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN]; 332 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
333 333
334 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); 334 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
335 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
335 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), 336 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
336 NULL, 0, NULL); 337 NULL, 0, NULL);
337} 338}
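
The recurring change in these hunks replaces open-coded u8 request buffers with MCDI_DECLARE_BUF. A minimal standalone sketch of the idea, assuming only that the macro wants dword alignment and whole-dword sizing (the kernel's exact definition lives in drivers/net/ethernet/sfc/mcdi.h):

    #include <stdio.h>

    typedef struct { unsigned int word; } efx_dword_t;  /* stand-in type */

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    /* An array of 32-bit words is naturally 4-byte aligned and its size
     * is already rounded up to whole dwords, so callers no longer need
     * the open-coded ALIGN(..., 4) arithmetic seen in the old txbuf.
     */
    #define MCDI_DECLARE_BUF(name, len) \
            efx_dword_t name[DIV_ROUND_UP(len, 4)]

    int main(void)
    {
            MCDI_DECLARE_BUF(inbuf, 10);    /* 10 bytes -> 3 dwords */

            printf("size=%zu align=%zu\n",
                   sizeof(inbuf), _Alignof(efx_dword_t));
            return 0;
    }
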
@@ -389,14 +390,14 @@ static void efx_ptp_send_times(struct efx_nic *efx,
389 host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | 390 host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
390 now.ts_real.tv_nsec); 391 now.ts_real.tv_nsec);
391 /* Update host time in NIC memory */ 392 /* Update host time in NIC memory */
392 _efx_writed(efx, cpu_to_le32(host_time), 393 efx->type->ptp_write_host_time(efx, host_time);
393 FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
394 } 394 }
395 *last_time = now; 395 *last_time = now;
396} 396}
397 397
 398/* Read a timeset from the MC's results and partially process it. */ 398/* Read a timeset from the MC's results and partially process it. */
399static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset) 399static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
400 struct efx_ptp_timeset *timeset)
400{ 401{
401 unsigned start_ns, end_ns; 402 unsigned start_ns, end_ns;
402 403
@@ -425,12 +426,14 @@ static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
425 * busy. A number of readings are taken so that, hopefully, at least one good 426 * busy. A number of readings are taken so that, hopefully, at least one good
426 * synchronisation will be seen in the results. 427 * synchronisation will be seen in the results.
427 */ 428 */
428static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, 429static int
429 size_t response_length, 430efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
430 const struct pps_event_time *last_time) 431 size_t response_length,
432 const struct pps_event_time *last_time)
431{ 433{
432 unsigned number_readings = (response_length / 434 unsigned number_readings =
433 MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN); 435 MCDI_VAR_ARRAY_LEN(response_length,
436 PTP_OUT_SYNCHRONIZE_TIMESET);
434 unsigned i; 437 unsigned i;
435 unsigned total; 438 unsigned total;
436 unsigned ngood = 0; 439 unsigned ngood = 0;
@@ -447,8 +450,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
 447 * appear to be erroneous. 450 * appear to be erroneous.
448 */ 451 */
449 for (i = 0; i < number_readings; i++) { 452 for (i = 0; i < number_readings; i++) {
450 efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]); 453 efx_ptp_read_timeset(
451 synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN; 454 MCDI_ARRAY_STRUCT_PTR(synch_buf,
455 PTP_OUT_SYNCHRONIZE_TIMESET, i),
456 &ptp->timeset[i]);
452 } 457 }
453 458
454 /* Find the last good host-MC synchronization result. The MC times 459 /* Find the last good host-MC synchronization result. The MC times
@@ -518,7 +523,7 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
518static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) 523static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
519{ 524{
520 struct efx_ptp_data *ptp = efx->ptp_data; 525 struct efx_ptp_data *ptp = efx->ptp_data;
521 u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX]; 526 MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
522 size_t response_length; 527 size_t response_length;
523 int rc; 528 int rc;
524 unsigned long timeout; 529 unsigned long timeout;
@@ -527,17 +532,17 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
527 int *start = ptp->start.addr; 532 int *start = ptp->start.addr;
528 533
529 MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE); 534 MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
535 MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
530 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS, 536 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
531 num_readings); 537 num_readings);
532 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO, 538 MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
533 (u32)ptp->start.dma_addr); 539 ptp->start.dma_addr);
534 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
535 (u32)((u64)ptp->start.dma_addr >> 32));
536 540
537 /* Clear flag that signals MC ready */ 541 /* Clear flag that signals MC ready */
538 ACCESS_ONCE(*start) = 0; 542 ACCESS_ONCE(*start) = 0;
539 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, 543 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
540 MC_CMD_PTP_IN_SYNCHRONIZE_LEN); 544 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
545 EFX_BUG_ON_PARANOID(rc);
541 546
542 /* Wait for start from MCDI (or timeout) */ 547 /* Wait for start from MCDI (or timeout) */
543 timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); 548 timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
@@ -564,15 +569,15 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
564/* Transmit a PTP packet, via the MCDI interface, to the wire. */ 569/* Transmit a PTP packet, via the MCDI interface, to the wire. */
565static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) 570static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
566{ 571{
567 u8 *txbuf = efx->ptp_data->txbuf; 572 struct efx_ptp_data *ptp_data = efx->ptp_data;
568 struct skb_shared_hwtstamps timestamps; 573 struct skb_shared_hwtstamps timestamps;
569 int rc = -EIO; 574 int rc = -EIO;
570 /* MCDI driver requires word aligned lengths */ 575 MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
571 size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4); 576 size_t len;
572 u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
573 577
574 MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); 578 MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
575 MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); 579 MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
580 MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
576 if (skb_shinfo(skb)->nr_frags != 0) { 581 if (skb_shinfo(skb)->nr_frags != 0) {
577 rc = skb_linearize(skb); 582 rc = skb_linearize(skb);
578 if (rc != 0) 583 if (rc != 0)
@@ -585,10 +590,12 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
585 goto fail; 590 goto fail;
586 } 591 }
587 skb_copy_from_linear_data(skb, 592 skb_copy_from_linear_data(skb,
588 &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST], 593 MCDI_PTR(ptp_data->txbuf,
589 len); 594 PTP_IN_TRANSMIT_PACKET),
590 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime, 595 skb->len);
591 sizeof(txtime), &len); 596 rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
597 ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
598 txtime, sizeof(txtime), &len);
592 if (rc != 0) 599 if (rc != 0)
593 goto fail; 600 goto fail;
594 601
@@ -872,7 +879,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
872 if (!efx->ptp_data) 879 if (!efx->ptp_data)
873 return -ENOMEM; 880 return -ENOMEM;
874 881
875 rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int)); 882 rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
876 if (rc != 0) 883 if (rc != 0)
877 goto fail1; 884 goto fail1;
878 885
@@ -1359,7 +1366,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
1359 struct efx_ptp_data, 1366 struct efx_ptp_data,
1360 phc_clock_info); 1367 phc_clock_info);
1361 struct efx_nic *efx = ptp_data->channel->efx; 1368 struct efx_nic *efx = ptp_data->channel->efx;
1362 u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN]; 1369 MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
1363 s64 adjustment_ns; 1370 s64 adjustment_ns;
1364 int rc; 1371 int rc;
1365 1372
@@ -1373,9 +1380,8 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
1373 (PPB_EXTRA_BITS + MAX_PPB_BITS)); 1380 (PPB_EXTRA_BITS + MAX_PPB_BITS));
1374 1381
1375 MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); 1382 MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
1376 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns); 1383 MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
1377 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI, 1384 MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
1378 (u32)(adjustment_ns >> 32));
1379 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); 1385 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
1380 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); 1386 MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
1381 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), 1387 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
@@ -1394,11 +1400,11 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
1394 phc_clock_info); 1400 phc_clock_info);
1395 struct efx_nic *efx = ptp_data->channel->efx; 1401 struct efx_nic *efx = ptp_data->channel->efx;
1396 struct timespec delta_ts = ns_to_timespec(delta); 1402 struct timespec delta_ts = ns_to_timespec(delta);
1397 u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN]; 1403 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
1398 1404
1399 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); 1405 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
1400 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0); 1406 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
1401 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0); 1407 MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
1402 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); 1408 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
1403 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); 1409 MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
1404 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), 1410 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1411,11 +1417,12 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
1411 struct efx_ptp_data, 1417 struct efx_ptp_data,
1412 phc_clock_info); 1418 phc_clock_info);
1413 struct efx_nic *efx = ptp_data->channel->efx; 1419 struct efx_nic *efx = ptp_data->channel->efx;
1414 u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN]; 1420 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
1415 u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN]; 1421 MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
1416 int rc; 1422 int rc;
1417 1423
1418 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); 1424 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
1425 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
1419 1426
1420 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), 1427 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
1421 outbuf, sizeof(outbuf), NULL); 1428 outbuf, sizeof(outbuf), NULL);
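
The other pattern above is MCDI_SET_QWORD replacing explicit _LO/_HI dword pairs. A sketch of what that amounts to, assuming a little-endian host (the real macros handle byte order portably): a 64-bit MCDI field is two consecutive little-endian dwords, low word first, which is exactly what the removed pair of MCDI_SET_DWORD calls spelled out:

    #include <stdint.h>
    #include <string.h>

    static void mcdi_set_qword(uint8_t *buf, unsigned int ofst,
                               uint64_t value)
    {
            uint32_t lo = (uint32_t)value;          /* low dword first */
            uint32_t hi = (uint32_t)(value >> 32);  /* then high dword */

            memcpy(buf + ofst, &lo, 4);
            memcpy(buf + ofst + 4, &hi, 4);
    }
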
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 326a28637f3c..efa3612affca 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc. 3 * Copyright 2006-2012 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 6af9cfda50fb..4a596725023f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2011 Solarflare Communications Inc. 4 * Copyright 2005-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
21#include <net/checksum.h> 21#include <net/checksum.h>
22#include "net_driver.h" 22#include "net_driver.h"
23#include "efx.h" 23#include "efx.h"
24#include "filter.h"
24#include "nic.h" 25#include "nic.h"
25#include "selftest.h" 26#include "selftest.h"
26#include "workarounds.h" 27#include "workarounds.h"
@@ -60,13 +61,12 @@ static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
60 return page_address(buf->page) + buf->page_offset; 61 return page_address(buf->page) + buf->page_offset;
61} 62}
62 63
63static inline u32 efx_rx_buf_hash(const u8 *eh) 64static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
64{ 65{
65 /* The ethernet header is always directly after any hash. */ 66#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
66#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 67 return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
67 return __le32_to_cpup((const __le32 *)(eh - 4));
68#else 68#else
69 const u8 *data = eh - 4; 69 const u8 *data = eh + efx->rx_packet_hash_offset;
70 return (u32)data[0] | 70 return (u32)data[0] |
71 (u32)data[1] << 8 | 71 (u32)data[1] << 8 |
72 (u32)data[2] << 16 | 72 (u32)data[2] << 16 |
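
The #else branch above is the standard strict-alignment fallback: assemble the 32-bit little-endian hash byte by byte so no unaligned word load ever happens. As a standalone sketch:

    #include <stdint.h>

    /* Safe on strict-alignment CPUs: only byte loads are performed. */
    static uint32_t load_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] |
                   (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 |
                   (uint32_t)p[3] << 24;
    }
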
@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
326 unsigned int fill_level, batch_size; 326 unsigned int fill_level, batch_size;
327 int space, rc = 0; 327 int space, rc = 0;
328 328
329 if (!rx_queue->refill_enabled)
330 return;
331
329 /* Calculate current fill level, and exit if we don't need to fill */ 332 /* Calculate current fill level, and exit if we don't need to fill */
330 fill_level = (rx_queue->added_count - rx_queue->removed_count); 333 fill_level = (rx_queue->added_count - rx_queue->removed_count);
331 EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries); 334 EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
@@ -435,7 +438,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
435 } 438 }
436 439
437 if (efx->net_dev->features & NETIF_F_RXHASH) 440 if (efx->net_dev->features & NETIF_F_RXHASH)
438 skb->rxhash = efx_rx_buf_hash(eh); 441 skb->rxhash = efx_rx_buf_hash(efx, eh);
439 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 442 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
440 CHECKSUM_UNNECESSARY : CHECKSUM_NONE); 443 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
441 444
@@ -523,10 +526,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
523 526
524 /* Validate the number of fragments and completed length */ 527 /* Validate the number of fragments and completed length */
525 if (n_frags == 1) { 528 if (n_frags == 1) {
526 efx_rx_packet__check_len(rx_queue, rx_buf, len); 529 if (!(flags & EFX_RX_PKT_PREFIX_LEN))
530 efx_rx_packet__check_len(rx_queue, rx_buf, len);
527 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || 531 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
528 unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) || 532 unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
529 unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) || 533 unlikely(len > n_frags * efx->rx_dma_len) ||
530 unlikely(!efx->rx_scatter)) { 534 unlikely(!efx->rx_scatter)) {
531 /* If this isn't an explicit discard request, either 535 /* If this isn't an explicit discard request, either
532 * the hardware or the driver is broken. 536 * the hardware or the driver is broken.
@@ -551,7 +555,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
551 return; 555 return;
552 } 556 }
553 557
554 if (n_frags == 1) 558 if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
555 rx_buf->len = len; 559 rx_buf->len = len;
556 560
557 /* Release and/or sync the DMA mapping - assumes all RX buffers 561 /* Release and/or sync the DMA mapping - assumes all RX buffers
@@ -564,8 +568,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
564 */ 568 */
565 prefetch(efx_rx_buf_va(rx_buf)); 569 prefetch(efx_rx_buf_va(rx_buf));
566 570
567 rx_buf->page_offset += efx->type->rx_buffer_hash_size; 571 rx_buf->page_offset += efx->rx_prefix_size;
568 rx_buf->len -= efx->type->rx_buffer_hash_size; 572 rx_buf->len -= efx->rx_prefix_size;
569 573
570 if (n_frags > 1) { 574 if (n_frags > 1) {
571 /* Release/sync DMA mapping for additional fragments. 575 /* Release/sync DMA mapping for additional fragments.
@@ -577,9 +581,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
577 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 581 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
578 if (--tail_frags == 0) 582 if (--tail_frags == 0)
579 break; 583 break;
580 efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE); 584 efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
581 } 585 }
582 rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE; 586 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
583 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); 587 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
584 } 588 }
585 589
@@ -630,6 +634,13 @@ void __efx_rx_packet(struct efx_channel *channel)
630 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); 634 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
631 u8 *eh = efx_rx_buf_va(rx_buf); 635 u8 *eh = efx_rx_buf_va(rx_buf);
632 636
637 /* Read length from the prefix if necessary. This already
638 * excludes the length of the prefix itself.
639 */
640 if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
641 rx_buf->len = le16_to_cpup((__le16 *)
642 (eh + efx->rx_packet_len_offset));
643
633 /* If we're in loopback test, then pass the packet directly to the 644 /* If we're in loopback test, then pass the packet directly to the
634 * loopback layer, and free the rx_buf here 645 * loopback layer, and free the rx_buf here
635 */ 646 */
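
Taken together, the rx.c changes generalize the RX prefix: the NIC writes a small block ahead of the Ethernet frame, and the hash, captured length, and prefix size now come from per-NIC fields instead of the hard-coded Falcon layout. A hedged sketch of the consumption path; the struct, field names, and offsets here are illustrative, not the real EF10 values:

    #include <stdint.h>

    struct rx_prefix_layout {
            unsigned int prefix_size;   /* bytes ahead of the frame */
            unsigned int len_offset;    /* offset of the captured length */
    };

    static uint16_t load_le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | p[1] << 8);
    }

    static const uint8_t *rx_strip_prefix(const uint8_t *buf,
                                          const struct rx_prefix_layout *l,
                                          unsigned int *frame_len)
    {
            /* the length in the prefix already excludes the prefix */
            *frame_len = load_le16(buf + l->len_offset);
            return buf + l->prefix_size;    /* Ethernet header starts here */
    }
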
@@ -738,9 +749,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
738 749
739 rx_queue->max_fill = max_fill; 750 rx_queue->max_fill = max_fill;
740 rx_queue->fast_fill_trigger = trigger; 751 rx_queue->fast_fill_trigger = trigger;
752 rx_queue->refill_enabled = true;
741 753
742 /* Set up RX descriptor ring */ 754 /* Set up RX descriptor ring */
743 rx_queue->enabled = true;
744 efx_nic_init_rx(rx_queue); 755 efx_nic_init_rx(rx_queue);
745} 756}
746 757
@@ -753,11 +764,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
753 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 764 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
754 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 765 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
755 766
756 /* A flush failure might have left rx_queue->enabled */
757 rx_queue->enabled = false;
758
759 del_timer_sync(&rx_queue->slow_fill); 767 del_timer_sync(&rx_queue->slow_fill);
760 efx_nic_fini_rx(rx_queue);
761 768
762 /* Release RX buffers from the current read ptr to the write ptr */ 769 /* Release RX buffers from the current read ptr to the write ptr */
763 if (rx_queue->buffer) { 770 if (rx_queue->buffer) {
@@ -803,3 +810,130 @@ module_param(rx_refill_threshold, uint, 0444);
803MODULE_PARM_DESC(rx_refill_threshold, 810MODULE_PARM_DESC(rx_refill_threshold,
804 "RX descriptor ring refill threshold (%)"); 811 "RX descriptor ring refill threshold (%)");
805 812
813#ifdef CONFIG_RFS_ACCEL
814
815int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
816 u16 rxq_index, u32 flow_id)
817{
818 struct efx_nic *efx = netdev_priv(net_dev);
819 struct efx_channel *channel;
820 struct efx_filter_spec spec;
821 const struct iphdr *ip;
822 const __be16 *ports;
823 int nhoff;
824 int rc;
825
826 nhoff = skb_network_offset(skb);
827
828 if (skb->protocol == htons(ETH_P_8021Q)) {
829 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
830 nhoff + sizeof(struct vlan_hdr));
 831 if (((const struct vlan_hdr *)(skb->data + nhoff))->
832 h_vlan_encapsulated_proto != htons(ETH_P_IP))
833 return -EPROTONOSUPPORT;
834
835 /* This is IP over 802.1q VLAN. We can't filter on the
836 * IP 5-tuple and the vlan together, so just strip the
837 * vlan header and filter on the IP part.
838 */
839 nhoff += sizeof(struct vlan_hdr);
840 } else if (skb->protocol != htons(ETH_P_IP)) {
841 return -EPROTONOSUPPORT;
842 }
843
844 /* RFS must validate the IP header length before calling us */
845 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
846 ip = (const struct iphdr *)(skb->data + nhoff);
847 if (ip_is_fragment(ip))
848 return -EPROTONOSUPPORT;
849 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
850 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
851
852 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
853 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
854 rxq_index);
855 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
856 ip->daddr, ports[1], ip->saddr, ports[0]);
857 if (rc)
858 return rc;
859
860 rc = efx->type->filter_rfs_insert(efx, &spec);
861 if (rc < 0)
862 return rc;
863
864 /* Remember this so we can check whether to expire the filter later */
865 efx->rps_flow_id[rc] = flow_id;
866 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
867 ++channel->rfs_filters_added;
868
869 netif_info(efx, rx_status, efx->net_dev,
870 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
871 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
872 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
873 rxq_index, flow_id, rc);
874
875 return rc;
876}
877
878bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
879{
880 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
881 unsigned int index, size;
882 u32 flow_id;
883
884 if (!spin_trylock_bh(&efx->filter_lock))
885 return false;
886
887 expire_one = efx->type->filter_rfs_expire_one;
888 index = efx->rps_expire_index;
889 size = efx->type->max_rx_ip_filters;
890 while (quota--) {
891 flow_id = efx->rps_flow_id[index];
892 if (expire_one(efx, flow_id, index))
893 netif_info(efx, rx_status, efx->net_dev,
894 "expired filter %d [flow %u]\n",
895 index, flow_id);
896 if (++index == size)
897 index = 0;
898 }
899 efx->rps_expire_index = index;
900
901 spin_unlock_bh(&efx->filter_lock);
902 return true;
903}
904
905#endif /* CONFIG_RFS_ACCEL */
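
A compact model of the expiry bookkeeping in __efx_filter_rfs_expire, with illustrative names and an example table size: each hardware filter slot remembers the flow_id that installed it, and a cursor walks the table a bounded quota of entries per call, so expiry work stays cheap:

    #include <stdbool.h>

    #define MAX_RX_IP_FILTERS 128       /* illustrative table size */

    static unsigned int rps_flow_id[MAX_RX_IP_FILTERS];
    static unsigned int rps_expire_index;

    /* expire_one() asks the NIC whether the flow behind a slot went idle */
    static void expire_scan(unsigned int quota,
                            bool (*expire_one)(unsigned int flow_id,
                                               unsigned int index))
    {
            unsigned int index = rps_expire_index;

            while (quota--) {
                    expire_one(rps_flow_id[index], index);
                    if (++index == MAX_RX_IP_FILTERS)
                            index = 0;  /* wrap around the table */
            }
            rps_expire_index = index;   /* resume here next time */
    }
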
906
907/**
908 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
909 * @spec: Specification to test
910 *
911 * Return: %true if the specification is a non-drop RX filter that
912 * matches a local MAC address I/G bit value of 1 or matches a local
913 * IPv4 or IPv6 address value in the respective multicast address
914 * range. Otherwise %false.
915 */
916bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
917{
918 if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
919 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
920 return false;
921
922 if (spec->match_flags &
923 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
924 is_multicast_ether_addr(spec->loc_mac))
925 return true;
926
927 if ((spec->match_flags &
928 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
929 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
930 if (spec->ether_type == htons(ETH_P_IP) &&
931 ipv4_is_multicast(spec->loc_host[0]))
932 return true;
933 if (spec->ether_type == htons(ETH_P_IPV6) &&
934 ((const u8 *)spec->loc_host)[0] == 0xff)
935 return true;
936 }
937
938 return false;
939}
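
efx_filter_is_mc_recipient leans on three standard address-format facts, sketched here for reference (plain networking facts, not driver-specific code):

    #include <stdbool.h>
    #include <stdint.h>

    static bool mac_is_multicast(const uint8_t mac[6])
    {
            return mac[0] & 0x01;           /* I/G bit set => group addr */
    }

    static bool ipv4_is_multicast_addr(const uint8_t addr[4])
    {
            return (addr[0] & 0xf0) == 0xe0;    /* 224.0.0.0/4 */
    }

    static bool ipv6_is_multicast_addr(const uint8_t addr[16])
    {
            return addr[0] == 0xff;         /* ff00::/8 */
    }
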
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 2069f51b2aa9..144bbff5a4ae 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2012 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
447static int efx_poll_loopback(struct efx_nic *efx) 447static int efx_poll_loopback(struct efx_nic *efx)
448{ 448{
449 struct efx_loopback_state *state = efx->loopback_selftest; 449 struct efx_loopback_state *state = efx->loopback_selftest;
450 struct efx_channel *channel;
451 450
452 /* NAPI polling is not enabled, so process channels
453 * synchronously */
454 efx_for_each_channel(channel, efx) {
455 if (channel->work_pending)
456 efx_process_channel_now(channel);
457 }
458 return atomic_read(&state->rx_good) == state->packet_count; 451 return atomic_read(&state->rx_good) == state->packet_count;
459} 452}
460 453
@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
586 mutex_lock(&efx->mac_lock); 579 mutex_lock(&efx->mac_lock);
587 efx->type->monitor(efx); 580 efx->type->monitor(efx);
588 mutex_unlock(&efx->mac_lock); 581 mutex_unlock(&efx->mac_lock);
589 } else {
590 struct efx_channel *channel = efx_get_channel(efx, 0);
591 if (channel->work_pending)
592 efx_process_channel_now(channel);
593 } 582 }
594 583
595 mutex_lock(&efx->mac_lock); 584 mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index aed24b736059..87698ae0bf75 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2012 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 8c91775e3c5f..d034bcd124ef 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2013 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -18,8 +18,7 @@
18#include "bitfield.h" 18#include "bitfield.h"
19#include "efx.h" 19#include "efx.h"
20#include "nic.h" 20#include "nic.h"
21#include "spi.h" 21#include "farch_regs.h"
22#include "regs.h"
23#include "io.h" 22#include "io.h"
24#include "phy.h" 23#include "phy.h"
25#include "workarounds.h" 24#include "workarounds.h"
@@ -30,7 +29,6 @@
30/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ 29/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
31 30
32static void siena_init_wol(struct efx_nic *efx); 31static void siena_init_wol(struct efx_nic *efx);
33static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
34 32
35 33
36static void siena_push_irq_moderation(struct efx_channel *channel) 34static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -52,81 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
52 channel->channel); 50 channel->channel);
53} 51}
54 52
55static int siena_mdio_write(struct net_device *net_dev,
56 int prtad, int devad, u16 addr, u16 value)
57{
58 struct efx_nic *efx = netdev_priv(net_dev);
59 uint32_t status;
60 int rc;
61
62 rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
63 addr, value, &status);
64 if (rc)
65 return rc;
66 if (status != MC_CMD_MDIO_STATUS_GOOD)
67 return -EIO;
68
69 return 0;
70}
71
72static int siena_mdio_read(struct net_device *net_dev,
73 int prtad, int devad, u16 addr)
74{
75 struct efx_nic *efx = netdev_priv(net_dev);
76 uint16_t value;
77 uint32_t status;
78 int rc;
79
80 rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
81 addr, &value, &status);
82 if (rc)
83 return rc;
84 if (status != MC_CMD_MDIO_STATUS_GOOD)
85 return -EIO;
86
87 return (int)value;
88}
89
90/* This call is responsible for hooking in the MAC and PHY operations */
91static int siena_probe_port(struct efx_nic *efx)
92{
93 int rc;
94
95 /* Hook in PHY operations table */
96 efx->phy_op = &efx_mcdi_phy_ops;
97
98 /* Set up MDIO structure for PHY */
99 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
100 efx->mdio.mdio_read = siena_mdio_read;
101 efx->mdio.mdio_write = siena_mdio_write;
102
103 /* Fill out MDIO structure, loopback modes, and initial link state */
104 rc = efx->phy_op->probe(efx);
105 if (rc != 0)
106 return rc;
107
108 /* Allocate buffer for stats */
109 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
110 MC_CMD_MAC_NSTATS * sizeof(u64));
111 if (rc)
112 return rc;
113 netif_dbg(efx, probe, efx->net_dev,
114 "stats buffer at %llx (virt %p phys %llx)\n",
115 (u64)efx->stats_buffer.dma_addr,
116 efx->stats_buffer.addr,
117 (u64)virt_to_phys(efx->stats_buffer.addr));
118
119 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
120
121 return 0;
122}
123
124static void siena_remove_port(struct efx_nic *efx)
125{
126 efx->phy_op->remove(efx);
127 efx_nic_free_buffer(efx, &efx->stats_buffer);
128}
129
130void siena_prepare_flush(struct efx_nic *efx) 53void siena_prepare_flush(struct efx_nic *efx)
131{ 54{
132 if (efx->fc_disable++ == 0) 55 if (efx->fc_disable++ == 0)
@@ -139,7 +62,7 @@ void siena_finish_flush(struct efx_nic *efx)
139 efx_mcdi_set_mac(efx); 62 efx_mcdi_set_mac(efx);
140} 63}
141 64
142static const struct efx_nic_register_test siena_register_tests[] = { 65static const struct efx_farch_register_test siena_register_tests[] = {
143 { FR_AZ_ADR_REGION, 66 { FR_AZ_ADR_REGION,
144 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, 67 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
145 { FR_CZ_USR_EV_CFG, 68 { FR_CZ_USR_EV_CFG,
@@ -178,16 +101,16 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
178 /* Reset the chip immediately so that it is completely 101 /* Reset the chip immediately so that it is completely
179 * quiescent regardless of what any VF driver does. 102 * quiescent regardless of what any VF driver does.
180 */ 103 */
181 rc = siena_reset_hw(efx, reset_method); 104 rc = efx_mcdi_reset(efx, reset_method);
182 if (rc) 105 if (rc)
183 goto out; 106 goto out;
184 107
185 tests->registers = 108 tests->registers =
186 efx_nic_test_registers(efx, siena_register_tests, 109 efx_farch_test_registers(efx, siena_register_tests,
187 ARRAY_SIZE(siena_register_tests)) 110 ARRAY_SIZE(siena_register_tests))
188 ? -1 : 1; 111 ? -1 : 1;
189 112
190 rc = siena_reset_hw(efx, reset_method); 113 rc = efx_mcdi_reset(efx, reset_method);
191out: 114out:
192 rc2 = efx_reset_up(efx, reset_method, rc == 0); 115 rc2 = efx_reset_up(efx, reset_method, rc == 0);
193 return rc ? rc : rc2; 116 return rc ? rc : rc2;
@@ -200,11 +123,6 @@ out:
200 ************************************************************************** 123 **************************************************************************
201 */ 124 */
202 125
203static enum reset_type siena_map_reset_reason(enum reset_type reason)
204{
205 return RESET_TYPE_RECOVER_OR_ALL;
206}
207
208static int siena_map_reset_flags(u32 *flags) 126static int siena_map_reset_flags(u32 *flags)
209{ 127{
210 enum { 128 enum {
@@ -230,21 +148,6 @@ static int siena_map_reset_flags(u32 *flags)
230 return -EINVAL; 148 return -EINVAL;
231} 149}
232 150
233static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
234{
235 int rc;
236
237 /* Recover from a failed assertion pre-reset */
238 rc = efx_mcdi_handle_assertion(efx);
239 if (rc)
240 return rc;
241
242 if (method == RESET_TYPE_WORLD)
243 return efx_mcdi_reset_mc(efx);
244 else
245 return efx_mcdi_reset_port(efx);
246}
247
248#ifdef CONFIG_EEH 151#ifdef CONFIG_EEH
249/* When a PCI device is isolated from the bus, a subsequent MMIO read is 152/* When a PCI device is isolated from the bus, a subsequent MMIO read is
250 * required for the kernel EEH mechanisms to notice. As the Solarflare driver 153 * required for the kernel EEH mechanisms to notice. As the Solarflare driver
@@ -274,19 +177,25 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
274 return rc; 177 return rc;
275} 178}
276 179
277static void siena_dimension_resources(struct efx_nic *efx) 180static int siena_dimension_resources(struct efx_nic *efx)
278{ 181{
279 /* Each port has a small block of internal SRAM dedicated to 182 /* Each port has a small block of internal SRAM dedicated to
280 * the buffer table and descriptor caches. In theory we can 183 * the buffer table and descriptor caches. In theory we can
281 * map both blocks to one port, but we don't. 184 * map both blocks to one port, but we don't.
282 */ 185 */
283 efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); 186 efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
187 return 0;
188}
189
190static unsigned int siena_mem_map_size(struct efx_nic *efx)
191{
192 return FR_CZ_MC_TREG_SMEM +
193 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
284} 194}
285 195
286static int siena_probe_nic(struct efx_nic *efx) 196static int siena_probe_nic(struct efx_nic *efx)
287{ 197{
288 struct siena_nic_data *nic_data; 198 struct siena_nic_data *nic_data;
289 bool already_attached = false;
290 efx_oword_t reg; 199 efx_oword_t reg;
291 int rc; 200 int rc;
292 201
@@ -296,38 +205,24 @@ static int siena_probe_nic(struct efx_nic *efx)
296 return -ENOMEM; 205 return -ENOMEM;
297 efx->nic_data = nic_data; 206 efx->nic_data = nic_data;
298 207
299 if (efx_nic_fpga_ver(efx) != 0) { 208 if (efx_farch_fpga_ver(efx) != 0) {
300 netif_err(efx, probe, efx->net_dev, 209 netif_err(efx, probe, efx->net_dev,
301 "Siena FPGA not supported\n"); 210 "Siena FPGA not supported\n");
302 rc = -ENODEV; 211 rc = -ENODEV;
303 goto fail1; 212 goto fail1;
304 } 213 }
305 214
215 efx->max_channels = EFX_MAX_CHANNELS;
216
306 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 217 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
307 efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 218 efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
308 219
309 efx_mcdi_init(efx); 220 rc = efx_mcdi_init(efx);
310
311 /* Recover from a failed assertion before probing */
312 rc = efx_mcdi_handle_assertion(efx);
313 if (rc) 221 if (rc)
314 goto fail1; 222 goto fail1;
315 223
316 /* Let the BMC know that the driver is now in charge of link and
317 * filter settings. We must do this before we reset the NIC */
318 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
319 if (rc) {
320 netif_err(efx, probe, efx->net_dev,
321 "Unable to register driver with MCPU\n");
322 goto fail2;
323 }
324 if (already_attached)
325 /* Not a fatal error */
326 netif_err(efx, probe, efx->net_dev,
327 "Host already registered with MCPU\n");
328
329 /* Now we can reset the NIC */ 224 /* Now we can reset the NIC */
330 rc = siena_reset_hw(efx, RESET_TYPE_ALL); 225 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
331 if (rc) { 226 if (rc) {
332 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); 227 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
333 goto fail3; 228 goto fail3;
@@ -336,7 +231,8 @@ static int siena_probe_nic(struct efx_nic *efx)
336 siena_init_wol(efx); 231 siena_init_wol(efx);
337 232
338 /* Allocate memory for INT_KER */ 233 /* Allocate memory for INT_KER */
339 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); 234 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
235 GFP_KERNEL);
340 if (rc) 236 if (rc)
341 goto fail4; 237 goto fail4;
342 BUG_ON(efx->irq_status.dma_addr & 0x0f); 238 BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -371,8 +267,7 @@ fail5:
371 efx_nic_free_buffer(efx, &efx->irq_status); 267 efx_nic_free_buffer(efx, &efx->irq_status);
372fail4: 268fail4:
373fail3: 269fail3:
374 efx_mcdi_drv_attach(efx, false, NULL); 270 efx_mcdi_fini(efx);
375fail2:
376fail1: 271fail1:
377 kfree(efx->nic_data); 272 kfree(efx->nic_data);
378 return rc; 273 return rc;
@@ -448,7 +343,7 @@ static int siena_init_nic(struct efx_nic *efx)
448 EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); 343 EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
449 efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); 344 efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
450 345
451 efx_nic_init_common(efx); 346 efx_farch_init_common(efx);
452 return 0; 347 return 0;
453} 348}
454 349
@@ -458,144 +353,192 @@ static void siena_remove_nic(struct efx_nic *efx)
458 353
459 efx_nic_free_buffer(efx, &efx->irq_status); 354 efx_nic_free_buffer(efx, &efx->irq_status);
460 355
461 siena_reset_hw(efx, RESET_TYPE_ALL); 356 efx_mcdi_reset(efx, RESET_TYPE_ALL);
462 357
463 /* Relinquish the device back to the BMC */ 358 efx_mcdi_fini(efx);
464 efx_mcdi_drv_attach(efx, false, NULL);
465 359
466 /* Tear down the private nic state */ 360 /* Tear down the private nic state */
467 kfree(efx->nic_data); 361 kfree(efx->nic_data);
468 efx->nic_data = NULL; 362 efx->nic_data = NULL;
469} 363}
470 364
471#define STATS_GENERATION_INVALID ((__force __le64)(-1)) 365#define SIENA_DMA_STAT(ext_name, mcdi_name) \
366 [SIENA_STAT_ ## ext_name] = \
367 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
368#define SIENA_OTHER_STAT(ext_name) \
369 [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
370
371static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
372 SIENA_DMA_STAT(tx_bytes, TX_BYTES),
373 SIENA_OTHER_STAT(tx_good_bytes),
374 SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES),
375 SIENA_DMA_STAT(tx_packets, TX_PKTS),
376 SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS),
377 SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
378 SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS),
379 SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
380 SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
381 SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
382 SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS),
383 SIENA_DMA_STAT(tx_64, TX_64_PKTS),
384 SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
385 SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
386 SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
387 SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
388 SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
389 SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
390 SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS),
391 SIENA_OTHER_STAT(tx_collision),
392 SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS),
393 SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS),
394 SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS),
395 SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS),
396 SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS),
397 SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS),
398 SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS),
399 SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS),
400 SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS),
401 SIENA_DMA_STAT(rx_bytes, RX_BYTES),
402 SIENA_OTHER_STAT(rx_good_bytes),
403 SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES),
404 SIENA_DMA_STAT(rx_packets, RX_PKTS),
405 SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS),
406 SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
407 SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
408 SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS),
409 SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
410 SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
411 SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
412 SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
413 SIENA_DMA_STAT(rx_64, RX_64_PKTS),
414 SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
415 SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
416 SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
417 SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
418 SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
419 SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
420 SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
421 SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
422 SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
423 SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
424 SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
425 SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
426 SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
427 SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
428 SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
429};
430static const unsigned long siena_stat_mask[] = {
431 [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
432};
433
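
The SIENA_DMA_STAT macro combines token pasting with designated array initializers so each descriptor lands at its enum index regardless of listing order. A standalone sketch of the pattern (enum values and offsets here are examples, not the driver's):

    #include <stdio.h>

    enum { STAT_tx_bytes, STAT_rx_bytes, STAT_COUNT };

    struct stat_desc {
            const char *name;
            unsigned int dma_width;     /* bits in the DMA buffer */
            unsigned int offset;        /* byte offset in the DMA buffer */
    };

    #define DMA_STAT(ext_name, index) \
            [STAT_ ## ext_name] = { #ext_name, 64, 8 * (index) }

    static const struct stat_desc stat_desc[STAT_COUNT] = {
            DMA_STAT(tx_bytes, 0),
            DMA_STAT(rx_bytes, 1),
    };

    int main(void)
    {
            printf("%s at offset %u\n", stat_desc[STAT_rx_bytes].name,
                   stat_desc[STAT_rx_bytes].offset);
            return 0;
    }
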
434static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
435{
436 return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
437 siena_stat_mask, names);
438}
472 439
473static int siena_try_update_nic_stats(struct efx_nic *efx) 440static int siena_try_update_nic_stats(struct efx_nic *efx)
474{ 441{
442 struct siena_nic_data *nic_data = efx->nic_data;
443 u64 *stats = nic_data->stats;
475 __le64 *dma_stats; 444 __le64 *dma_stats;
476 struct efx_mac_stats *mac_stats;
477 __le64 generation_start, generation_end; 445 __le64 generation_start, generation_end;
478 446
479 mac_stats = &efx->mac_stats;
480 dma_stats = efx->stats_buffer.addr; 447 dma_stats = efx->stats_buffer.addr;
481 448
482 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; 449 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
483 if (generation_end == STATS_GENERATION_INVALID) 450 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
484 return 0; 451 return 0;
485 rmb(); 452 rmb();
486 453 efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
487#define MAC_STAT(M, D) \ 454 stats, efx->stats_buffer.addr, false);
488 mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
489
490 MAC_STAT(tx_bytes, TX_BYTES);
491 MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
492 efx_update_diff_stat(&mac_stats->tx_good_bytes,
493 mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
494 MAC_STAT(tx_packets, TX_PKTS);
495 MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
496 MAC_STAT(tx_pause, TX_PAUSE_PKTS);
497 MAC_STAT(tx_control, TX_CONTROL_PKTS);
498 MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
499 MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
500 MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
501 MAC_STAT(tx_lt64, TX_LT64_PKTS);
502 MAC_STAT(tx_64, TX_64_PKTS);
503 MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
504 MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
505 MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
506 MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
507 MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
508 MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
509 MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
510 mac_stats->tx_collision = 0;
511 MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
512 MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
513 MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
514 MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
515 MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
516 mac_stats->tx_collision = (mac_stats->tx_single_collision +
517 mac_stats->tx_multiple_collision +
518 mac_stats->tx_excessive_collision +
519 mac_stats->tx_late_collision);
520 MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
521 MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
522 MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
523 MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
524 MAC_STAT(rx_bytes, RX_BYTES);
525 MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
526 efx_update_diff_stat(&mac_stats->rx_good_bytes,
527 mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
528 MAC_STAT(rx_packets, RX_PKTS);
529 MAC_STAT(rx_good, RX_GOOD_PKTS);
530 MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
531 MAC_STAT(rx_pause, RX_PAUSE_PKTS);
532 MAC_STAT(rx_control, RX_CONTROL_PKTS);
533 MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
534 MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
535 MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
536 MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
537 MAC_STAT(rx_64, RX_64_PKTS);
538 MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
539 MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
540 MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
541 MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
542 MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
543 MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
544 MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
545 mac_stats->rx_bad_lt64 = 0;
546 mac_stats->rx_bad_64_to_15xx = 0;
547 mac_stats->rx_bad_15xx_to_jumbo = 0;
548 MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
549 MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
550 mac_stats->rx_missed = 0;
551 MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
552 MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
553 MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
554 MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
555 MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
556 mac_stats->rx_good_lt64 = 0;
557
558 efx->n_rx_nodesc_drop_cnt =
559 le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
560
561#undef MAC_STAT
562
563 rmb(); 455 rmb();
564 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 456 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
565 if (generation_end != generation_start) 457 if (generation_end != generation_start)
566 return -EAGAIN; 458 return -EAGAIN;
567 459
460 /* Update derived statistics */
461 efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
462 stats[SIENA_STAT_tx_bytes] -
463 stats[SIENA_STAT_tx_bad_bytes]);
464 stats[SIENA_STAT_tx_collision] =
465 stats[SIENA_STAT_tx_single_collision] +
466 stats[SIENA_STAT_tx_multiple_collision] +
467 stats[SIENA_STAT_tx_excessive_collision] +
468 stats[SIENA_STAT_tx_late_collision];
469 efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
470 stats[SIENA_STAT_rx_bytes] -
471 stats[SIENA_STAT_rx_bad_bytes]);
568 return 0; 472 return 0;
569} 473}
570 474
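
siena_try_update_nic_stats relies on the firmware's generation-count handshake, the same idea as a seqlock: the MC bumps GENERATION_START, DMAs the statistics block, then writes GENERATION_END with the matching value; the host reads END, copies the block, re-reads START, and retries on mismatch, so it never consumes a torn snapshot. A userspace sketch with C11 fences standing in for rmb():

    #include <stdatomic.h>
    #include <stdint.h>
    #include <string.h>

    static int read_stats_snapshot(const volatile uint64_t *dma_stats,
                                   uint64_t *out, size_t n,
                                   size_t gen_start, size_t gen_end)
    {
            uint64_t end = dma_stats[gen_end];

            if (end == (uint64_t)-1)    /* generation invalid: no stats */
                    return 0;           /* keep the old values */
            atomic_thread_fence(memory_order_acquire);      /* rmb() */
            memcpy(out, (const void *)dma_stats, n * sizeof(*out));
            atomic_thread_fence(memory_order_acquire);      /* rmb() */
            return dma_stats[gen_start] == end ? 0 : -1;    /* ~ -EAGAIN */
    }
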
571static void siena_update_nic_stats(struct efx_nic *efx) 475static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
476 struct rtnl_link_stats64 *core_stats)
572{ 477{
478 struct siena_nic_data *nic_data = efx->nic_data;
479 u64 *stats = nic_data->stats;
573 int retry; 480 int retry;
574 481
 575 /* If we're unlucky enough to read statistics during the DMA, wait 482 /* If we're unlucky enough to read statistics during the DMA, wait
576 * up to 10ms for it to finish (typically takes <500us) */ 483 * up to 10ms for it to finish (typically takes <500us) */
577 for (retry = 0; retry < 100; ++retry) { 484 for (retry = 0; retry < 100; ++retry) {
578 if (siena_try_update_nic_stats(efx) == 0) 485 if (siena_try_update_nic_stats(efx) == 0)
579 return; 486 break;
580 udelay(100); 487 udelay(100);
581 } 488 }
582 489
583 /* Use the old values instead */ 490 if (full_stats)
491 memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
492
493 if (core_stats) {
494 core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
495 core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
496 core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
497 core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
498 core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
499 core_stats->multicast = stats[SIENA_STAT_rx_multicast];
500 core_stats->collisions = stats[SIENA_STAT_tx_collision];
501 core_stats->rx_length_errors =
502 stats[SIENA_STAT_rx_gtjumbo] +
503 stats[SIENA_STAT_rx_length_error];
504 core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad];
505 core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error];
506 core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow];
507 core_stats->tx_window_errors =
508 stats[SIENA_STAT_tx_late_collision];
509
510 core_stats->rx_errors = (core_stats->rx_length_errors +
511 core_stats->rx_crc_errors +
512 core_stats->rx_frame_errors +
513 stats[SIENA_STAT_rx_symbol_error]);
514 core_stats->tx_errors = (core_stats->tx_window_errors +
515 stats[SIENA_STAT_tx_bad]);
516 }
517
518 return SIENA_STAT_COUNT;
584} 519}
585 520
586static void siena_start_nic_stats(struct efx_nic *efx) 521static int siena_mac_reconfigure(struct efx_nic *efx)
587{ 522{
588 __le64 *dma_stats = efx->stats_buffer.addr; 523 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
524 int rc;
589 525
590 dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; 526 BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN !=
527 MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
528 sizeof(efx->multicast_hash));
591 529
592 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 530 efx_farch_filter_sync_rx_mode(efx);
593 MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
594}
595 531
596static void siena_stop_nic_stats(struct efx_nic *efx) 532 WARN_ON(!mutex_is_locked(&efx->mac_lock));
597{ 533
598 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); 534 rc = efx_mcdi_set_mac(efx);
535 if (rc != 0)
536 return rc;
537
538 memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
539 efx->multicast_hash.byte, sizeof(efx->multicast_hash));
540 return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
541 inbuf, sizeof(inbuf), NULL, 0, NULL);
599} 542}
600 543
601/************************************************************************** 544/**************************************************************************
@@ -669,6 +612,241 @@ static void siena_init_wol(struct efx_nic *efx)
669 } 612 }
670} 613}
671 614
615/**************************************************************************
616 *
617 * MCDI
618 *
619 **************************************************************************
620 */
621
622#define MCDI_PDU(efx) \
623 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
624#define MCDI_DOORBELL(efx) \
625 (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
626#define MCDI_STATUS(efx) \
627 (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
628
629static void siena_mcdi_request(struct efx_nic *efx,
630 const efx_dword_t *hdr, size_t hdr_len,
631 const efx_dword_t *sdu, size_t sdu_len)
632{
633 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
634 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
635 unsigned int i;
636 unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
637
638 EFX_BUG_ON_PARANOID(hdr_len != 4);
639
640 efx_writed(efx, hdr, pdu);
641
642 for (i = 0; i < inlen_dw; i++)
643 efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
644
645 /* Ensure the request is written out before the doorbell */
646 wmb();
647
648 /* ring the doorbell with a distinctive value */
649 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
650}
651
652static bool siena_mcdi_poll_response(struct efx_nic *efx)
653{
654 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
655 efx_dword_t hdr;
656
657 efx_readd(efx, &hdr, pdu);
658
659 /* All 1's indicates that shared memory is in reset (and is
660 * not a valid hdr). Wait for it to come out reset before
661 * completing the command
662 */
663 return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
664 EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
665}
666
667static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
668 size_t offset, size_t outlen)
669{
670 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
671 unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
672 int i;
673
674 for (i = 0; i < outlen_dw; i++)
675 efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
676}
677
678static int siena_mcdi_poll_reboot(struct efx_nic *efx)
679{
680 struct siena_nic_data *nic_data = efx->nic_data;
681 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
682 efx_dword_t reg;
683 u32 value;
684
685 efx_readd(efx, &reg, addr);
686 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
687
688 if (value == 0)
689 return 0;
690
691 EFX_ZERO_DWORD(reg);
692 efx_writed(efx, &reg, addr);
693
694 /* MAC statistics have been cleared on the NIC; clear the local
695 * copies that we update with efx_update_diff_stat().
696 */
697 nic_data->stats[SIENA_STAT_tx_good_bytes] = 0;
698 nic_data->stats[SIENA_STAT_rx_good_bytes] = 0;
699
700 if (value == MC_STATUS_DWORD_ASSERT)
701 return -EINTR;
702 else
703 return -EIO;
704}
705
706/**************************************************************************
707 *
708 * MTD
709 *
710 **************************************************************************
711 */
712
713#ifdef CONFIG_SFC_MTD
714
715struct siena_nvram_type_info {
716 int port;
717 const char *name;
718};
719
720static const struct siena_nvram_type_info siena_nvram_types[] = {
721 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
722 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
723 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
724 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
725 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
726 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
727 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
728 [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
729 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
730 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
731 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
732 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
733 [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
734};
735
736static int siena_mtd_probe_partition(struct efx_nic *efx,
737 struct efx_mcdi_mtd_partition *part,
738 unsigned int type)
739{
740 const struct siena_nvram_type_info *info;
741 size_t size, erase_size;
742 bool protected;
743 int rc;
744
745 if (type >= ARRAY_SIZE(siena_nvram_types) ||
746 siena_nvram_types[type].name == NULL)
747 return -ENODEV;
748
749 info = &siena_nvram_types[type];
750
751 if (info->port != efx_port_num(efx))
752 return -ENODEV;
753
754 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
755 if (rc)
756 return rc;
757 if (protected)
758 return -ENODEV; /* hide it */
759
760 part->nvram_type = type;
761 part->common.dev_type_name = "Siena NVRAM manager";
762 part->common.type_name = info->name;
763
764 part->common.mtd.type = MTD_NORFLASH;
765 part->common.mtd.flags = MTD_CAP_NORFLASH;
766 part->common.mtd.size = size;
767 part->common.mtd.erasesize = erase_size;
768
769 return 0;
770}
771
772static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
773 struct efx_mcdi_mtd_partition *parts,
774 size_t n_parts)
775{
776 uint16_t fw_subtype_list[
777 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
778 size_t i;
779 int rc;
780
781 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
782 if (rc)
783 return rc;
784
785 for (i = 0; i < n_parts; i++)
786 parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type];
787
788 return 0;
789}
790
791static int siena_mtd_probe(struct efx_nic *efx)
792{
793 struct efx_mcdi_mtd_partition *parts;
794 u32 nvram_types;
795 unsigned int type;
796 size_t n_parts;
797 int rc;
798
799 ASSERT_RTNL();
800
801 rc = efx_mcdi_nvram_types(efx, &nvram_types);
802 if (rc)
803 return rc;
804
805 parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
806 if (!parts)
807 return -ENOMEM;
808
809 type = 0;
810 n_parts = 0;
811
812 while (nvram_types != 0) {
813 if (nvram_types & 1) {
814 rc = siena_mtd_probe_partition(efx, &parts[n_parts],
815 type);
816 if (rc == 0)
817 n_parts++;
818 else if (rc != -ENODEV)
819 goto fail;
820 }
821 type++;
822 nvram_types >>= 1;
823 }
824
825 rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
826 if (rc)
827 goto fail;
828
829 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
830fail:
831 if (rc)
832 kfree(parts);
833 return rc;
834}
835
836#endif /* CONFIG_SFC_MTD */
837
838/**************************************************************************
839 *
840 * PTP
841 *
842 **************************************************************************
843 */
844
845static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
846{
847 _efx_writed(efx, cpu_to_le32(host_time),
848 FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
849}
850
851/**************************************************************************
852 *
@@ -678,6 +856,7 @@ static void siena_init_wol(struct efx_nic *efx)
  */
 
 const struct efx_nic_type siena_a0_nic_type = {
+	.mem_map_size = siena_mem_map_size,
 	.probe = siena_probe_nic,
 	.remove = siena_remove_nic,
 	.init = siena_init_nic,
@@ -688,44 +867,94 @@ const struct efx_nic_type siena_a0_nic_type = {
 #else
 	.monitor = NULL,
 #endif
-	.map_reset_reason = siena_map_reset_reason,
+	.map_reset_reason = efx_mcdi_map_reset_reason,
 	.map_reset_flags = siena_map_reset_flags,
-	.reset = siena_reset_hw,
-	.probe_port = siena_probe_port,
-	.remove_port = siena_remove_port,
+	.reset = efx_mcdi_reset,
+	.probe_port = efx_mcdi_port_probe,
+	.remove_port = efx_mcdi_port_remove,
+	.fini_dmaq = efx_farch_fini_dmaq,
 	.prepare_flush = siena_prepare_flush,
 	.finish_flush = siena_finish_flush,
+	.describe_stats = siena_describe_nic_stats,
 	.update_stats = siena_update_nic_stats,
-	.start_stats = siena_start_nic_stats,
-	.stop_stats = siena_stop_nic_stats,
+	.start_stats = efx_mcdi_mac_start_stats,
+	.stop_stats = efx_mcdi_mac_stop_stats,
 	.set_id_led = efx_mcdi_set_id_led,
 	.push_irq_moderation = siena_push_irq_moderation,
-	.reconfigure_mac = efx_mcdi_mac_reconfigure,
+	.reconfigure_mac = siena_mac_reconfigure,
 	.check_mac_fault = efx_mcdi_mac_check_fault,
-	.reconfigure_port = efx_mcdi_phy_reconfigure,
+	.reconfigure_port = efx_mcdi_port_reconfigure,
 	.get_wol = siena_get_wol,
 	.set_wol = siena_set_wol,
 	.resume_wol = siena_init_wol,
 	.test_chip = siena_test_chip,
 	.test_nvram = efx_mcdi_nvram_test_all,
+	.mcdi_request = siena_mcdi_request,
+	.mcdi_poll_response = siena_mcdi_poll_response,
+	.mcdi_read_response = siena_mcdi_read_response,
+	.mcdi_poll_reboot = siena_mcdi_poll_reboot,
+	.irq_enable_master = efx_farch_irq_enable_master,
+	.irq_test_generate = efx_farch_irq_test_generate,
+	.irq_disable_non_ev = efx_farch_irq_disable_master,
+	.irq_handle_msi = efx_farch_msi_interrupt,
+	.irq_handle_legacy = efx_farch_legacy_interrupt,
+	.tx_probe = efx_farch_tx_probe,
+	.tx_init = efx_farch_tx_init,
+	.tx_remove = efx_farch_tx_remove,
+	.tx_write = efx_farch_tx_write,
+	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_probe = efx_farch_rx_probe,
+	.rx_init = efx_farch_rx_init,
+	.rx_remove = efx_farch_rx_remove,
+	.rx_write = efx_farch_rx_write,
+	.rx_defer_refill = efx_farch_rx_defer_refill,
+	.ev_probe = efx_farch_ev_probe,
+	.ev_init = efx_farch_ev_init,
+	.ev_fini = efx_farch_ev_fini,
+	.ev_remove = efx_farch_ev_remove,
+	.ev_process = efx_farch_ev_process,
+	.ev_read_ack = efx_farch_ev_read_ack,
+	.ev_test_generate = efx_farch_ev_test_generate,
+	.filter_table_probe = efx_farch_filter_table_probe,
+	.filter_table_restore = efx_farch_filter_table_restore,
+	.filter_table_remove = efx_farch_filter_table_remove,
+	.filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+	.filter_insert = efx_farch_filter_insert,
+	.filter_remove_safe = efx_farch_filter_remove_safe,
+	.filter_get_safe = efx_farch_filter_get_safe,
+	.filter_clear_rx = efx_farch_filter_clear_rx,
+	.filter_count_rx_used = efx_farch_filter_count_rx_used,
+	.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+	.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+	.filter_rfs_insert = efx_farch_filter_rfs_insert,
+	.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+	.mtd_probe = siena_mtd_probe,
+	.mtd_rename = efx_mcdi_mtd_rename,
+	.mtd_read = efx_mcdi_mtd_read,
+	.mtd_erase = efx_mcdi_mtd_erase,
+	.mtd_write = efx_mcdi_mtd_write,
+	.mtd_sync = efx_mcdi_mtd_sync,
+#endif
+	.ptp_write_host_time = siena_ptp_write_host_time,
 
 	.revision = EFX_REV_SIENA_A0,
-	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
-			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
 	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
 	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
 	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
 	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
 	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
-	.rx_buffer_hash_size = 0x10,
+	.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+	.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
 	.rx_buffer_padding = 0,
 	.can_rx_scatter = true,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
-	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
-				   * interrupt handler only supports 32
-				   * channels */
 	.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
 	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
+	.mcdi_max_ver = 1,
+	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
 };
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 90f8d1604f5f..0c38f926871e 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1,6 +1,6 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2010-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -15,7 +15,7 @@
 #include "mcdi.h"
 #include "filter.h"
 #include "mcdi_pcol.h"
-#include "regs.h"
+#include "farch_regs.h"
 #include "vfdi.h"
 
 /* Number of longs required to track all the VIs in a VF */
@@ -197,8 +197,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index)
 static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
			 unsigned *vi_scale_out, unsigned *vf_total_out)
 {
-	u8 inbuf[MC_CMD_SRIOV_IN_LEN];
-	u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
 	unsigned vi_scale, vf_total;
 	size_t outlen;
 	int rc;
@@ -240,64 +240,55 @@ static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
 static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
			    unsigned int count)
 {
-	u8 *inbuf, *record;
-	unsigned int used;
-	u32 from_rid, from_hi, from_lo;
+	MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
+	MCDI_DECLARE_STRUCT_PTR(record);
+	unsigned int index, used;
+	u64 from_addr;
+	u32 from_rid;
 	int rc;
 
 	mb(); /* Finish writing source/reading dest before DMA starts */
 
-	used = MC_CMD_MEMCPY_IN_LEN(count);
-	if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
+	if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
 		return -ENOBUFS;
+	used = MC_CMD_MEMCPY_IN_LEN(count);
 
-	/* Allocate room for the largest request */
-	inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
-	if (inbuf == NULL)
-		return -ENOMEM;
-
-	record = inbuf;
-	MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
-	while (count-- > 0) {
+	for (index = 0; index < count; index++) {
+		record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
+		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
+			       count);
 		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
			       req->to_rid);
-		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
-			       (u32)req->to_addr);
-		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
-			       (u32)(req->to_addr >> 32));
+		MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
+			       req->to_addr);
 		if (req->from_buf == NULL) {
 			from_rid = req->from_rid;
-			from_lo = (u32)req->from_addr;
-			from_hi = (u32)(req->from_addr >> 32);
+			from_addr = req->from_addr;
 		} else {
-			if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
+			if (WARN_ON(used + req->length >
+				    MCDI_CTL_SDU_LEN_MAX_V1)) {
 				rc = -ENOBUFS;
 				goto out;
 			}
 
 			from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
-			from_lo = used;
-			from_hi = 0;
-			memcpy(inbuf + used, req->from_buf, req->length);
+			from_addr = used;
+			memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
+			       req->length);
 			used += req->length;
 		}
 
 		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
-		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
-			       from_lo);
-		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
-			       from_hi);
+		MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
+			       from_addr);
 		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
			       req->length);
 
 		++req;
-		record += MC_CMD_MEMCPY_IN_RECORD_LEN;
 	}
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
 out:
-	kfree(inbuf);
-
 	mb(); /* Don't write source/read dest before DMA is complete */
 
 	return rc;
@@ -473,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
			 VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
			 VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
 	++vf->msg_seqno;
-	efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
-			   &event);
+	efx_farch_generate_event(efx,
+				 EFX_VI_BASE + vf->index * efx_vf_size(efx),
+				 &event);
 }
 
 static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
@@ -684,16 +676,12 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
 	unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
 	unsigned timeout = HZ;
 	unsigned index, rxqs_count;
-	__le32 *rxqs;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
 	int rc;
 
 	BUILD_BUG_ON(VF_MAX_RX_QUEUES >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
 
-	rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
-	if (rxqs == NULL)
-		return VFDI_RC_ENOMEM;
-
 	rtnl_lock();
 	siena_prepare_flush(efx);
 	rtnl_unlock();
@@ -708,14 +696,19 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
					     vf_offset + index);
			efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
		}
-		if (test_bit(index, vf->rxq_mask))
-			rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
+		if (test_bit(index, vf->rxq_mask)) {
+			MCDI_SET_ARRAY_DWORD(
+				inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+				rxqs_count, vf_offset + index);
+			rxqs_count++;
+		}
	}
 
	atomic_set(&vf->rxq_retry_count, 0);
	while (timeout && (vf->rxq_count || vf->txq_count)) {
-		rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
-				  rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
+		rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+				  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
+				  NULL, 0, NULL);
		WARN_ON(rc < 0);
 
		timeout = wait_event_timeout(vf->flush_waitq,
@@ -725,8 +718,10 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
		for (index = 0; index < count; ++index) {
			if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
				atomic_dec(&vf->rxq_retry_count);
-				rxqs[rxqs_count++] =
-					cpu_to_le32(vf_offset + index);
+				MCDI_SET_ARRAY_DWORD(
+					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+					rxqs_count, vf_offset + index);
+				rxqs_count++;
			}
		}
	}
@@ -749,7 +744,6 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
	}
	efx_sriov_bufs(efx, vf->buftbl_base, NULL,
		       EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
-	kfree(rxqs);
	efx_vfdi_flush_clear(vf);
 
	vf->evq0_count = 0;
@@ -1004,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work)
	struct efx_nic *efx = vf->efx;
	struct efx_buffer buf;
 
-	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
+	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
		efx_sriov_reset_vf(vf, &buf);
		efx_nic_free_buffer(efx, &buf);
	}
@@ -1248,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
			 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn));
 
-		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
+		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
+					  GFP_KERNEL);
		if (rc)
			goto fail;
 
@@ -1280,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx)
	if (rc)
		goto fail_cmd;
 
-	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
+	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
+				  GFP_KERNEL);
	if (rc)
		goto fail_status;
	vfdi_status = efx->vfdi_status.addr;
@@ -1535,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx)
	efx_sriov_usrev(efx, true);
	(void)efx_sriov_cmd(efx, true, NULL, NULL);
 
-	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
+	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
		return;
 
	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
deleted file mode 100644
index 5431a1bbff5c..000000000000
--- a/drivers/net/ethernet/sfc/spi.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_SPI_H
12#define EFX_SPI_H
13
14#include "net_driver.h"
15
16/**************************************************************************
17 *
18 * Basic SPI command set and bit definitions
19 *
20 *************************************************************************/
21
22#define SPI_WRSR 0x01 /* Write status register */
23#define SPI_WRITE 0x02 /* Write data to memory array */
24#define SPI_READ 0x03 /* Read data from memory array */
25#define SPI_WRDI 0x04 /* Reset write enable latch */
26#define SPI_RDSR 0x05 /* Read status register */
27#define SPI_WREN 0x06 /* Set write enable latch */
28#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
29
30#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
31#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
32#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
33#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
34#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
35#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
36
37/**
38 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
39 * @device_id: Controller's id for the device
40 * @size: Size (in bytes)
41 * @addr_len: Number of address bytes in read/write commands
42 * @munge_address: Flag whether addresses should be munged.
43 * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
44 * use bit 3 of the command byte as address bit A8, rather
45 * than having a two-byte address. If this flag is set, then
46 * commands should be munged in this way.
47 * @erase_command: Erase command (or 0 if sector erase not needed).
48 * @erase_size: Erase sector size (in bytes)
49 * Erase commands affect sectors with this size and alignment.
50 * This must be a power of two.
51 * @block_size: Write block size (in bytes).
52 * Write commands are limited to blocks with this size and alignment.
53 */
54struct efx_spi_device {
55 int device_id;
56 unsigned int size;
57 unsigned int addr_len;
58 unsigned int munge_address:1;
59 u8 erase_command;
60 unsigned int erase_size;
61 unsigned int block_size;
62};
63
64static inline bool efx_spi_present(const struct efx_spi_device *spi)
65{
66 return spi->size != 0;
67}
68
69int falcon_spi_cmd(struct efx_nic *efx,
70 const struct efx_spi_device *spi, unsigned int command,
71 int address, const void *in, void *out, size_t len);
72int falcon_spi_wait_write(struct efx_nic *efx,
73 const struct efx_spi_device *spi);
74int falcon_spi_read(struct efx_nic *efx,
75 const struct efx_spi_device *spi, loff_t start,
76 size_t len, size_t *retlen, u8 *buffer);
77int falcon_spi_write(struct efx_nic *efx,
78 const struct efx_spi_device *spi, loff_t start,
79 size_t len, size_t *retlen, const u8 *buffer);
80
81/*
82 * SFC4000 flash is partitioned into:
83 * 0-0x400 chip and board config (see falcon_hwdefs.h)
84 * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
85 * 0x8000-end boot code (mapped to PCI expansion ROM)
86 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
87 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
88 * 0-0x400 chip and board config
89 * configurable VPD
90 * 0x800-0x1800 boot config
91 * Aside from the chip and board config, all of these are optional and may
92 * be absent or truncated depending on the devices used.
93 */
94#define FALCON_NVCONFIG_END 0x400U
95#define FALCON_FLASH_BOOTCODE_START 0x8000U
96#define EFX_EEPROM_BOOTCONFIG_START 0x800U
97#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
98
99#endif /* EFX_SPI_H */
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index d37cb5017129..2c90e6b31575 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -1,5 +1,5 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
  * Copyright 2007-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 5e090e54298e..2ac91c5b5eea 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -306,7 +306,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
-		if (unlikely(buffer->len == 0)) {
+
+		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+		    unlikely(buffer->len == 0)) {
 			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
@@ -437,6 +439,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
 	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
+	if (pkts_compl > 1)
+		++tx_queue->merge_events;
+
 	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
@@ -543,10 +548,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	tx_queue->initialised = true;
 }
 
-void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_tx_buffer *buffer;
 
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "shutting down TX queue %d\n", tx_queue->queue);
+
 	if (!tx_queue->buffer)
 		return;
 
@@ -561,22 +569,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 	netdev_tx_reset_queue(tx_queue->core_txq);
 }
 
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
-	if (!tx_queue->initialised)
-		return;
-
-	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
-		  "shutting down TX queue %d\n", tx_queue->queue);
-
-	tx_queue->initialised = false;
-
-	/* Flush TX queue, remove descriptor ring */
-	efx_nic_fini_tx(tx_queue);
-
-	efx_release_tx_buffers(tx_queue);
-}
-
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	int i;
@@ -708,7 +700,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
		TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
 
 	if (unlikely(!page_buf->addr) &&
-	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+				 GFP_ATOMIC))
		return NULL;
 
	result = (u8 *)page_buf->addr + offset;
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 29bb3f9941c0..3d5ee3259885 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -1,5 +1,5 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
  * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
index 225557caaf5a..ae044f44936a 100644
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -1,5 +1,5 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
  * Copyright 2010-2012 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index e4dd3a7f304b..2310b75d4ec2 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -1,6 +1,6 @@
 /****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -15,27 +15,15 @@
  * Bug numbers are from Solarflare's Bugzilla.
  */
 
-#define EFX_WORKAROUND_ALWAYS(efx) 1
 #define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
 #define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
 #define EFX_WORKAROUND_10G(efx) 1
 
-/* XAUI resets if link not detected */
-#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
-/* RX PCIe double split performance issue */
-#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
 /* Bit-bashed I2C reads cause performance drop */
 #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
-/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
- * or a PCIe error (bug 11028) */
-#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
-/* Transmit flow control may get disabled */
-#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
 /* Truncated IPv4 packets can confuse the TX packet parser */
 #define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
-/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
 /* Legacy interrupt storm when interrupt fifo fills */
 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
 
@@ -56,4 +44,10 @@
 /* Leak overlength packets rather than free */
 #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
 
+/* Lockup when writing event block registers at gen2/gen3 */
+#define EFX_EF10_WORKAROUND_35388(efx) \
+	(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
+#define EFX_WORKAROUND_35388(efx) \
+	(efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+
 #endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 9f5f35e041ac..770036bc2d87 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -212,9 +212,8 @@ static void meth_check_link(struct net_device *dev)
 static int meth_init_tx_ring(struct meth_private *priv)
 {
 	/* Init TX ring */
-	priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
-					   &priv->tx_ring_dma,
-					   GFP_ATOMIC | __GFP_ZERO);
+	priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+					    &priv->tx_ring_dma, GFP_ATOMIC);
 	if (!priv->tx_ring)
		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 02df0894690d..ee18e6f7b4fe 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1770,9 +1770,6 @@ static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	struct sis190_private *tp = netdev_priv(dev);
 	unsigned long flags;
 
-	if (regs->len > SIS190_REGS_SIZE)
-		regs->len = SIS190_REGS_SIZE;
-
 	spin_lock_irqsave(&tp->lock, flags);
 	memcpy_fromio(p, tp->mmio_addr, regs->len);
 	spin_unlock_irqrestore(&tp->lock, flags);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index f5d7ad75e479..b7a39305472b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1309,23 +1309,9 @@ static void sis900_timer(unsigned long data)
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
 	struct mii_phy *mii_phy = sis_priv->mii;
 	static const int next_tick = 5*HZ;
+	int speed = 0, duplex = 0;
 	u16 status;
 
-	if (!sis_priv->autong_complete){
-		int uninitialized_var(speed), duplex = 0;
-
-		sis900_read_mode(net_dev, &speed, &duplex);
-		if (duplex){
-			sis900_set_mode(sis_priv, speed, duplex);
-			sis630_set_eq(net_dev, sis_priv->chipset_rev);
-			netif_carrier_on(net_dev);
-		}
-
-		sis_priv->timer.expires = jiffies + HZ;
-		add_timer(&sis_priv->timer);
-		return;
-	}
-
 	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
 	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
 
@@ -1336,8 +1322,16 @@ static void sis900_timer(unsigned long data)
 		status = sis900_default_phy(net_dev);
 		mii_phy = sis_priv->mii;
 
-		if (status & MII_STAT_LINK)
-			sis900_check_mode(net_dev, mii_phy);
+		if (status & MII_STAT_LINK) {
+			WARN_ON(!(status & MII_STAT_AUTO_DONE));
+
+			sis900_read_mode(net_dev, &speed, &duplex);
+			if (duplex) {
+				sis900_set_mode(sis_priv, speed, duplex);
+				sis630_set_eq(net_dev, sis_priv->chipset_rev);
+				netif_carrier_on(net_dev);
+			}
+		}
 	} else {
 	/* Link ON -> OFF */
		if (!(status & MII_STAT_LINK)){
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 345558fe7367..afe01c4088a3 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2067,7 +2067,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
 	lp->netdev = ndev;
 #ifdef SMC_DYNAMIC_BUS_CONFIG
 	{
-		struct smc911x_platdata *pd = pdev->dev.platform_data;
+		struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);
		if (!pd) {
			ret = -EINVAL;
			goto release_both;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index cde13be7c7de..73be7f3982e6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2202,7 +2202,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
  */
 static int smc_drv_probe(struct platform_device *pdev)
 {
-	struct smc91x_platdata *pd = pdev->dev.platform_data;
+	struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
 	struct smc_local *lp;
 	struct net_device *ndev;
 	struct resource *res, *ires;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a1419211585b..5fdbc2686eb3 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2374,7 +2374,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 	struct device_node *np = pdev->dev.of_node;
 	struct net_device *dev;
 	struct smsc911x_data *pdata;
-	struct smsc911x_platform_config *config = pdev->dev.platform_data;
+	struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
 	struct resource *res, *irq_res;
 	unsigned int intcfg = 0;
 	int res_size, irq_flags;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c922fde929a1..f16a9bdf45bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -70,7 +70,6 @@ struct stmmac_priv {
 	struct net_device *dev;
 	struct device *device;
 	struct mac_device_info *hw;
-	int no_csum_insertion;
 	spinlock_t lock;
 
 	struct phy_device *phydev ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index def7e75e1d57..76ad214b4036 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -45,8 +45,8 @@ static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
 	data = (1000000000ULL / 50000000);
 
 	/* 0.465ns accuracy */
-	if (value & PTP_TCR_TSCTRLSSR)
-		data = (data * 100) / 465;
+	if (!(value & PTP_TCR_TSCTRLSSR))
+		data = (data * 1000) / 465;
 
 	writel(data, ioaddr + PTP_SSIR);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0a9bb9d30c3f..8d4ccd35a016 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1224,8 +1224,9 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
-	if (likely(priv->plat->force_sf_dma_mode ||
-		   ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+	if (priv->plat->force_thresh_dma_mode)
+		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1c83a44c547b..7a0072003f34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -83,6 +83,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
		dma_cfg->mixed_burst =
			of_property_read_bool(np, "snps,mixed-burst");
	}
+	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
+	if (plat->force_thresh_dma_mode) {
+		plat->force_sf_dma_mode = 0;
+		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
+	}
 
	return 0;
 }
@@ -113,14 +118,11 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
	const char *mac = NULL;
 
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
	addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
 
-	plat_dat = pdev->dev.platform_data;
+	plat_dat = dev_get_platdata(&pdev->dev);
	if (pdev->dev.of_node) {
		if (!plat_dat)
			plat_dat = devm_kzalloc(&pdev->dev,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 52b2adf63cbf..f28460ce24a7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9360,7 +9360,7 @@ static ssize_t show_port_phy(struct device *dev,
			      struct device_attribute *attr, char *buf)
 {
 	struct platform_device *plat_dev = to_platform_device(dev);
-	struct niu_parent *p = plat_dev->dev.platform_data;
+	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 	u32 port_phy = p->port_phy;
 	char *orig_buf = buf;
 	int i;
@@ -9390,7 +9390,7 @@ static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
 {
 	struct platform_device *plat_dev = to_platform_device(dev);
-	struct niu_parent *p = plat_dev->dev.platform_data;
+	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 	const char *type_str;
 
 	switch (p->plat_type) {
@@ -9419,7 +9419,7 @@ static ssize_t __show_chan_per_port(struct device *dev,
				    int rx)
 {
 	struct platform_device *plat_dev = to_platform_device(dev);
-	struct niu_parent *p = plat_dev->dev.platform_data;
+	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 	char *orig_buf = buf;
 	u8 *arr;
 	int i;
@@ -9452,7 +9452,7 @@ static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
 {
 	struct platform_device *plat_dev = to_platform_device(dev);
-	struct niu_parent *p = plat_dev->dev.platform_data;
+	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
 
 	return sprintf(buf, "%d\n", p->num_ports);
 }
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d43fa9ff980..7217ee5d6273 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1239,7 +1239,7 @@ static int bigmac_sbus_probe(struct platform_device *op)
 
 static int bigmac_sbus_remove(struct platform_device *op)
 {
-	struct bigmac *bp = dev_get_drvdata(&op->dev);
+	struct bigmac *bp = platform_get_drvdata(op);
 	struct device *parent = op->dev.parent;
 	struct net_device *net_dev = bp->dev;
 	struct platform_device *qec_op;
@@ -1259,8 +1259,6 @@ static int bigmac_sbus_remove(struct platform_device *op)
 
 	free_netdev(net_dev);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 171f5b0809c4..e37b587b3860 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2798,7 +2798,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
 		goto err_out_free_coherent;
 	}
 
-	dev_set_drvdata(&op->dev, hp);
+	platform_set_drvdata(op, hp);
 
 	if (qfe_slot != -1)
		printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
@@ -3111,7 +3111,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
 
-	dev_set_drvdata(&pdev->dev, hp);
+	pci_set_drvdata(pdev, hp);
 
 	if (!qfe_slot) {
		struct pci_dev *qpdev = qp->quattro_dev;
@@ -3159,7 +3159,7 @@ err_out:
 
 static void happy_meal_pci_remove(struct pci_dev *pdev)
 {
-	struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
+	struct happy_meal *hp = pci_get_drvdata(pdev);
 	struct net_device *net_dev = hp->dev;
 
 	unregister_netdev(net_dev);
@@ -3171,7 +3171,7 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
 
 	free_netdev(net_dev);
 
-	dev_set_drvdata(&pdev->dev, NULL);
+	pci_set_drvdata(pdev, NULL);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
@@ -3231,7 +3231,7 @@ static int hme_sbus_probe(struct platform_device *op)
 
 static int hme_sbus_remove(struct platform_device *op)
 {
-	struct happy_meal *hp = dev_get_drvdata(&op->dev);
+	struct happy_meal *hp = platform_get_drvdata(op);
 	struct net_device *net_dev = hp->dev;
 
 	unregister_netdev(net_dev);
@@ -3250,8 +3250,6 @@ static int hme_sbus_remove(struct platform_device *op)
 
 	free_netdev(net_dev);
 
-	dev_set_drvdata(&op->dev, NULL);
-
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 31bbbca341a7..2dc16b6efaf0 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -636,7 +636,7 @@ static void cpmac_hw_stop(struct net_device *dev)
 {
 	int i;
 	struct cpmac_priv *priv = netdev_priv(dev);
-	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
 
 	ar7_device_reset(pdata->reset_bit);
 	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
@@ -659,7 +659,7 @@ static void cpmac_hw_start(struct net_device *dev)
 {
 	int i;
 	struct cpmac_priv *priv = netdev_priv(dev);
-	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
 
 	ar7_device_reset(pdata->reset_bit);
 	for (i = 0; i < 8; i++) {
@@ -1118,7 +1118,7 @@ static int cpmac_probe(struct platform_device *pdev)
 	struct net_device *dev;
 	struct plat_cpmac_data *pdata;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 
 	if (external_switch || dumb_switch) {
		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 22a7a4336211..79974e31187a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,9 +34,9 @@
34#include <linux/of_device.h> 34#include <linux/of_device.h>
35#include <linux/if_vlan.h> 35#include <linux/if_vlan.h>
36 36
37#include <linux/platform_data/cpsw.h>
38#include <linux/pinctrl/consumer.h> 37#include <linux/pinctrl/consumer.h>
39 38
39#include "cpsw.h"
40#include "cpsw_ale.h" 40#include "cpsw_ale.h"
41#include "cpts.h" 41#include "cpts.h"
42#include "davinci_cpdma.h" 42#include "davinci_cpdma.h"
@@ -82,6 +82,8 @@ do { \
82 82
83#define CPSW_VERSION_1 0x19010a 83#define CPSW_VERSION_1 0x19010a
84#define CPSW_VERSION_2 0x19010c 84#define CPSW_VERSION_2 0x19010c
85#define CPSW_VERSION_3 0x19010f
86#define CPSW_VERSION_4 0x190112
85 87
86#define HOST_PORT_NUM 0 88#define HOST_PORT_NUM 0
87#define SLIVER_SIZE 0x40 89#define SLIVER_SIZE 0x40
@@ -91,6 +93,7 @@ do { \
91#define CPSW1_SLAVE_SIZE 0x040 93#define CPSW1_SLAVE_SIZE 0x040
92#define CPSW1_CPDMA_OFFSET 0x100 94#define CPSW1_CPDMA_OFFSET 0x100
93#define CPSW1_STATERAM_OFFSET 0x200 95#define CPSW1_STATERAM_OFFSET 0x200
96#define CPSW1_HW_STATS 0x400
94#define CPSW1_CPTS_OFFSET 0x500 97#define CPSW1_CPTS_OFFSET 0x500
95#define CPSW1_ALE_OFFSET 0x600 98#define CPSW1_ALE_OFFSET 0x600
96#define CPSW1_SLIVER_OFFSET 0x700 99#define CPSW1_SLIVER_OFFSET 0x700
@@ -99,6 +102,7 @@ do { \
99#define CPSW2_SLAVE_OFFSET 0x200 102#define CPSW2_SLAVE_OFFSET 0x200
100#define CPSW2_SLAVE_SIZE 0x100 103#define CPSW2_SLAVE_SIZE 0x100
101#define CPSW2_CPDMA_OFFSET 0x800 104#define CPSW2_CPDMA_OFFSET 0x800
105#define CPSW2_HW_STATS 0x900
102#define CPSW2_STATERAM_OFFSET 0xa00 106#define CPSW2_STATERAM_OFFSET 0xa00
103#define CPSW2_CPTS_OFFSET 0xc00 107#define CPSW2_CPTS_OFFSET 0xc00
104#define CPSW2_ALE_OFFSET 0xd00 108#define CPSW2_ALE_OFFSET 0xd00
@@ -299,6 +303,44 @@ struct cpsw_sliver_regs {
299 u32 rx_pri_map; 303 u32 rx_pri_map;
300}; 304};
301 305
306struct cpsw_hw_stats {
307 u32 rxgoodframes;
308 u32 rxbroadcastframes;
309 u32 rxmulticastframes;
310 u32 rxpauseframes;
311 u32 rxcrcerrors;
312 u32 rxaligncodeerrors;
313 u32 rxoversizedframes;
314 u32 rxjabberframes;
315 u32 rxundersizedframes;
316 u32 rxfragments;
317 u32 __pad_0[2];
318 u32 rxoctets;
319 u32 txgoodframes;
320 u32 txbroadcastframes;
321 u32 txmulticastframes;
322 u32 txpauseframes;
323 u32 txdeferredframes;
324 u32 txcollisionframes;
325 u32 txsinglecollframes;
326 u32 txmultcollframes;
327 u32 txexcessivecollisions;
328 u32 txlatecollisions;
329 u32 txunderrun;
330 u32 txcarriersenseerrors;
331 u32 txoctets;
332 u32 octetframes64;
333 u32 octetframes65t127;
334 u32 octetframes128t255;
335 u32 octetframes256t511;
336 u32 octetframes512t1023;
337 u32 octetframes1024tup;
338 u32 netoctets;
339 u32 rxsofoverruns;
340 u32 rxmofoverruns;
341 u32 rxdmaoverruns;
342};
343
302struct cpsw_slave { 344struct cpsw_slave {
303 void __iomem *regs; 345 void __iomem *regs;
304 struct cpsw_sliver_regs __iomem *sliver; 346 struct cpsw_sliver_regs __iomem *sliver;
@@ -332,6 +374,7 @@ struct cpsw_priv {
332 struct cpsw_platform_data data; 374 struct cpsw_platform_data data;
333 struct cpsw_ss_regs __iomem *regs; 375 struct cpsw_ss_regs __iomem *regs;
334 struct cpsw_wr_regs __iomem *wr_regs; 376 struct cpsw_wr_regs __iomem *wr_regs;
377 u8 __iomem *hw_stats;
335 struct cpsw_host_regs __iomem *host_port_regs; 378 struct cpsw_host_regs __iomem *host_port_regs;
336 u32 msg_enable; 379 u32 msg_enable;
337 u32 version; 380 u32 version;
@@ -354,6 +397,94 @@ struct cpsw_priv {
354 u32 emac_port; 397 u32 emac_port;
355}; 398};
356 399
400struct cpsw_stats {
401 char stat_string[ETH_GSTRING_LEN];
402 int type;
403 int sizeof_stat;
404 int stat_offset;
405};
406
407enum {
408 CPSW_STATS,
409 CPDMA_RX_STATS,
410 CPDMA_TX_STATS,
411};
412
413#define CPSW_STAT(m) CPSW_STATS, \
414 sizeof(((struct cpsw_hw_stats *)0)->m), \
415 offsetof(struct cpsw_hw_stats, m)
416#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
417 sizeof(((struct cpdma_chan_stats *)0)->m), \
418 offsetof(struct cpdma_chan_stats, m)
419#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
420 sizeof(((struct cpdma_chan_stats *)0)->m), \
421 offsetof(struct cpdma_chan_stats, m)
422
423static const struct cpsw_stats cpsw_gstrings_stats[] = {
424 { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
425 { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
426 { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
427 { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
428 { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
429 { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
430 { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
431 { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
432 { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
433 { "Rx Fragments", CPSW_STAT(rxfragments) },
434 { "Rx Octets", CPSW_STAT(rxoctets) },
435 { "Good Tx Frames", CPSW_STAT(txgoodframes) },
436 { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
437 { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
438 { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
439 { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
440 { "Collisions", CPSW_STAT(txcollisionframes) },
441 { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
442 { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
443 { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
444 { "Late Collisions", CPSW_STAT(txlatecollisions) },
445 { "Tx Underrun", CPSW_STAT(txunderrun) },
446 { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
447 { "Tx Octets", CPSW_STAT(txoctets) },
448 { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
449 { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
450 { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
451 { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
452 { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
453 { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
454 { "Net Octets", CPSW_STAT(netoctets) },
455 { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
456 { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
457 { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
458 { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
459 { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
460 { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
461 { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
462 { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
463 { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
464 { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
465 { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
466 { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
467 { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
468 { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
469 { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
470 { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
471 { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
472 { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
473 { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
474 { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
475 { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
476 { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
477 { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
478 { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
479 { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
480 { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
481 { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
482 { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
483 { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
484};
485
486#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
487
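
The CPSW_STAT()/CPDMA_RX_STAT()/CPDMA_TX_STAT() macros above record each counter's size and offset so that one generic loop can service every table entry. For illustration, a standalone user-space sketch of the same descriptor-table pattern (the struct and field names here are illustrative stand-ins, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hw_stats {                      /* stand-in for cpsw_hw_stats */
        uint32_t rxgoodframes;
        uint32_t txgoodframes;
};

struct stat_desc {
        const char *name;
        size_t size;
        size_t offset;
};

#define STAT(m) { #m, sizeof(((struct hw_stats *)0)->m), \
                  offsetof(struct hw_stats, m) }

static const struct stat_desc descs[] = {
        STAT(rxgoodframes),
        STAT(txgoodframes),
};

int main(void)
{
        struct hw_stats hw = { .rxgoodframes = 42, .txgoodframes = 7 };
        size_t i;

        for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
                uint32_t val;

                /* Same walk as cpsw_get_ethtool_stats(): base + offset. */
                memcpy(&val, (uint8_t *)&hw + descs[i].offset, descs[i].size);
                printf("%s = %u\n", descs[i].name, val);
        }
        return 0;
}
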
357#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) 488#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
358#define for_each_slave(priv, func, arg...) \ 489#define for_each_slave(priv, func, arg...) \
359 do { \ 490 do { \
@@ -723,6 +854,69 @@ static int cpsw_set_coalesce(struct net_device *ndev,
723 return 0; 854 return 0;
724} 855}
725 856
857static int cpsw_get_sset_count(struct net_device *ndev, int sset)
858{
859 switch (sset) {
860 case ETH_SS_STATS:
861 return CPSW_STATS_LEN;
862 default:
863 return -EOPNOTSUPP;
864 }
865}
866
867static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
868{
869 u8 *p = data;
870 int i;
871
872 switch (stringset) {
873 case ETH_SS_STATS:
874 for (i = 0; i < CPSW_STATS_LEN; i++) {
875 memcpy(p, cpsw_gstrings_stats[i].stat_string,
876 ETH_GSTRING_LEN);
877 p += ETH_GSTRING_LEN;
878 }
879 break;
880 }
881}
882
883static void cpsw_get_ethtool_stats(struct net_device *ndev,
884 struct ethtool_stats *stats, u64 *data)
885{
886 struct cpsw_priv *priv = netdev_priv(ndev);
887 struct cpdma_chan_stats rx_stats;
888 struct cpdma_chan_stats tx_stats;
889 u32 val;
890 u8 *p;
891 int i;
892
893	/* Collect Davinci CPDMA stats for the Rx and Tx channels */
894 cpdma_chan_get_stats(priv->rxch, &rx_stats);
895 cpdma_chan_get_stats(priv->txch, &tx_stats);
896
897 for (i = 0; i < CPSW_STATS_LEN; i++) {
898 switch (cpsw_gstrings_stats[i].type) {
899 case CPSW_STATS:
900 val = readl(priv->hw_stats +
901 cpsw_gstrings_stats[i].stat_offset);
902 data[i] = val;
903 break;
904
905 case CPDMA_RX_STATS:
906 p = (u8 *)&rx_stats +
907 cpsw_gstrings_stats[i].stat_offset;
908 data[i] = *(u32 *)p;
909 break;
910
911 case CPDMA_TX_STATS:
912 p = (u8 *)&tx_stats +
913 cpsw_gstrings_stats[i].stat_offset;
914 data[i] = *(u32 *)p;
915 break;
916 }
917 }
918}
919
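
For context, a hedged user-space sketch (not part of this patch) of how these counters are fetched through the standard SIOCETHTOOL interface, equivalent to "ethtool -S"; the interface name "eth0" is a hypothetical example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_stats *stats;
        struct ifreq ifr = { 0 };
        unsigned int i;
        int fd;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        /* drvinfo.n_stats mirrors what cpsw_get_sset_count() returns. */
        ifr.ifr_data = (void *)&drvinfo;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        stats = calloc(1, sizeof(*stats) + drvinfo.n_stats * sizeof(__u64));
        stats->cmd = ETHTOOL_GSTATS;
        stats->n_stats = drvinfo.n_stats;
        ifr.ifr_data = (void *)stats;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        /* data[i] corresponds to cpsw_gstrings_stats[i] above. */
        for (i = 0; i < drvinfo.n_stats; i++)
                printf("stat[%u] = %llu\n", i,
                       (unsigned long long)stats->data[i]);

        free(stats);
        close(fd);
        return 0;
}
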
726static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) 920static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
727{ 921{
728 static char *leader = "........................................"; 922 static char *leader = "........................................";
@@ -799,6 +993,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
799 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); 993 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
800 break; 994 break;
801 case CPSW_VERSION_2: 995 case CPSW_VERSION_2:
996 case CPSW_VERSION_3:
997 case CPSW_VERSION_4:
802 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); 998 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
803 break; 999 break;
804 } 1000 }
@@ -1232,6 +1428,33 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1232 1428
1233} 1429}
1234 1430
1431static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1432{
1433 struct cpsw_priv *priv = netdev_priv(ndev);
1434 struct sockaddr *addr = (struct sockaddr *)p;
1435 int flags = 0;
1436 u16 vid = 0;
1437
1438 if (!is_valid_ether_addr(addr->sa_data))
1439 return -EADDRNOTAVAIL;
1440
1441 if (priv->data.dual_emac) {
1442 vid = priv->slaves[priv->emac_port].port_vlan;
1443 flags = ALE_VLAN;
1444 }
1445
1446 cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
1447 flags, vid);
1448 cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
1449 flags, vid);
1450
1451 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
1452 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1453 for_each_slave(priv, cpsw_set_slave_mac, priv);
1454
1455 return 0;
1456}
1457
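
A user-space sketch (illustrative only) of the SIOCSIFHWADDR path that lands in cpsw_ndo_set_mac_address() above; the interface name and address passed in are hypothetical:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_arp.h>

static int set_mac(const char *ifname, const unsigned char mac[6])
{
        struct ifreq ifr = { 0 };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int rc;

        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
        memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);

        /* The driver rejects non-unicast addresses with
         * -EADDRNOTAVAIL via its is_valid_ether_addr() check. */
        rc = ioctl(fd, SIOCSIFHWADDR, &ifr);
        close(fd);
        return rc;
}
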
1235static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) 1458static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
1236{ 1459{
1237 struct cpsw_priv *priv = netdev_priv(ndev); 1460 struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1326,6 +1549,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
1326 .ndo_stop = cpsw_ndo_stop, 1549 .ndo_stop = cpsw_ndo_stop,
1327 .ndo_start_xmit = cpsw_ndo_start_xmit, 1550 .ndo_start_xmit = cpsw_ndo_start_xmit,
1328 .ndo_change_rx_flags = cpsw_ndo_change_rx_flags, 1551 .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
1552 .ndo_set_mac_address = cpsw_ndo_set_mac_address,
1329 .ndo_do_ioctl = cpsw_ndo_ioctl, 1553 .ndo_do_ioctl = cpsw_ndo_ioctl,
1330 .ndo_validate_addr = eth_validate_addr, 1554 .ndo_validate_addr = eth_validate_addr,
1331 .ndo_change_mtu = eth_change_mtu, 1555 .ndo_change_mtu = eth_change_mtu,
@@ -1416,6 +1640,29 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1416 return -EOPNOTSUPP; 1640 return -EOPNOTSUPP;
1417} 1641}
1418 1642
1643static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1644{
1645 struct cpsw_priv *priv = netdev_priv(ndev);
1646 int slave_no = cpsw_slave_index(priv);
1647
1648 wol->supported = 0;
1649 wol->wolopts = 0;
1650
1651 if (priv->slaves[slave_no].phy)
1652 phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
1653}
1654
1655static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1656{
1657 struct cpsw_priv *priv = netdev_priv(ndev);
1658 int slave_no = cpsw_slave_index(priv);
1659
1660 if (priv->slaves[slave_no].phy)
1661 return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
1662 else
1663 return -EOPNOTSUPP;
1664}
1665
1419static const struct ethtool_ops cpsw_ethtool_ops = { 1666static const struct ethtool_ops cpsw_ethtool_ops = {
1420 .get_drvinfo = cpsw_get_drvinfo, 1667 .get_drvinfo = cpsw_get_drvinfo,
1421 .get_msglevel = cpsw_get_msglevel, 1668 .get_msglevel = cpsw_get_msglevel,
@@ -1426,6 +1673,11 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
1426 .set_settings = cpsw_set_settings, 1673 .set_settings = cpsw_set_settings,
1427 .get_coalesce = cpsw_get_coalesce, 1674 .get_coalesce = cpsw_get_coalesce,
1428 .set_coalesce = cpsw_set_coalesce, 1675 .set_coalesce = cpsw_set_coalesce,
1676 .get_sset_count = cpsw_get_sset_count,
1677 .get_strings = cpsw_get_strings,
1678 .get_ethtool_stats = cpsw_get_ethtool_stats,
1679 .get_wol = cpsw_get_wol,
1680 .set_wol = cpsw_set_wol,
1429}; 1681};
1430 1682
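
A short sketch (assumptions flagged: the interface name, and whether the PHY offers WAKE_MAGIC at all) of the ethtool WoL round trip now served by cpsw_get_wol()/cpsw_set_wol(), which simply forward to the attached PHY:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int enable_magic_wol(int fd, const char *ifname)
{
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        struct ifreq ifr = { 0 };

        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&wol;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return -1;
        if (!(wol.supported & WAKE_MAGIC))
                return -1;      /* PHY does not advertise magic-packet wake */

        wol.cmd = ETHTOOL_SWOL;
        wol.wolopts = WAKE_MAGIC;
        return ioctl(fd, SIOCETHTOOL, &ifr);
}
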
1431static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, 1683static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1623,6 +1875,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1623 priv_sl2->host_port = priv->host_port; 1875 priv_sl2->host_port = priv->host_port;
1624 priv_sl2->host_port_regs = priv->host_port_regs; 1876 priv_sl2->host_port_regs = priv->host_port_regs;
1625 priv_sl2->wr_regs = priv->wr_regs; 1877 priv_sl2->wr_regs = priv->wr_regs;
1878 priv_sl2->hw_stats = priv->hw_stats;
1626 priv_sl2->dma = priv->dma; 1879 priv_sl2->dma = priv->dma;
1627 priv_sl2->txch = priv->txch; 1880 priv_sl2->txch = priv->txch;
1628 priv_sl2->rxch = priv->rxch; 1881 priv_sl2->rxch = priv->rxch;
@@ -1780,7 +2033,8 @@ static int cpsw_probe(struct platform_device *pdev)
1780 switch (priv->version) { 2033 switch (priv->version) {
1781 case CPSW_VERSION_1: 2034 case CPSW_VERSION_1:
1782 priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; 2035 priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
1783 priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; 2036 priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
2037 priv->hw_stats = ss_regs + CPSW1_HW_STATS;
1784 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; 2038 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
1785 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; 2039 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
1786 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; 2040 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1790,8 +2044,11 @@ static int cpsw_probe(struct platform_device *pdev)
1790 dma_params.desc_mem_phys = 0; 2044 dma_params.desc_mem_phys = 0;
1791 break; 2045 break;
1792 case CPSW_VERSION_2: 2046 case CPSW_VERSION_2:
2047 case CPSW_VERSION_3:
2048 case CPSW_VERSION_4:
1793 priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; 2049 priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
1794 priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; 2050 priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
2051 priv->hw_stats = ss_regs + CPSW2_HW_STATS;
1795 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; 2052 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
1796 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; 2053 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
1797 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; 2054 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
new file mode 100644
index 000000000000..eb3e101ec048
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -0,0 +1,42 @@
1/* Texas Instruments Ethernet Switch Driver
2 *
3 * Copyright (C) 2013 Texas Instruments
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
10 * kind, whether express or implied; without even the implied warranty
11 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef __CPSW_H__
15#define __CPSW_H__
16
17#include <linux/if_ether.h>
18
19struct cpsw_slave_data {
20 char phy_id[MII_BUS_ID_SIZE];
21 int phy_if;
22 u8 mac_addr[ETH_ALEN];
23 u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
24};
25
26struct cpsw_platform_data {
27 struct cpsw_slave_data *slave_data;
28 u32 ss_reg_ofs; /* Subsystem control register offset */
29 u32 channels; /* number of cpdma channels (symmetric) */
30 u32 slaves; /* number of slave cpgmac ports */
31 u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
32 u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
33 u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
34 u32 ale_entries; /* ale table size */
35	u32	bd_ram_size;  /* buffer descriptor ram size */
36	u32	rx_descs;	/* Number of Rx Descriptors */
37	u32	mac_control;	/* MAC control register */
38	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
39 bool dual_emac; /* Enable Dual EMAC mode */
40};
41
42#endif /* __CPSW_H__ */
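
As an aside, a hypothetical board-file sketch of how this now-public cpsw_platform_data might be populated; every numeric value and phy_id below is an illustrative assumption, not taken from any real board:

#include <linux/phy.h>
#include <linux/sizes.h>
#include "cpsw.h"

static struct cpsw_slave_data board_cpsw_slaves[] = {
        {
                .phy_id             = "davinci_mdio.0:00", /* hypothetical */
                .phy_if             = PHY_INTERFACE_MODE_RGMII,
                .dual_emac_res_vlan = 1,
        },
        {
                .phy_id             = "davinci_mdio.0:01", /* hypothetical */
                .phy_if             = PHY_INTERFACE_MODE_RGMII,
                .dual_emac_res_vlan = 2,
        },
};

static struct cpsw_platform_data board_cpsw_data = {
        .slave_data   = board_cpsw_slaves,
        .channels     = 8,              /* illustrative values only */
        .slaves       = 2,
        .active_slave = 0,
        .ale_entries  = 1024,
        .bd_ram_size  = SZ_8K,
        .rx_descs     = 64,
        .mac_control  = 0x20,           /* placeholder */
        .dual_emac    = true,
};
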
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 031ebc81b50c..90a79462c869 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -591,6 +591,7 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
591 spin_unlock_irqrestore(&chan->lock, flags); 591 spin_unlock_irqrestore(&chan->lock, flags);
592 return 0; 592 return 0;
593} 593}
594EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
594 595
595int cpdma_chan_dump(struct cpdma_chan *chan) 596int cpdma_chan_dump(struct cpdma_chan *chan)
596{ 597{
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 1a222bce4bd7..67df09ea9d04 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1761,7 +1761,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1761 const u8 *mac_addr; 1761 const u8 *mac_addr;
1762 1762
1763 if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) 1763 if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
1764 return pdev->dev.platform_data; 1764 return dev_get_platdata(&pdev->dev);
1765 1765
1766 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1766 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1767 if (!pdata) 1767 if (!pdata)
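
The dev_get_platdata() conversion above is behavior-neutral; in the 3.11-era tree the helper is essentially just an accessor (paraphrased from include/linux/device.h):

static inline void *dev_get_platdata(const struct device *dev)
{
        return dev->platform_data;
}
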
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 16ddfc348062..4ec92659a100 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -314,7 +314,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
314 314
315static int davinci_mdio_probe(struct platform_device *pdev) 315static int davinci_mdio_probe(struct platform_device *pdev)
316{ 316{
317 struct mdio_platform_data *pdata = pdev->dev.platform_data; 317 struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
318 struct device *dev = &pdev->dev; 318 struct device *dev = &pdev->dev;
319 struct davinci_mdio_data *data; 319 struct davinci_mdio_data *data;
320 struct resource *res; 320 struct resource *res;
@@ -421,8 +421,7 @@ bail_out:
421 421
422static int davinci_mdio_remove(struct platform_device *pdev) 422static int davinci_mdio_remove(struct platform_device *pdev)
423{ 423{
424 struct device *dev = &pdev->dev; 424 struct davinci_mdio_data *data = platform_get_drvdata(pdev);
425 struct davinci_mdio_data *data = dev_get_drvdata(dev);
426 425
427 if (data->bus) { 426 if (data->bus) {
428 mdiobus_unregister(data->bus); 427 mdiobus_unregister(data->bus);
@@ -434,8 +433,6 @@ static int davinci_mdio_remove(struct platform_device *pdev)
434 pm_runtime_put_sync(&pdev->dev); 433 pm_runtime_put_sync(&pdev->dev);
435 pm_runtime_disable(&pdev->dev); 434 pm_runtime_disable(&pdev->dev);
436 435
437 dev_set_drvdata(dev, NULL);
438
439 kfree(data); 436 kfree(data);
440 437
441 return 0; 438 return 0;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 098b1c42b393..4083ba8839e1 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -15,3 +15,14 @@ config TILE_NET
15 15
16 To compile this driver as a module, choose M here: the module 16 To compile this driver as a module, choose M here: the module
17 will be called tile_net. 17 will be called tile_net.
18
19config PTP_1588_CLOCK_TILEGX
20 tristate "Tilera TILE-Gx mPIPE as PTP clock"
21 select PTP_1588_CLOCK
22 depends on TILE_NET
23 depends on TILEGX
24 ---help---
25 This driver adds support for using the mPIPE as a PTP
26 clock. This clock is only useful if your PTP programs are
27 getting hardware time stamps on the PTP Ethernet packets
28 using the SO_TIMESTAMPING API.
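
A sketch of the SO_TIMESTAMPING usage this help text refers to: request hardware receive stamps on a socket, then read them from the SCM_TIMESTAMPING control message. The struct is defined locally here (per the layout in Documentation/networking/timestamping) since uapi headers of this era do not export it; index ts[2] carries the raw hardware time:

#include <stdio.h>
#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

struct scm_timestamping {       /* software, legacy hw, raw hw */
        struct timespec ts[3];
};

static int enable_hw_rx_stamps(int sock)
{
        int flags = SOF_TIMESTAMPING_RX_HARDWARE |
                    SOF_TIMESTAMPING_RAW_HARDWARE;

        return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
                          &flags, sizeof(flags));
}

static void print_rx_stamp(struct msghdr *msg) /* after recvmsg() */
{
        struct cmsghdr *cm;

        for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
                if (cm->cmsg_level == SOL_SOCKET &&
                    cm->cmsg_type == SCM_TIMESTAMPING) {
                        struct scm_timestamping *ts = (void *)CMSG_DATA(cm);

                        printf("hw stamp: %lld.%09ld\n",
                               (long long)ts->ts[2].tv_sec,
                               ts->ts[2].tv_nsec);
                }
        }
}
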
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index f3c2d034b32c..949076f4e6ae 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -36,7 +36,10 @@
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/ctype.h> 37#include <linux/ctype.h>
38#include <linux/ip.h> 38#include <linux/ip.h>
39#include <linux/ipv6.h>
39#include <linux/tcp.h> 40#include <linux/tcp.h>
41#include <linux/net_tstamp.h>
42#include <linux/ptp_clock_kernel.h>
40 43
41#include <asm/checksum.h> 44#include <asm/checksum.h>
42#include <asm/homecache.h> 45#include <asm/homecache.h>
@@ -76,6 +79,9 @@
76 79
77#define MAX_FRAGS (MAX_SKB_FRAGS + 1) 80#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
78 81
82/* The "kinds" of buffer stacks (small/large/jumbo). */
83#define MAX_KINDS 3
84
79/* Size of completions data to allocate. 85/* Size of completions data to allocate.
80 * ISSUE: Probably more than needed since we don't use all the channels. 86 * ISSUE: Probably more than needed since we don't use all the channels.
81 */ 87 */
@@ -130,29 +136,31 @@ struct tile_net_tx_wake {
130 136
131/* Info for a specific cpu. */ 137/* Info for a specific cpu. */
132struct tile_net_info { 138struct tile_net_info {
133 /* The NAPI struct. */
134 struct napi_struct napi;
135 /* Packet queue. */
136 gxio_mpipe_iqueue_t iqueue;
137 /* Our cpu. */ 139 /* Our cpu. */
138 int my_cpu; 140 int my_cpu;
139 /* True if iqueue is valid. */
140 bool has_iqueue;
141 /* NAPI flags. */
142 bool napi_added;
143 bool napi_enabled;
144 /* Number of small sk_buffs which must still be provided. */
145 unsigned int num_needed_small_buffers;
146 /* Number of large sk_buffs which must still be provided. */
147 unsigned int num_needed_large_buffers;
148 /* A timer for handling egress completions. */ 141 /* A timer for handling egress completions. */
149 struct hrtimer egress_timer; 142 struct hrtimer egress_timer;
150 /* True if "egress_timer" is scheduled. */ 143 /* True if "egress_timer" is scheduled. */
151 bool egress_timer_scheduled; 144 bool egress_timer_scheduled;
152 /* Comps for each egress channel. */ 145 struct info_mpipe {
153 struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; 146 /* Packet queue. */
154 /* Transmit wake timer for each egress channel. */ 147 gxio_mpipe_iqueue_t iqueue;
155 struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; 148 /* The NAPI struct. */
149 struct napi_struct napi;
150 /* Number of buffers (by kind) which must still be provided. */
151 unsigned int num_needed_buffers[MAX_KINDS];
152 /* instance id. */
153 int instance;
154 /* True if iqueue is valid. */
155 bool has_iqueue;
156 /* NAPI flags. */
157 bool napi_added;
158 bool napi_enabled;
159 /* Comps for each egress channel. */
160 struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
161 /* Transmit wake timer for each egress channel. */
162 struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
163 } mpipe[NR_MPIPE_MAX];
156}; 164};
157 165
158/* Info for egress on a particular egress channel. */ 166/* Info for egress on a particular egress channel. */
@@ -177,19 +185,67 @@ struct tile_net_priv {
177 int loopify_channel; 185 int loopify_channel;
178 /* The egress channel (channel or loopify_channel). */ 186 /* The egress channel (channel or loopify_channel). */
179 int echannel; 187 int echannel;
180 /* Total stats. */ 188 /* mPIPE instance, 0 or 1. */
181 struct net_device_stats stats; 189 int instance;
190#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
191 /* The timestamp config. */
192 struct hwtstamp_config stamp_cfg;
193#endif
182}; 194};
183 195
184/* Egress info, indexed by "priv->echannel" (lazily created as needed). */ 196static struct mpipe_data {
185static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; 197 /* The ingress irq. */
198 int ingress_irq;
186 199
187/* Devices currently associated with each channel. 200 /* The "context" for all devices. */
188 * NOTE: The array entry can become NULL after ifconfig down, but 201 gxio_mpipe_context_t context;
189 * we do not free the underlying net_device structures, so it is 202
190 * safe to use a pointer after reading it from this array. 203 /* Egress info, indexed by "priv->echannel"
191 */ 204 * (lazily created as needed).
192static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; 205 */
206 struct tile_net_egress
207 egress_for_echannel[TILE_NET_CHANNELS];
208
209 /* Devices currently associated with each channel.
210 * NOTE: The array entry can become NULL after ifconfig down, but
211 * we do not free the underlying net_device structures, so it is
212 * safe to use a pointer after reading it from this array.
213 */
214 struct net_device
215 *tile_net_devs_for_channel[TILE_NET_CHANNELS];
216
217 /* The actual memory allocated for the buffer stacks. */
218 void *buffer_stack_vas[MAX_KINDS];
219
220 /* The amount of memory allocated for each buffer stack. */
221 size_t buffer_stack_bytes[MAX_KINDS];
222
223 /* The first buffer stack index
224 * (small = +0, large = +1, jumbo = +2).
225 */
226 int first_buffer_stack;
227
228 /* The buckets. */
229 int first_bucket;
230 int num_buckets;
231
232#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
233 /* PTP-specific data. */
234 struct ptp_clock *ptp_clock;
235 struct ptp_clock_info caps;
236
237 /* Lock for ptp accessors. */
238 struct mutex ptp_lock;
239#endif
240
241} mpipe_data[NR_MPIPE_MAX] = {
242 [0 ... (NR_MPIPE_MAX - 1)] {
243 .ingress_irq = -1,
244 .first_buffer_stack = -1,
245 .first_bucket = -1,
246 .num_buckets = 1
247 }
248};
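
The "[0 ... (NR_MPIPE_MAX - 1)]" designator above is the GCC range-initializer extension: one initializer applied to every array element. A tiny standalone illustration:

#include <stdio.h>

struct slot { int irq; int bucket; };

static struct slot slots[4] = {
        [0 ... 3] = { .irq = -1, .bucket = -1 },  /* all four elements */
};

int main(void)
{
        printf("%d %d\n", slots[2].irq, slots[2].bucket);  /* prints: -1 -1 */
        return 0;
}
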
193 249
194/* A mutex for "tile_net_devs_for_channel". */ 250/* A mutex for "tile_net_devs_for_channel". */
195static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); 251static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
@@ -197,34 +253,17 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
197/* The per-cpu info. */ 253/* The per-cpu info. */
198static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); 254static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
199 255
200/* The "context" for all devices. */
201static gxio_mpipe_context_t context;
202 256
203/* Buffer sizes and mpipe enum codes for buffer stacks. 257/* The buffer size enums for each buffer stack.
204 * See arch/tile/include/gxio/mpipe.h for the set of possible values. 258 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
259 * We avoid the "10368" size because it can induce "false chaining"
260 * on "cut-through" jumbo packets.
205 */ 261 */
206#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 262static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
207#define BUFFER_SIZE_SMALL 128 263 GXIO_MPIPE_BUFFER_SIZE_128,
208#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 264 GXIO_MPIPE_BUFFER_SIZE_1664,
209#define BUFFER_SIZE_LARGE 1664 265 GXIO_MPIPE_BUFFER_SIZE_16384
210 266};
211/* The small/large "buffer stacks". */
212static int small_buffer_stack = -1;
213static int large_buffer_stack = -1;
214
215/* Amount of memory allocated for each buffer stack. */
216static size_t buffer_stack_size;
217
218/* The actual memory allocated for the buffer stacks. */
219static void *small_buffer_stack_va;
220static void *large_buffer_stack_va;
221
222/* The buckets. */
223static int first_bucket = -1;
224static int num_buckets = 1;
225
226/* The ingress irq. */
227static int ingress_irq = -1;
228 267
229/* Text value of tile_net.cpus if passed as a module parameter. */ 268/* Text value of tile_net.cpus if passed as a module parameter. */
230static char *network_cpus_string; 269static char *network_cpus_string;
@@ -232,11 +271,21 @@ static char *network_cpus_string;
232/* The actual cpus in "network_cpus". */ 271/* The actual cpus in "network_cpus". */
233static struct cpumask network_cpus_map; 272static struct cpumask network_cpus_map;
234 273
235/* If "loopify=LINK" was specified, this is "LINK". */ 274/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
236static char *loopify_link_name; 275static char *loopify_link_name;
237 276
238/* If "tile_net.custom" was specified, this is non-NULL. */ 277/* If "tile_net.custom" was specified, this is true. */
239static char *custom_str; 278static bool custom_flag;
279
280/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
281static uint jumbo_num;
282
283/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
284static inline int mpipe_instance(struct net_device *dev)
285{
286 struct tile_net_priv *priv = netdev_priv(dev);
287 return priv->instance;
288}
240 289
241/* The "tile_net.cpus" argument specifies the cpus that are dedicated 290/* The "tile_net.cpus" argument specifies the cpus that are dedicated
242 * to handle ingress packets. 291 * to handle ingress packets.
@@ -289,9 +338,15 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
289/* The "tile_net.custom" argument causes us to ignore the "conventional" 338/* The "tile_net.custom" argument causes us to ignore the "conventional"
290 * classifier metadata, in particular, the "l2_offset". 339 * classifier metadata, in particular, the "l2_offset".
291 */ 340 */
292module_param_named(custom, custom_str, charp, 0444); 341module_param_named(custom, custom_flag, bool, 0444);
293MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); 342MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
294 343
344/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
345 * and to allocate the given number of "jumbo" buffers.
346 */
347module_param_named(jumbo, jumbo_num, uint, 0444);
348MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
349
295/* Atomically update a statistics field. 350/* Atomically update a statistics field.
296 * Note that on TILE-Gx, this operation is fire-and-forget on the 351 * Note that on TILE-Gx, this operation is fire-and-forget on the
297 * issuing core (single-cycle dispatch) and takes only a few cycles 352 * issuing core (single-cycle dispatch) and takes only a few cycles
@@ -305,15 +360,16 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
305} 360}
306 361
307/* Allocate and push a buffer. */ 362/* Allocate and push a buffer. */
308static bool tile_net_provide_buffer(bool small) 363static bool tile_net_provide_buffer(int instance, int kind)
309{ 364{
310 int stack = small ? small_buffer_stack : large_buffer_stack; 365 struct mpipe_data *md = &mpipe_data[instance];
366 gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
367 size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
311 const unsigned long buffer_alignment = 128; 368 const unsigned long buffer_alignment = 128;
312 struct sk_buff *skb; 369 struct sk_buff *skb;
313 int len; 370 int len;
314 371
315 len = sizeof(struct sk_buff **) + buffer_alignment; 372 len = sizeof(struct sk_buff **) + buffer_alignment + bs;
316 len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
317 skb = dev_alloc_skb(len); 373 skb = dev_alloc_skb(len);
318 if (skb == NULL) 374 if (skb == NULL)
319 return false; 375 return false;
@@ -328,7 +384,7 @@ static bool tile_net_provide_buffer(bool small)
328 /* Make sure "skb" and the back-pointer have been flushed. */ 384 /* Make sure "skb" and the back-pointer have been flushed. */
329 wmb(); 385 wmb();
330 386
331 gxio_mpipe_push_buffer(&context, stack, 387 gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
332 (void *)va_to_tile_io_addr(skb->data)); 388 (void *)va_to_tile_io_addr(skb->data));
333 389
334 return true; 390 return true;
@@ -354,11 +410,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va)
354 return skb; 410 return skb;
355} 411}
356 412
357static void tile_net_pop_all_buffers(int stack) 413static void tile_net_pop_all_buffers(int instance, int stack)
358{ 414{
415 struct mpipe_data *md = &mpipe_data[instance];
416
359 for (;;) { 417 for (;;) {
360 tile_io_addr_t addr = 418 tile_io_addr_t addr =
361 (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); 419 (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
420 stack);
362 if (addr == 0) 421 if (addr == 0)
363 break; 422 break;
364 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); 423 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
@@ -369,24 +428,111 @@ static void tile_net_pop_all_buffers(int stack)
369static void tile_net_provide_needed_buffers(void) 428static void tile_net_provide_needed_buffers(void)
370{ 429{
371 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 430 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
431 int instance, kind;
432 for (instance = 0; instance < NR_MPIPE_MAX &&
433 info->mpipe[instance].has_iqueue; instance++) {
434 for (kind = 0; kind < MAX_KINDS; kind++) {
435 while (info->mpipe[instance].num_needed_buffers[kind]
436 != 0) {
437 if (!tile_net_provide_buffer(instance, kind)) {
438 pr_notice("Tile %d still needs"
439 " some buffers\n",
440 info->my_cpu);
441 return;
442 }
443 info->mpipe[instance].
444 num_needed_buffers[kind]--;
445 }
446 }
447 }
448}
372 449
373 while (info->num_needed_small_buffers != 0) { 450/* Get RX timestamp, and store it in the skb. */
374 if (!tile_net_provide_buffer(true)) 451static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
375 goto oops; 452 gxio_mpipe_idesc_t *idesc)
376 info->num_needed_small_buffers--; 453{
454#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
455 if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
456 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
457 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
458 shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
459 idesc->time_stamp_ns);
377 } 460 }
461#endif
462}
378 463
379 while (info->num_needed_large_buffers != 0) { 464/* Get TX timestamp, and store it in the skb. */
380 if (!tile_net_provide_buffer(false)) 465static void tile_tx_timestamp(struct sk_buff *skb, int instance)
381 goto oops; 466{
382 info->num_needed_large_buffers--; 467#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
468 struct skb_shared_info *shtx = skb_shinfo(skb);
469 if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
470 struct mpipe_data *md = &mpipe_data[instance];
471 struct skb_shared_hwtstamps shhwtstamps;
472 struct timespec ts;
473
474 shtx->tx_flags |= SKBTX_IN_PROGRESS;
475 gxio_mpipe_get_timestamp(&md->context, &ts);
476 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
477 shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
478 skb_tstamp_tx(skb, &shhwtstamps);
383 } 479 }
480#endif
481}
384 482
385 return; 483/* Use ioctl() to enable or disable TX or RX timestamping. */
484static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
485 int cmd)
486{
487#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
488 struct hwtstamp_config config;
489 struct tile_net_priv *priv = netdev_priv(dev);
386 490
387oops: 491 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
388 /* Add a description to the page allocation failure dump. */ 492 return -EFAULT;
389 pr_notice("Tile %d still needs some buffers\n", info->my_cpu); 493
494 if (config.flags) /* reserved for future extensions */
495 return -EINVAL;
496
497 switch (config.tx_type) {
498 case HWTSTAMP_TX_OFF:
499 case HWTSTAMP_TX_ON:
500 break;
501 default:
502 return -ERANGE;
503 }
504
505 switch (config.rx_filter) {
506 case HWTSTAMP_FILTER_NONE:
507 break;
508 case HWTSTAMP_FILTER_ALL:
509 case HWTSTAMP_FILTER_SOME:
510 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
511 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
512 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
513 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
514 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
515 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
516 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
517 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
518 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
519 case HWTSTAMP_FILTER_PTP_V2_EVENT:
520 case HWTSTAMP_FILTER_PTP_V2_SYNC:
521 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
522 config.rx_filter = HWTSTAMP_FILTER_ALL;
523 break;
524 default:
525 return -ERANGE;
526 }
527
528 if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
529 return -EFAULT;
530
531 priv->stamp_cfg = config;
532 return 0;
533#else
534 return -EOPNOTSUPP;
535#endif
390} 536}
391 537
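
A minimal user-space counterpart (device name hypothetical) of the ioctl handled above: hand the driver a struct hwtstamp_config through SIOCSHWTSTAMP and let it write back the filter it actually applied:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int hwtstamp_on(int fd, const char *ifname)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_ALL,
        };
        struct ifreq ifr = { 0 };

        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        /* tile_hwtstamp_ioctl() coerces all PTP filters to
         * HWTSTAMP_FILTER_ALL and copies the config back out. */
        return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
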
392static inline bool filter_packet(struct net_device *dev, void *buf) 538static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -398,7 +544,7 @@ static inline bool filter_packet(struct net_device *dev, void *buf)
398 /* Filter out packets that aren't for us. */ 544 /* Filter out packets that aren't for us. */
399 if (!(dev->flags & IFF_PROMISC) && 545 if (!(dev->flags & IFF_PROMISC) &&
400 !is_multicast_ether_addr(buf) && 546 !is_multicast_ether_addr(buf) &&
401 compare_ether_addr(dev->dev_addr, buf) != 0) 547 !ether_addr_equal(dev->dev_addr, buf))
402 return true; 548 return true;
403 549
404 return false; 550 return false;
@@ -409,6 +555,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
409{ 555{
410 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 556 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
411 struct tile_net_priv *priv = netdev_priv(dev); 557 struct tile_net_priv *priv = netdev_priv(dev);
558 int instance = priv->instance;
412 559
413 /* Encode the actual packet length. */ 560 /* Encode the actual packet length. */
414 skb_put(skb, len); 561 skb_put(skb, len);
@@ -419,47 +566,52 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
419 if (idesc->cs && idesc->csum_seed_val == 0xFFFF) 566 if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
420 skb->ip_summed = CHECKSUM_UNNECESSARY; 567 skb->ip_summed = CHECKSUM_UNNECESSARY;
421 568
422 netif_receive_skb(skb); 569 /* Get RX timestamp from idesc. */
570 tile_rx_timestamp(priv, skb, idesc);
571
572 napi_gro_receive(&info->mpipe[instance].napi, skb);
423 573
424 /* Update stats. */ 574 /* Update stats. */
425 tile_net_stats_add(1, &priv->stats.rx_packets); 575 tile_net_stats_add(1, &dev->stats.rx_packets);
426 tile_net_stats_add(len, &priv->stats.rx_bytes); 576 tile_net_stats_add(len, &dev->stats.rx_bytes);
427 577
428 /* Need a new buffer. */ 578 /* Need a new buffer. */
429 if (idesc->size == BUFFER_SIZE_SMALL_ENUM) 579 if (idesc->size == buffer_size_enums[0])
430 info->num_needed_small_buffers++; 580 info->mpipe[instance].num_needed_buffers[0]++;
581 else if (idesc->size == buffer_size_enums[1])
582 info->mpipe[instance].num_needed_buffers[1]++;
431 else 583 else
432 info->num_needed_large_buffers++; 584 info->mpipe[instance].num_needed_buffers[2]++;
433} 585}
434 586
435/* Handle a packet. Return true if "processed", false if "filtered". */ 587/* Handle a packet. Return true if "processed", false if "filtered". */
436static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) 588static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
437{ 589{
438 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 590 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
439 struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; 591 struct mpipe_data *md = &mpipe_data[instance];
592 struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
440 uint8_t l2_offset; 593 uint8_t l2_offset;
441 void *va; 594 void *va;
442 void *buf; 595 void *buf;
443 unsigned long len; 596 unsigned long len;
444 bool filter; 597 bool filter;
445 598
446 /* Drop packets for which no buffer was available. 599 /* Drop packets for which no buffer was available (which can
447 * NOTE: This happens under heavy load. 600 * happen under heavy load), or for which the me/tr/ce flags
601 * are set (which can happen for jumbo cut-through packets,
602 * or with a customized classifier).
448 */ 603 */
449 if (idesc->be) { 604 if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
450 struct tile_net_priv *priv = netdev_priv(dev); 605 if (dev)
451 tile_net_stats_add(1, &priv->stats.rx_dropped); 606 tile_net_stats_add(1, &dev->stats.rx_errors);
452 gxio_mpipe_iqueue_consume(&info->iqueue, idesc); 607 goto drop;
453 if (net_ratelimit())
454 pr_info("Dropping packet (insufficient buffers).\n");
455 return false;
456 } 608 }
457 609
458 /* Get the "l2_offset", if allowed. */ 610 /* Get the "l2_offset", if allowed. */
459 l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); 611 l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
460 612
461 /* Get the raw buffer VA (includes "headroom"). */ 613 /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
462 va = tile_io_addr_to_va((unsigned long)(long)idesc->va); 614 va = tile_io_addr_to_va((unsigned long)idesc->va);
463 615
464 /* Get the actual packet start/length. */ 616 /* Get the actual packet start/length. */
465 buf = va + l2_offset; 617 buf = va + l2_offset;
@@ -470,7 +622,10 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
470 622
471 filter = filter_packet(dev, buf); 623 filter = filter_packet(dev, buf);
472 if (filter) { 624 if (filter) {
473 gxio_mpipe_iqueue_drop(&info->iqueue, idesc); 625 if (dev)
626 tile_net_stats_add(1, &dev->stats.rx_dropped);
627drop:
628 gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
474 } else { 629 } else {
475 struct sk_buff *skb = mpipe_buf_to_skb(va); 630 struct sk_buff *skb = mpipe_buf_to_skb(va);
476 631
@@ -480,7 +635,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
480 tile_net_receive_skb(dev, skb, idesc, len); 635 tile_net_receive_skb(dev, skb, idesc, len);
481 } 636 }
482 637
483 gxio_mpipe_iqueue_consume(&info->iqueue, idesc); 638 gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
484 return !filter; 639 return !filter;
485} 640}
486 641
@@ -501,14 +656,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
501 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 656 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
502 unsigned int work = 0; 657 unsigned int work = 0;
503 gxio_mpipe_idesc_t *idesc; 658 gxio_mpipe_idesc_t *idesc;
504 int i, n; 659 int instance, i, n;
505 660 struct mpipe_data *md;
506 /* Process packets. */ 661 struct info_mpipe *info_mpipe =
507 while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) { 662 container_of(napi, struct info_mpipe, napi);
663
664 instance = info_mpipe->instance;
665 while ((n = gxio_mpipe_iqueue_try_peek(
666 &info_mpipe->iqueue,
667 &idesc)) > 0) {
508 for (i = 0; i < n; i++) { 668 for (i = 0; i < n; i++) {
509 if (i == TILE_NET_BATCH) 669 if (i == TILE_NET_BATCH)
510 goto done; 670 goto done;
511 if (tile_net_handle_packet(idesc + i)) { 671 if (tile_net_handle_packet(instance,
672 idesc + i)) {
512 if (++work >= budget) 673 if (++work >= budget)
513 goto done; 674 goto done;
514 } 675 }
@@ -516,14 +677,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
516 } 677 }
517 678
518 /* There are no packets left. */ 679 /* There are no packets left. */
519 napi_complete(&info->napi); 680 napi_complete(&info_mpipe->napi);
520 681
682 md = &mpipe_data[instance];
521 /* Re-enable hypervisor interrupts. */ 683 /* Re-enable hypervisor interrupts. */
522 gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring); 684 gxio_mpipe_enable_notif_ring_interrupt(
685 &md->context, info->mpipe[instance].iqueue.ring);
523 686
524 /* HACK: Avoid the "rotting packet" problem. */ 687 /* HACK: Avoid the "rotting packet" problem. */
525 if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0) 688 if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
526 napi_schedule(&info->napi); 689 napi_schedule(&info_mpipe->napi);
527 690
528 /* ISSUE: Handle completions? */ 691 /* ISSUE: Handle completions? */
529 692
@@ -533,11 +696,11 @@ done:
533 return work; 696 return work;
534} 697}
535 698
536/* Handle an ingress interrupt on the current cpu. */ 699/* Handle an ingress interrupt from an instance on the current cpu. */
537static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) 700static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
538{ 701{
539 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 702 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
540 napi_schedule(&info->napi); 703 napi_schedule(&info->mpipe[(uint64_t)id].napi);
541 return IRQ_HANDLED; 704 return IRQ_HANDLED;
542} 705}
543 706
@@ -579,7 +742,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
579{ 742{
580 struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx); 743 struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
581 struct tile_net_priv *priv = netdev_priv(dev); 744 struct tile_net_priv *priv = netdev_priv(dev);
582 struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel]; 745 int instance = priv->instance;
746 struct tile_net_tx_wake *tx_wake =
747 &info->mpipe[instance].tx_wake[priv->echannel];
583 748
584 hrtimer_start(&tx_wake->timer, 749 hrtimer_start(&tx_wake->timer,
585 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), 750 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
@@ -617,7 +782,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
617 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 782 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
618 unsigned long irqflags; 783 unsigned long irqflags;
619 bool pending = false; 784 bool pending = false;
620 int i; 785 int i, instance;
621 786
622 local_irq_save(irqflags); 787 local_irq_save(irqflags);
623 788
@@ -625,13 +790,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
625 info->egress_timer_scheduled = false; 790 info->egress_timer_scheduled = false;
626 791
627 /* Free all possible comps for this tile. */ 792 /* Free all possible comps for this tile. */
628 for (i = 0; i < TILE_NET_CHANNELS; i++) { 793 for (instance = 0; instance < NR_MPIPE_MAX &&
629 struct tile_net_egress *egress = &egress_for_echannel[i]; 794 info->mpipe[instance].has_iqueue; instance++) {
630 struct tile_net_comps *comps = info->comps_for_echannel[i]; 795 for (i = 0; i < TILE_NET_CHANNELS; i++) {
631 if (comps->comp_last >= comps->comp_next) 796 struct tile_net_egress *egress =
632 continue; 797 &mpipe_data[instance].egress_for_echannel[i];
633 tile_net_free_comps(egress->equeue, comps, -1, true); 798 struct tile_net_comps *comps =
634 pending = pending || (comps->comp_last < comps->comp_next); 799 info->mpipe[instance].comps_for_echannel[i];
800 if (!egress || comps->comp_last >= comps->comp_next)
801 continue;
802 tile_net_free_comps(egress->equeue, comps, -1, true);
803 pending = pending ||
804 (comps->comp_last < comps->comp_next);
805 }
635 } 806 }
636 807
637 /* Reschedule timer if needed. */ 808 /* Reschedule timer if needed. */
@@ -643,37 +814,112 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
643 return HRTIMER_NORESTART; 814 return HRTIMER_NORESTART;
644} 815}
645 816
646/* Helper function for "tile_net_update()". 817#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
647 * "dev" (i.e. arg) is the device being brought up or down, 818
648 * or NULL if all devices are now down. 819/* PTP clock operations. */
649 */ 820
650static void tile_net_update_cpu(void *arg) 821static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
651{ 822{
652 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 823 int ret = 0;
653 struct net_device *dev = arg; 824 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
825 mutex_lock(&md->ptp_lock);
826 if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
827 ret = -EINVAL;
828 mutex_unlock(&md->ptp_lock);
829 return ret;
830}
654 831
655 if (!info->has_iqueue) 832static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
656 return; 833{
834 int ret = 0;
835 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
836 mutex_lock(&md->ptp_lock);
837 if (gxio_mpipe_adjust_timestamp(&md->context, delta))
838 ret = -EBUSY;
839 mutex_unlock(&md->ptp_lock);
840 return ret;
841}
657 842
658 if (dev != NULL) { 843static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
659 if (!info->napi_added) { 844{
660 netif_napi_add(dev, &info->napi, 845 int ret = 0;
661 tile_net_poll, TILE_NET_WEIGHT); 846 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
662 info->napi_added = true; 847 mutex_lock(&md->ptp_lock);
663 } 848 if (gxio_mpipe_get_timestamp(&md->context, ts))
664 if (!info->napi_enabled) { 849 ret = -EBUSY;
665 napi_enable(&info->napi); 850 mutex_unlock(&md->ptp_lock);
666 info->napi_enabled = true; 851 return ret;
667 } 852}
668 enable_percpu_irq(ingress_irq, 0); 853
669 } else { 854static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
670 disable_percpu_irq(ingress_irq); 855 const struct timespec *ts)
671 if (info->napi_enabled) { 856{
672 napi_disable(&info->napi); 857 int ret = 0;
673 info->napi_enabled = false; 858 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
674 } 859 mutex_lock(&md->ptp_lock);
675 /* FIXME: Drain the iqueue. */ 860 if (gxio_mpipe_set_timestamp(&md->context, ts))
676 } 861 ret = -EBUSY;
862 mutex_unlock(&md->ptp_lock);
863 return ret;
864}
865
866static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
867 struct ptp_clock_request *request, int on)
868{
869 return -EOPNOTSUPP;
870}
871
872static struct ptp_clock_info ptp_mpipe_caps = {
873 .owner = THIS_MODULE,
874 .name = "mPIPE clock",
875 .max_adj = 999999999,
876 .n_ext_ts = 0,
877 .pps = 0,
878 .adjfreq = ptp_mpipe_adjfreq,
879 .adjtime = ptp_mpipe_adjtime,
880 .gettime = ptp_mpipe_gettime,
881 .settime = ptp_mpipe_settime,
882 .enable = ptp_mpipe_enable,
883};
884
885#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
886
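
Once registered (below), the clock shows up as /dev/ptpN and can be read as a dynamic POSIX clock. A sketch, assuming the device landed at index 0; FD_TO_CLOCKID follows the kernel's testptp.c convention:

#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
        struct timespec ts;
        int fd = open("/dev/ptp0", O_RDWR);

        if (fd < 0)
                return 1;
        if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
                printf("mPIPE PTP time: %lld.%09ld\n",
                       (long long)ts.tv_sec, ts.tv_nsec);
        close(fd);
        return 0;
}
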
887/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
888static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
889{
890#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
891 struct timespec ts;
892
893 getnstimeofday(&ts);
894 gxio_mpipe_set_timestamp(&md->context, &ts);
895
896 mutex_init(&md->ptp_lock);
897 md->caps = ptp_mpipe_caps;
898 md->ptp_clock = ptp_clock_register(&md->caps, NULL);
899 if (IS_ERR(md->ptp_clock))
900 netdev_err(dev, "ptp_clock_register failed %ld\n",
901 PTR_ERR(md->ptp_clock));
902#endif
903}
904
905/* Initialize PTP fields in a new device. */
906static void init_ptp_dev(struct tile_net_priv *priv)
907{
908#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
909 priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
910 priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
911#endif
912}
913
914/* Helper functions for "tile_net_update()". */
915static void enable_ingress_irq(void *irq)
916{
917 enable_percpu_irq((long)irq, 0);
918}
919
920static void disable_ingress_irq(void *irq)
921{
922 disable_percpu_irq((long)irq);
677} 923}
678 924
679/* Helper function for tile_net_open() and tile_net_stop(). 925/* Helper function for tile_net_open() and tile_net_stop().
@@ -683,19 +929,22 @@ static int tile_net_update(struct net_device *dev)
683{ 929{
684 static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ 930 static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
685 bool saw_channel = false; 931 bool saw_channel = false;
932 int instance = mpipe_instance(dev);
933 struct mpipe_data *md = &mpipe_data[instance];
686 int channel; 934 int channel;
687 int rc; 935 int rc;
688 int cpu; 936 int cpu;
689 937
690 gxio_mpipe_rules_init(&rules, &context); 938 saw_channel = false;
939 gxio_mpipe_rules_init(&rules, &md->context);
691 940
692 for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { 941 for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
693 if (tile_net_devs_for_channel[channel] == NULL) 942 if (md->tile_net_devs_for_channel[channel] == NULL)
694 continue; 943 continue;
695 if (!saw_channel) { 944 if (!saw_channel) {
696 saw_channel = true; 945 saw_channel = true;
697 gxio_mpipe_rules_begin(&rules, first_bucket, 946 gxio_mpipe_rules_begin(&rules, md->first_bucket,
698 num_buckets, NULL); 947 md->num_buckets, NULL);
699 gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); 948 gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
700 } 949 }
701 gxio_mpipe_rules_add_channel(&rules, channel); 950 gxio_mpipe_rules_add_channel(&rules, channel);
@@ -706,102 +955,150 @@ static int tile_net_update(struct net_device *dev)
706 */ 955 */
707 rc = gxio_mpipe_rules_commit(&rules); 956 rc = gxio_mpipe_rules_commit(&rules);
708 if (rc != 0) { 957 if (rc != 0) {
709 netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc); 958 netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
959 instance, rc);
710 return -EIO; 960 return -EIO;
711 } 961 }
712 962
713 /* Update all cpus, sequentially (to protect "netif_napi_add()"). */ 963 /* Update all cpus, sequentially (to protect "netif_napi_add()").
714 for_each_online_cpu(cpu) 964 * We use on_each_cpu to handle the IPI mask or unmask.
715 smp_call_function_single(cpu, tile_net_update_cpu, 965 */
716 (saw_channel ? dev : NULL), 1); 966 if (!saw_channel)
967 on_each_cpu(disable_ingress_irq,
968 (void *)(long)(md->ingress_irq), 1);
969 for_each_online_cpu(cpu) {
970 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
971
972 if (!info->mpipe[instance].has_iqueue)
973 continue;
974 if (saw_channel) {
975 if (!info->mpipe[instance].napi_added) {
976 netif_napi_add(dev, &info->mpipe[instance].napi,
977 tile_net_poll, TILE_NET_WEIGHT);
978 info->mpipe[instance].napi_added = true;
979 }
980 if (!info->mpipe[instance].napi_enabled) {
981 napi_enable(&info->mpipe[instance].napi);
982 info->mpipe[instance].napi_enabled = true;
983 }
984 } else {
985 if (info->mpipe[instance].napi_enabled) {
986 napi_disable(&info->mpipe[instance].napi);
987 info->mpipe[instance].napi_enabled = false;
988 }
989 /* FIXME: Drain the iqueue. */
990 }
991 }
992 if (saw_channel)
993 on_each_cpu(enable_ingress_irq,
994 (void *)(long)(md->ingress_irq), 1);
717 995
718 /* HACK: Allow packets to flow in the simulator. */ 996 /* HACK: Allow packets to flow in the simulator. */
719 if (saw_channel) 997 if (saw_channel)
720 sim_enable_mpipe_links(0, -1); 998 sim_enable_mpipe_links(instance, -1);
721 999
722 return 0; 1000 return 0;
723} 1001}
724 1002
725/* Allocate and initialize mpipe buffer stacks, and register them in 1003/* Initialize a buffer stack. */
726 * the mPIPE TLBs, for both small and large packet sizes. 1004static int create_buffer_stack(struct net_device *dev,
727 * This routine supports tile_net_init_mpipe(), below. 1005 int kind, size_t num_buffers)
728 */
729static int init_buffer_stacks(struct net_device *dev, int num_buffers)
730{ 1006{
731 pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); 1007 pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
732 int rc; 1008 int instance = mpipe_instance(dev);
1009 struct mpipe_data *md = &mpipe_data[instance];
1010 size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
1011 int stack_idx = md->first_buffer_stack + kind;
1012 void *va;
1013 int i, rc;
733 1014
734 /* Compute stack bytes; we round up to 64KB and then use 1015 /* Round up to 64KB and then use alloc_pages() so we get the
735 * alloc_pages() so we get the required 64KB alignment as well. 1016 * required 64KB alignment.
736 */ 1017 */
737 buffer_stack_size = 1018 md->buffer_stack_bytes[kind] =
738 ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), 1019 ALIGN(needed, 64 * 1024);
739 64 * 1024);
740 1020
741 /* Allocate two buffer stack indices. */ 1021 va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
742 rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); 1022 if (va == NULL) {
743 if (rc < 0) {
744 netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
745 rc);
746 return rc;
747 }
748 small_buffer_stack = rc;
749 large_buffer_stack = rc + 1;
750
751 /* Allocate the small memory stack. */
752 small_buffer_stack_va =
753 alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
754 if (small_buffer_stack_va == NULL) {
755 netdev_err(dev, 1023 netdev_err(dev,
756 "Could not alloc %zd bytes for buffer stacks\n", 1024 "Could not alloc %zd bytes for buffer stack %d\n",
757 buffer_stack_size); 1025 md->buffer_stack_bytes[kind], kind);
758 return -ENOMEM; 1026 return -ENOMEM;
759 } 1027 }
760 rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, 1028
761 BUFFER_SIZE_SMALL_ENUM, 1029 /* Initialize the buffer stack. */
762 small_buffer_stack_va, 1030 rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
763 buffer_stack_size, 0); 1031 buffer_size_enums[kind], va,
1032 md->buffer_stack_bytes[kind], 0);
764 if (rc != 0) { 1033 if (rc != 0) {
765 netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc); 1034 netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
1035 instance, rc);
1036 free_pages_exact(va, md->buffer_stack_bytes[kind]);
766 return rc; 1037 return rc;
767 } 1038 }
768 rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, 1039
1040 md->buffer_stack_vas[kind] = va;
1041
1042 rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
769 hash_pte, 0); 1043 hash_pte, 0);
770 if (rc != 0) { 1044 if (rc != 0) {
771 netdev_err(dev, 1045 netdev_err(dev,
772 "gxio_mpipe_register_buffer_memory failed: %d\n", 1046 "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
773 rc); 1047 instance, rc);
774 return rc; 1048 return rc;
775 } 1049 }
776 1050
777 /* Allocate the large buffer stack. */ 1051 /* Provide initial buffers. */
778 large_buffer_stack_va = 1052 for (i = 0; i < num_buffers; i++) {
779 alloc_pages_exact(buffer_stack_size, GFP_KERNEL); 1053 if (!tile_net_provide_buffer(instance, kind)) {
780 if (large_buffer_stack_va == NULL) { 1054 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
781 netdev_err(dev, 1055 return -ENOMEM;
782 "Could not alloc %zd bytes for buffer stacks\n", 1056 }
783 buffer_stack_size);
784 return -ENOMEM;
785 }
786 rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
787 BUFFER_SIZE_LARGE_ENUM,
788 large_buffer_stack_va,
789 buffer_stack_size, 0);
790 if (rc != 0) {
791 netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
792 rc);
793 return rc;
794 } 1057 }
795 rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, 1058
796 hash_pte, 0); 1059 return 0;
797 if (rc != 0) { 1060}
1061
1062/* Allocate and initialize mpipe buffer stacks, and register them in
1063 * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
1064 * This routine supports tile_net_init_mpipe(), below.
1065 */
1066static int init_buffer_stacks(struct net_device *dev,
1067 int network_cpus_count)
1068{
1069 int num_kinds = MAX_KINDS - (jumbo_num == 0);
1070 size_t num_buffers;
1071 int rc;
1072 int instance = mpipe_instance(dev);
1073 struct mpipe_data *md = &mpipe_data[instance];
1074
1075 /* Allocate the buffer stacks. */
1076 rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
1077 if (rc < 0) {
798 netdev_err(dev, 1078 netdev_err(dev,
799 "gxio_mpipe_register_buffer_memory failed: %d\n", 1079 "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
800 rc); 1080 instance, rc);
801 return rc; 1081 return rc;
802 } 1082 }
1083 md->first_buffer_stack = rc;
803 1084
804 return 0; 1085 /* Enough small/large buffers to (normally) avoid buffer errors. */
1086 num_buffers =
1087 network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
1088
1089 /* Allocate the small memory stack. */
1090 if (rc >= 0)
1091 rc = create_buffer_stack(dev, 0, num_buffers);
1092
1093 /* Allocate the large buffer stack. */
1094 if (rc >= 0)
1095 rc = create_buffer_stack(dev, 1, num_buffers);
1096
1097 /* Allocate the jumbo buffer stack if needed. */
1098 if (rc >= 0 && jumbo_num != 0)
1099 rc = create_buffer_stack(dev, 2, jumbo_num);
1100
1101 return rc;
805} 1102}
806 1103
807/* Allocate per-cpu resources (memory for completions and idescs). 1104/* Allocate per-cpu resources (memory for completions and idescs).
@@ -812,6 +1109,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
812{ 1109{
813 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1110 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
814 int order, i, rc; 1111 int order, i, rc;
1112 int instance = mpipe_instance(dev);
1113 struct mpipe_data *md = &mpipe_data[instance];
815 struct page *page; 1114 struct page *page;
816 void *addr; 1115 void *addr;
817 1116
@@ -826,7 +1125,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
826 addr = pfn_to_kaddr(page_to_pfn(page)); 1125 addr = pfn_to_kaddr(page_to_pfn(page));
827 memset(addr, 0, COMPS_SIZE); 1126 memset(addr, 0, COMPS_SIZE);
828 for (i = 0; i < TILE_NET_CHANNELS; i++) 1127 for (i = 0; i < TILE_NET_CHANNELS; i++)
829 info->comps_for_echannel[i] = 1128 info->mpipe[instance].comps_for_echannel[i] =
830 addr + i * sizeof(struct tile_net_comps); 1129 addr + i * sizeof(struct tile_net_comps);
831 1130
832 /* If this is a network cpu, create an iqueue. */ 1131 /* If this is a network cpu, create an iqueue. */
@@ -840,14 +1139,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
840 return -ENOMEM; 1139 return -ENOMEM;
841 } 1140 }
842 addr = pfn_to_kaddr(page_to_pfn(page)); 1141 addr = pfn_to_kaddr(page_to_pfn(page));
843 rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++, 1142 rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
844 addr, NOTIF_RING_SIZE, 0); 1143 &md->context, ring++, addr,
1144 NOTIF_RING_SIZE, 0);
845 if (rc < 0) { 1145 if (rc < 0) {
846 netdev_err(dev, 1146 netdev_err(dev,
847 "gxio_mpipe_iqueue_init failed: %d\n", rc); 1147 "gxio_mpipe_iqueue_init failed: %d\n", rc);
848 return rc; 1148 return rc;
849 } 1149 }
850 info->has_iqueue = true; 1150 info->mpipe[instance].has_iqueue = true;
851 } 1151 }
852 1152
853 return ring; 1153 return ring;
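Note: the per-cpu state is now nested one level deeper, under info->mpipe[instance],
so each mPIPE shim gets its own iqueue and completion pointers. An illustrative
layout using only the field names visible in this hunk (the real struct has more
members):

    struct tile_net_info {
            struct {
                    bool has_iqueue;        /* this cpu polls a notif ring */
                    gxio_mpipe_iqueue_t iqueue;
                    struct tile_net_comps *
                            comps_for_echannel[TILE_NET_CHANNELS];
                    /* ... */
            } mpipe[NR_MPIPE_MAX];          /* one slot per shim */
            int my_cpu;
    };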
@@ -860,40 +1160,41 @@ static int init_notif_group_and_buckets(struct net_device *dev,
860 int ring, int network_cpus_count) 1160 int ring, int network_cpus_count)
861{ 1161{
862 int group, rc; 1162 int group, rc;
1163 int instance = mpipe_instance(dev);
1164 struct mpipe_data *md = &mpipe_data[instance];
863 1165
864 /* Allocate one NotifGroup. */ 1166 /* Allocate one NotifGroup. */
865 rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); 1167 rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
866 if (rc < 0) { 1168 if (rc < 0) {
867 netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", 1169 netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
868 rc); 1170 instance, rc);
869 return rc; 1171 return rc;
870 } 1172 }
871 group = rc; 1173 group = rc;
872 1174
873 /* Initialize global num_buckets value. */ 1175 /* Initialize global num_buckets value. */
874 if (network_cpus_count > 4) 1176 if (network_cpus_count > 4)
875 num_buckets = 256; 1177 md->num_buckets = 256;
876 else if (network_cpus_count > 1) 1178 else if (network_cpus_count > 1)
877 num_buckets = 16; 1179 md->num_buckets = 16;
878 1180
879 /* Allocate some buckets, and set global first_bucket value. */ 1181 /* Allocate some buckets, and set global first_bucket value. */
880 rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); 1182 rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
881 if (rc < 0) { 1183 if (rc < 0) {
882 netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); 1184 netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
1185 instance, rc);
883 return rc; 1186 return rc;
884 } 1187 }
885 first_bucket = rc; 1188 md->first_bucket = rc;
886 1189
887 /* Init group and buckets. */ 1190 /* Init group and buckets. */
888 rc = gxio_mpipe_init_notif_group_and_buckets( 1191 rc = gxio_mpipe_init_notif_group_and_buckets(
889 &context, group, ring, network_cpus_count, 1192 &md->context, group, ring, network_cpus_count,
890 first_bucket, num_buckets, 1193 md->first_bucket, md->num_buckets,
891 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); 1194 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
892 if (rc != 0) { 1195 if (rc != 0) {
893 netdev_err( 1196 netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
894 dev, 1197 "mpipe[%d] %d\n", instance, rc);
895 "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
896 rc);
897 return rc; 1198 return rc;
898 } 1199 }
899 1200
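Note: bucket sizing is a load-balancing heuristic keyed off how many cpus take
ingress interrupts, and both the count and the first allocated index now live in
the per-shim state. Roughly (sketch; md->num_buckets keeps its prior value in the
single-cpu case):

    if (network_cpus_count > 4)
            md->num_buckets = 256;
    else if (network_cpus_count > 1)
            md->num_buckets = 16;

    rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
    if (rc >= 0)
            md->first_bucket = rc;  /* rc is the first allocated index */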
@@ -907,30 +1208,39 @@ static int init_notif_group_and_buckets(struct net_device *dev,
907 */ 1208 */
908static int tile_net_setup_interrupts(struct net_device *dev) 1209static int tile_net_setup_interrupts(struct net_device *dev)
909{ 1210{
910 int cpu, rc; 1211 int cpu, rc, irq;
1212 int instance = mpipe_instance(dev);
1213 struct mpipe_data *md = &mpipe_data[instance];
1214
1215 irq = md->ingress_irq;
1216 if (irq < 0) {
1217 irq = create_irq();
1218 if (irq < 0) {
1219 netdev_err(dev,
1220 "create_irq failed: mpipe[%d] %d\n",
1221 instance, irq);
1222 return irq;
1223 }
1224 tile_irq_activate(irq, TILE_IRQ_PERCPU);
911 1225
912 rc = create_irq(); 1226 rc = request_irq(irq, tile_net_handle_ingress_irq,
913 if (rc < 0) { 1227 0, "tile_net", (void *)((uint64_t)instance));
914 netdev_err(dev, "create_irq failed: %d\n", rc); 1228
915 return rc; 1229 if (rc != 0) {
916 } 1230 netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
917 ingress_irq = rc; 1231 instance, rc);
918 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); 1232 destroy_irq(irq);
919 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, 1233 return rc;
920 0, "tile_net", NULL); 1234 }
921 if (rc != 0) { 1235 md->ingress_irq = irq;
922 netdev_err(dev, "request_irq failed: %d\n", rc);
923 destroy_irq(ingress_irq);
924 ingress_irq = -1;
925 return rc;
926 } 1236 }
927 1237
928 for_each_online_cpu(cpu) { 1238 for_each_online_cpu(cpu) {
929 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1239 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
930 if (info->has_iqueue) { 1240 if (info->mpipe[instance].has_iqueue) {
931 gxio_mpipe_request_notif_ring_interrupt( 1241 gxio_mpipe_request_notif_ring_interrupt(&md->context,
932 &context, cpu_x(cpu), cpu_y(cpu), 1242 cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
933 KERNEL_PL, ingress_irq, info->iqueue.ring); 1243 info->mpipe[instance].iqueue.ring);
934 } 1244 }
935 } 1245 }
936 1246
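Note: interrupt setup is now idempotent per shim. md->ingress_irq caches the vector
across opens, and a failed request_irq() releases it before bailing so a retry can
start clean. The create/activate/request/rollback order, condensed from the hunk
(create_irq()/destroy_irq()/tile_irq_activate() are TILE arch helpers):

    if (md->ingress_irq < 0) {
            int irq = create_irq();
            if (irq < 0)
                    return irq;
            tile_irq_activate(irq, TILE_IRQ_PERCPU);
            rc = request_irq(irq, tile_net_handle_ingress_irq, 0,
                             "tile_net", (void *)(uint64_t)instance);
            if (rc != 0) {
                    destroy_irq(irq);       /* undo create_irq() */
                    return rc;
            }
            md->ingress_irq = irq;          /* reused on later opens */
    }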
@@ -938,39 +1248,45 @@ static int tile_net_setup_interrupts(struct net_device *dev)
938} 1248}
939 1249
940/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ 1250/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
941static void tile_net_init_mpipe_fail(void) 1251static void tile_net_init_mpipe_fail(int instance)
942{ 1252{
943 int cpu; 1253 int kind, cpu;
1254 struct mpipe_data *md = &mpipe_data[instance];
944 1255
945 /* Do cleanups that require the mpipe context first. */ 1256 /* Do cleanups that require the mpipe context first. */
946 if (small_buffer_stack >= 0) 1257 for (kind = 0; kind < MAX_KINDS; kind++) {
947 tile_net_pop_all_buffers(small_buffer_stack); 1258 if (md->buffer_stack_vas[kind] != NULL) {
948 if (large_buffer_stack >= 0) 1259 tile_net_pop_all_buffers(instance,
949 tile_net_pop_all_buffers(large_buffer_stack); 1260 md->first_buffer_stack +
1261 kind);
1262 }
1263 }
950 1264
951 /* Destroy mpipe context so the hardware no longer owns any memory. */ 1265 /* Destroy mpipe context so the hardware no longer owns any memory. */
952 gxio_mpipe_destroy(&context); 1266 gxio_mpipe_destroy(&md->context);
953 1267
954 for_each_online_cpu(cpu) { 1268 for_each_online_cpu(cpu) {
955 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1269 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
956 free_pages((unsigned long)(info->comps_for_echannel[0]), 1270 free_pages(
957 get_order(COMPS_SIZE)); 1271 (unsigned long)(
958 info->comps_for_echannel[0] = NULL; 1272 info->mpipe[instance].comps_for_echannel[0]),
959 free_pages((unsigned long)(info->iqueue.idescs), 1273 get_order(COMPS_SIZE));
1274 info->mpipe[instance].comps_for_echannel[0] = NULL;
1275 free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
960 get_order(NOTIF_RING_SIZE)); 1276 get_order(NOTIF_RING_SIZE));
961 info->iqueue.idescs = NULL; 1277 info->mpipe[instance].iqueue.idescs = NULL;
962 } 1278 }
963 1279
964 if (small_buffer_stack_va) 1280 for (kind = 0; kind < MAX_KINDS; kind++) {
965 free_pages_exact(small_buffer_stack_va, buffer_stack_size); 1281 if (md->buffer_stack_vas[kind] != NULL) {
966 if (large_buffer_stack_va) 1282 free_pages_exact(md->buffer_stack_vas[kind],
967 free_pages_exact(large_buffer_stack_va, buffer_stack_size); 1283 md->buffer_stack_bytes[kind]);
1284 md->buffer_stack_vas[kind] = NULL;
1285 }
1286 }
968 1287
969 small_buffer_stack_va = NULL; 1288 md->first_buffer_stack = -1;
970 large_buffer_stack_va = NULL; 1289 md->first_bucket = -1;
971 large_buffer_stack = -1;
972 small_buffer_stack = -1;
973 first_bucket = -1;
974} 1290}
975 1291
976/* The first time any tilegx network device is opened, we initialize 1292/* The first time any tilegx network device is opened, we initialize
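Note: the failure path above keeps a strict ordering. Anything the hardware may
still reference is drained while the context is alive, the context is destroyed so
the hardware drops ownership, and only then is the backing memory freed. In
outline:

    /* 1: give queued buffers back while the context still works */
    for (kind = 0; kind < MAX_KINDS; kind++)
            if (md->buffer_stack_vas[kind] != NULL)
                    tile_net_pop_all_buffers(instance,
                                             md->first_buffer_stack + kind);
    /* 2: hardware no longer owns any memory after this */
    gxio_mpipe_destroy(&md->context);
    /* 3: only now free the per-cpu pages and the stack VAs */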
@@ -984,9 +1300,11 @@ static void tile_net_init_mpipe_fail(void)
984 */ 1300 */
985static int tile_net_init_mpipe(struct net_device *dev) 1301static int tile_net_init_mpipe(struct net_device *dev)
986{ 1302{
987 int i, num_buffers, rc; 1303 int rc;
988 int cpu; 1304 int cpu;
989 int first_ring, ring; 1305 int first_ring, ring;
1306 int instance = mpipe_instance(dev);
1307 struct mpipe_data *md = &mpipe_data[instance];
990 int network_cpus_count = cpus_weight(network_cpus_map); 1308 int network_cpus_count = cpus_weight(network_cpus_map);
991 1309
992 if (!hash_default) { 1310 if (!hash_default) {
@@ -994,36 +1312,21 @@ static int tile_net_init_mpipe(struct net_device *dev)
994 return -EIO; 1312 return -EIO;
995 } 1313 }
996 1314
997 rc = gxio_mpipe_init(&context, 0); 1315 rc = gxio_mpipe_init(&md->context, instance);
998 if (rc != 0) { 1316 if (rc != 0) {
999 netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc); 1317 netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
1318 instance, rc);
1000 return -EIO; 1319 return -EIO;
1001 } 1320 }
1002 1321
1003 /* Set up the buffer stacks. */ 1322 /* Set up the buffer stacks. */
1004 num_buffers = 1323 rc = init_buffer_stacks(dev, network_cpus_count);
1005 network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
1006 rc = init_buffer_stacks(dev, num_buffers);
1007 if (rc != 0) 1324 if (rc != 0)
1008 goto fail; 1325 goto fail;
1009 1326
1010 /* Provide initial buffers. */
1011 rc = -ENOMEM;
1012 for (i = 0; i < num_buffers; i++) {
1013 if (!tile_net_provide_buffer(true)) {
1014 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1015 goto fail;
1016 }
1017 }
1018 for (i = 0; i < num_buffers; i++) {
1019 if (!tile_net_provide_buffer(false)) {
1020 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1021 goto fail;
1022 }
1023 }
1024
1025 /* Allocate one NotifRing for each network cpu. */ 1327 /* Allocate one NotifRing for each network cpu. */
1026 rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); 1328 rc = gxio_mpipe_alloc_notif_rings(&md->context,
1329 network_cpus_count, 0, 0);
1027 if (rc < 0) { 1330 if (rc < 0) {
1028 netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", 1331 netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
1029 rc); 1332 rc);
@@ -1050,10 +1353,13 @@ static int tile_net_init_mpipe(struct net_device *dev)
1050 if (rc != 0) 1353 if (rc != 0)
1051 goto fail; 1354 goto fail;
1052 1355
1356 /* Register PTP clock and set mPIPE timestamp, if configured. */
1357 register_ptp_clock(dev, md);
1358
1053 return 0; 1359 return 0;
1054 1360
1055fail: 1361fail:
1056 tile_net_init_mpipe_fail(); 1362 tile_net_init_mpipe_fail(instance);
1057 return rc; 1363 return rc;
1058} 1364}
1059 1365
@@ -1063,17 +1369,19 @@ fail:
1063 */ 1369 */
1064static int tile_net_init_egress(struct net_device *dev, int echannel) 1370static int tile_net_init_egress(struct net_device *dev, int echannel)
1065{ 1371{
1372 static int ering = -1;
1066 struct page *headers_page, *edescs_page, *equeue_page; 1373 struct page *headers_page, *edescs_page, *equeue_page;
1067 gxio_mpipe_edesc_t *edescs; 1374 gxio_mpipe_edesc_t *edescs;
1068 gxio_mpipe_equeue_t *equeue; 1375 gxio_mpipe_equeue_t *equeue;
1069 unsigned char *headers; 1376 unsigned char *headers;
1070 int headers_order, edescs_order, equeue_order; 1377 int headers_order, edescs_order, equeue_order;
1071 size_t edescs_size; 1378 size_t edescs_size;
1072 int edma;
1073 int rc = -ENOMEM; 1379 int rc = -ENOMEM;
1380 int instance = mpipe_instance(dev);
1381 struct mpipe_data *md = &mpipe_data[instance];
1074 1382
1075 /* Only initialize once. */ 1383 /* Only initialize once. */
1076 if (egress_for_echannel[echannel].equeue != NULL) 1384 if (md->egress_for_echannel[echannel].equeue != NULL)
1077 return 0; 1385 return 0;
1078 1386
1079 /* Allocate memory for the "headers". */ 1387 /* Allocate memory for the "headers". */
@@ -1110,28 +1418,41 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
1110 } 1418 }
1111 equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); 1419 equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
1112 1420
1113 /* Allocate an edma ring. Note that in practice this can't 1421 /* Allocate an edma ring (using a one entry "free list"). */
1114 * fail, which is good, because we will leak an edma ring if so. 1422 if (ering < 0) {
1115 */ 1423 rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
1116 rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); 1424 if (rc < 0) {
1117 if (rc < 0) { 1425 netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
1118 netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n", 1426 "mpipe[%d] %d\n", instance, rc);
1119 rc); 1427 goto fail_equeue;
1120 goto fail_equeue; 1428 }
1429 ering = rc;
1121 } 1430 }
1122 edma = rc;
1123 1431
1124 /* Initialize the equeue. */ 1432 /* Initialize the equeue. */
1125 rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel, 1433 rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
1126 edescs, edescs_size, 0); 1434 edescs, edescs_size, 0);
1127 if (rc != 0) { 1435 if (rc != 0) {
1128 netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); 1436 netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
1437 instance, rc);
1129 goto fail_equeue; 1438 goto fail_equeue;
1130 } 1439 }
1131 1440
1441 /* Don't reuse the ering later. */
1442 ering = -1;
1443
1444 if (jumbo_num != 0) {
1445 /* Make sure "jumbo" packets can be egressed safely. */
1446 if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
1447 /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
1448 netdev_warn(dev, "Jumbo packets may not be egressed"
1449 " properly on channel %d\n", echannel);
1450 }
1451 }
1452
1132 /* Done. */ 1453 /* Done. */
1133 egress_for_echannel[echannel].equeue = equeue; 1454 md->egress_for_echannel[echannel].equeue = equeue;
1134 egress_for_echannel[echannel].headers = headers; 1455 md->egress_for_echannel[echannel].headers = headers;
1135 return 0; 1456 return 0;
1136 1457
1137fail_equeue: 1458fail_equeue:
@@ -1151,11 +1472,25 @@ fail:
1151static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, 1472static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
1152 const char *link_name) 1473 const char *link_name)
1153{ 1474{
1154 int rc = gxio_mpipe_link_open(link, &context, link_name, 0); 1475 int instance = mpipe_instance(dev);
1476 struct mpipe_data *md = &mpipe_data[instance];
1477 int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
1155 if (rc < 0) { 1478 if (rc < 0) {
1156 netdev_err(dev, "Failed to open '%s'\n", link_name); 1479 netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
1480 link_name, instance, rc);
1157 return rc; 1481 return rc;
1158 } 1482 }
1483 if (jumbo_num != 0) {
1484 u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
1485 rc = gxio_mpipe_link_set_attr(link, attr, 1);
1486 if (rc != 0) {
1487 netdev_err(dev,
1488 "Cannot receive jumbo packets on '%s'\n",
1489 link_name);
1490 gxio_mpipe_link_close(link);
1491 return rc;
1492 }
1493 }
1159 rc = gxio_mpipe_link_channel(link); 1494 rc = gxio_mpipe_link_channel(link);
1160 if (rc < 0 || rc >= TILE_NET_CHANNELS) { 1495 if (rc < 0 || rc >= TILE_NET_CHANNELS) {
1161 netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); 1496 netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
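Note: tile_net_link_open() now shows the usual open-then-configure-with-rollback
idiom: if enabling jumbo receive on the link fails, the link is closed again rather
than left half-configured. Condensed (the driver returns the set_attr rc; -EIO here
is just the sketch's shorthand):

    rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
    if (rc < 0)
            return rc;
    if (jumbo_num != 0 &&
        gxio_mpipe_link_set_attr(link, GXIO_MPIPE_LINK_RECEIVE_JUMBO, 1)) {
            gxio_mpipe_link_close(link);    /* roll back the open */
            return -EIO;
    }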
@@ -1169,12 +1504,23 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
1169static int tile_net_open(struct net_device *dev) 1504static int tile_net_open(struct net_device *dev)
1170{ 1505{
1171 struct tile_net_priv *priv = netdev_priv(dev); 1506 struct tile_net_priv *priv = netdev_priv(dev);
1172 int cpu, rc; 1507 int cpu, rc, instance;
1173 1508
1174 mutex_lock(&tile_net_devs_for_channel_mutex); 1509 mutex_lock(&tile_net_devs_for_channel_mutex);
1175 1510
1176 /* Do one-time initialization the first time any device is opened. */ 1511 /* Get the instance info. */
1177 if (ingress_irq < 0) { 1512 rc = gxio_mpipe_link_instance(dev->name);
1513 if (rc < 0 || rc >= NR_MPIPE_MAX) {
1514 mutex_unlock(&tile_net_devs_for_channel_mutex);
1515 return -EIO;
1516 }
1517
1518 priv->instance = rc;
1519 instance = rc;
1520 if (!mpipe_data[rc].context.mmio_fast_base) {
1521 /* Do one-time initialization per instance the first time
1522 * any device is opened.
1523 */
1178 rc = tile_net_init_mpipe(dev); 1524 rc = tile_net_init_mpipe(dev);
1179 if (rc != 0) 1525 if (rc != 0)
1180 goto fail; 1526 goto fail;
@@ -1205,7 +1551,7 @@ static int tile_net_open(struct net_device *dev)
1205 if (rc != 0) 1551 if (rc != 0)
1206 goto fail; 1552 goto fail;
1207 1553
1208 tile_net_devs_for_channel[priv->channel] = dev; 1554 mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
1209 1555
1210 rc = tile_net_update(dev); 1556 rc = tile_net_update(dev);
1211 if (rc != 0) 1557 if (rc != 0)
@@ -1217,7 +1563,7 @@ static int tile_net_open(struct net_device *dev)
1217 for_each_online_cpu(cpu) { 1563 for_each_online_cpu(cpu) {
1218 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1564 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1219 struct tile_net_tx_wake *tx_wake = 1565 struct tile_net_tx_wake *tx_wake =
1220 &info->tx_wake[priv->echannel]; 1566 &info->mpipe[instance].tx_wake[priv->echannel];
1221 1567
1222 hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, 1568 hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
1223 HRTIMER_MODE_REL); 1569 HRTIMER_MODE_REL);
@@ -1243,7 +1589,7 @@ fail:
1243 priv->channel = -1; 1589 priv->channel = -1;
1244 } 1590 }
1245 priv->echannel = -1; 1591 priv->echannel = -1;
1246 tile_net_devs_for_channel[priv->channel] = NULL; 1592 mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
1247 mutex_unlock(&tile_net_devs_for_channel_mutex); 1593 mutex_unlock(&tile_net_devs_for_channel_mutex);
1248 1594
1249 /* Don't return raw gxio error codes to generic Linux. */ 1595 /* Don't return raw gxio error codes to generic Linux. */
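Note: tile_net_open() derives the shim from the interface name and uses
context.mmio_fast_base as the "already initialized" flag, so the heavyweight mPIPE
bring-up now runs once per instance instead of once globally. Condensed from the
hunk:

    instance = gxio_mpipe_link_instance(dev->name);
    if (instance < 0 || instance >= NR_MPIPE_MAX)
            return -EIO;                    /* no such shim */
    priv->instance = instance;

    if (!mpipe_data[instance].context.mmio_fast_base) {
            rc = tile_net_init_mpipe(dev);  /* first open on this shim */
            if (rc != 0)
                    goto fail;
    }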
@@ -1255,18 +1601,20 @@ static int tile_net_stop(struct net_device *dev)
1255{ 1601{
1256 struct tile_net_priv *priv = netdev_priv(dev); 1602 struct tile_net_priv *priv = netdev_priv(dev);
1257 int cpu; 1603 int cpu;
1604 int instance = priv->instance;
1605 struct mpipe_data *md = &mpipe_data[instance];
1258 1606
1259 for_each_online_cpu(cpu) { 1607 for_each_online_cpu(cpu) {
1260 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1608 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1261 struct tile_net_tx_wake *tx_wake = 1609 struct tile_net_tx_wake *tx_wake =
1262 &info->tx_wake[priv->echannel]; 1610 &info->mpipe[instance].tx_wake[priv->echannel];
1263 1611
1264 hrtimer_cancel(&tx_wake->timer); 1612 hrtimer_cancel(&tx_wake->timer);
1265 netif_stop_subqueue(dev, cpu); 1613 netif_stop_subqueue(dev, cpu);
1266 } 1614 }
1267 1615
1268 mutex_lock(&tile_net_devs_for_channel_mutex); 1616 mutex_lock(&tile_net_devs_for_channel_mutex);
1269 tile_net_devs_for_channel[priv->channel] = NULL; 1617 md->tile_net_devs_for_channel[priv->channel] = NULL;
1270 (void)tile_net_update(dev); 1618 (void)tile_net_update(dev);
1271 if (priv->loopify_channel >= 0) { 1619 if (priv->loopify_channel >= 0) {
1272 if (gxio_mpipe_link_close(&priv->loopify_link) != 0) 1620 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
@@ -1374,20 +1722,20 @@ static int tso_count_edescs(struct sk_buff *skb)
1374 return num_edescs; 1722 return num_edescs;
1375} 1723}
1376 1724
1377/* Prepare modified copies of the skbuff headers. 1725/* Prepare modified copies of the skbuff headers. */
1378 * FIXME: add support for IPv6.
1379 */
1380static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, 1726static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1381 s64 slot) 1727 s64 slot)
1382{ 1728{
1383 struct skb_shared_info *sh = skb_shinfo(skb); 1729 struct skb_shared_info *sh = skb_shinfo(skb);
1384 struct iphdr *ih; 1730 struct iphdr *ih;
1731 struct ipv6hdr *ih6;
1385 struct tcphdr *th; 1732 struct tcphdr *th;
1386 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1733 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1387 unsigned int data_len = skb->len - sh_len; 1734 unsigned int data_len = skb->len - sh_len;
1388 unsigned char *data = skb->data; 1735 unsigned char *data = skb->data;
1389 unsigned int ih_off, th_off, p_len; 1736 unsigned int ih_off, th_off, p_len;
1390 unsigned int isum_seed, tsum_seed, id, seq; 1737 unsigned int isum_seed, tsum_seed, id, seq;
1738 int is_ipv6;
1391 long f_id = -1; /* id of the current fragment */ 1739 long f_id = -1; /* id of the current fragment */
1392 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ 1740 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1393 long f_used = 0; /* bytes used from the current fragment */ 1741 long f_used = 0; /* bytes used from the current fragment */
@@ -1395,18 +1743,24 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1395 int segment; 1743 int segment;
1396 1744
1397 /* Locate original headers and compute various lengths. */ 1745 /* Locate original headers and compute various lengths. */
1398 ih = ip_hdr(skb); 1746 is_ipv6 = skb_is_gso_v6(skb);
1747 if (is_ipv6) {
1748 ih6 = ipv6_hdr(skb);
1749 ih_off = skb_network_offset(skb);
1750 } else {
1751 ih = ip_hdr(skb);
1752 ih_off = skb_network_offset(skb);
1753 isum_seed = ((0xFFFF - ih->check) +
1754 (0xFFFF - ih->tot_len) +
1755 (0xFFFF - ih->id));
1756 id = ntohs(ih->id);
1757 }
1758
1399 th = tcp_hdr(skb); 1759 th = tcp_hdr(skb);
1400 ih_off = skb_network_offset(skb);
1401 th_off = skb_transport_offset(skb); 1760 th_off = skb_transport_offset(skb);
1402 p_len = sh->gso_size; 1761 p_len = sh->gso_size;
1403 1762
1404 /* Set up seed values for IP and TCP csum and initialize id and seq. */
1405 isum_seed = ((0xFFFF - ih->check) +
1406 (0xFFFF - ih->tot_len) +
1407 (0xFFFF - ih->id));
1408 tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); 1763 tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
1409 id = ntohs(ih->id);
1410 seq = ntohl(th->seq); 1764 seq = ntohl(th->seq);
1411 1765
1412 /* Prepare all the headers. */ 1766 /* Prepare all the headers. */
@@ -1420,11 +1774,17 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1420 memcpy(buf, data, sh_len); 1774 memcpy(buf, data, sh_len);
1421 1775
1422 /* Update copied ip header. */ 1776 /* Update copied ip header. */
1423 ih = (struct iphdr *)(buf + ih_off); 1777 if (is_ipv6) {
1424 ih->tot_len = htons(sh_len + p_len - ih_off); 1778 ih6 = (struct ipv6hdr *)(buf + ih_off);
1425 ih->id = htons(id); 1779 ih6->payload_len = htons(sh_len + p_len - ih_off -
1426 ih->check = csum_long(isum_seed + ih->tot_len + 1780 sizeof(*ih6));
1427 ih->id) ^ 0xffff; 1781 } else {
1782 ih = (struct iphdr *)(buf + ih_off);
1783 ih->tot_len = htons(sh_len + p_len - ih_off);
1784 ih->id = htons(id);
1785 ih->check = csum_long(isum_seed + ih->tot_len +
1786 ih->id) ^ 0xffff;
1787 }
1428 1788
1429 /* Update copied tcp header. */ 1789 /* Update copied tcp header. */
1430 th = (struct tcphdr *)(buf + th_off); 1790 th = (struct tcphdr *)(buf + th_off);
@@ -1475,8 +1835,9 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1475static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, 1835static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1476 struct sk_buff *skb, unsigned char *headers, s64 slot) 1836 struct sk_buff *skb, unsigned char *headers, s64 slot)
1477{ 1837{
1478 struct tile_net_priv *priv = netdev_priv(dev);
1479 struct skb_shared_info *sh = skb_shinfo(skb); 1838 struct skb_shared_info *sh = skb_shinfo(skb);
1839 int instance = mpipe_instance(dev);
1840 struct mpipe_data *md = &mpipe_data[instance];
1480 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1841 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1481 unsigned int data_len = skb->len - sh_len; 1842 unsigned int data_len = skb->len - sh_len;
1482 unsigned int p_len = sh->gso_size; 1843 unsigned int p_len = sh->gso_size;
@@ -1499,8 +1860,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1499 edesc_head.xfer_size = sh_len; 1860 edesc_head.xfer_size = sh_len;
1500 1861
1501 /* This is only used to specify the TLB. */ 1862 /* This is only used to specify the TLB. */
1502 edesc_head.stack_idx = large_buffer_stack; 1863 edesc_head.stack_idx = md->first_buffer_stack;
1503 edesc_body.stack_idx = large_buffer_stack; 1864 edesc_body.stack_idx = md->first_buffer_stack;
1504 1865
1505 /* Egress all the edescs. */ 1866 /* Egress all the edescs. */
1506 for (segment = 0; segment < sh->gso_segs; segment++) { 1867 for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1553,8 +1914,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1553 } 1914 }
1554 1915
1555 /* Update stats. */ 1916 /* Update stats. */
1556 tile_net_stats_add(tx_packets, &priv->stats.tx_packets); 1917 tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
1557 tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes); 1918 tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
1558} 1919}
1559 1920
1560/* Do "TSO" handling for egress. 1921/* Do "TSO" handling for egress.
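Note: dropping priv->stats in favor of the counters embedded in struct net_device
also lets the .ndo_get_stats hook go away; when a driver supplies neither
ndo_get_stats64 nor ndo_get_stats, dev_get_stats() falls back to dev->stats on its
own. The accounting reduces to:

    tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
    tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
    /* no ndo_get_stats needed; the core reads dev->stats directly */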
@@ -1575,8 +1936,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1575 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 1936 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1576 struct tile_net_priv *priv = netdev_priv(dev); 1937 struct tile_net_priv *priv = netdev_priv(dev);
1577 int channel = priv->echannel; 1938 int channel = priv->echannel;
1578 struct tile_net_egress *egress = &egress_for_echannel[channel]; 1939 int instance = priv->instance;
1579 struct tile_net_comps *comps = info->comps_for_echannel[channel]; 1940 struct mpipe_data *md = &mpipe_data[instance];
1941 struct tile_net_egress *egress = &md->egress_for_echannel[channel];
1942 struct tile_net_comps *comps =
1943 info->mpipe[instance].comps_for_echannel[channel];
1580 gxio_mpipe_equeue_t *equeue = egress->equeue; 1944 gxio_mpipe_equeue_t *equeue = egress->equeue;
1581 unsigned long irqflags; 1945 unsigned long irqflags;
1582 int num_edescs; 1946 int num_edescs;
@@ -1640,10 +2004,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1640{ 2004{
1641 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 2005 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1642 struct tile_net_priv *priv = netdev_priv(dev); 2006 struct tile_net_priv *priv = netdev_priv(dev);
1643 struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; 2007 int instance = priv->instance;
2008 struct mpipe_data *md = &mpipe_data[instance];
2009 struct tile_net_egress *egress =
2010 &md->egress_for_echannel[priv->echannel];
1644 gxio_mpipe_equeue_t *equeue = egress->equeue; 2011 gxio_mpipe_equeue_t *equeue = egress->equeue;
1645 struct tile_net_comps *comps = 2012 struct tile_net_comps *comps =
1646 info->comps_for_echannel[priv->echannel]; 2013 info->mpipe[instance].comps_for_echannel[priv->echannel];
1647 unsigned int len = skb->len; 2014 unsigned int len = skb->len;
1648 unsigned char *data = skb->data; 2015 unsigned char *data = skb->data;
1649 unsigned int num_edescs; 2016 unsigned int num_edescs;
@@ -1660,7 +2027,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1660 num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); 2027 num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
1661 2028
1662 /* This is only used to specify the TLB. */ 2029 /* This is only used to specify the TLB. */
1663 edesc.stack_idx = large_buffer_stack; 2030 edesc.stack_idx = md->first_buffer_stack;
1664 2031
1665 /* Prepare the edescs. */ 2032 /* Prepare the edescs. */
1666 for (i = 0; i < num_edescs; i++) { 2033 for (i = 0; i < num_edescs; i++) {
@@ -1693,13 +2060,16 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1693 for (i = 0; i < num_edescs; i++) 2060 for (i = 0; i < num_edescs; i++)
1694 gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); 2061 gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
1695 2062
2063 /* Store TX timestamp if needed. */
2064 tile_tx_timestamp(skb, instance);
2065
1696 /* Add a completion record. */ 2066 /* Add a completion record. */
1697 add_comp(equeue, comps, slot - 1, skb); 2067 add_comp(equeue, comps, slot - 1, skb);
1698 2068
1699 /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ 2069 /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
1700 tile_net_stats_add(1, &priv->stats.tx_packets); 2070 tile_net_stats_add(1, &dev->stats.tx_packets);
1701 tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), 2071 tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
1702 &priv->stats.tx_bytes); 2072 &dev->stats.tx_bytes);
1703 2073
1704 local_irq_restore(irqflags); 2074 local_irq_restore(irqflags);
1705 2075
@@ -1727,20 +2097,18 @@ static void tile_net_tx_timeout(struct net_device *dev)
1727/* Ioctl commands. */ 2097/* Ioctl commands. */
1728static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2098static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1729{ 2099{
1730 return -EOPNOTSUPP; 2100 if (cmd == SIOCSHWTSTAMP)
1731} 2101 return tile_hwtstamp_ioctl(dev, rq, cmd);
1732 2102
1733/* Get system network statistics for device. */ 2103 return -EOPNOTSUPP;
1734static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
1735{
1736 struct tile_net_priv *priv = netdev_priv(dev);
1737 return &priv->stats;
1738} 2104}
1739 2105
1740/* Change the MTU. */ 2106/* Change the MTU. */
1741static int tile_net_change_mtu(struct net_device *dev, int new_mtu) 2107static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
1742{ 2108{
1743 if ((new_mtu < 68) || (new_mtu > 1500)) 2109 if (new_mtu < 68)
2110 return -EINVAL;
2111 if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
1744 return -EINVAL; 2112 return -EINVAL;
1745 dev->mtu = new_mtu; 2113 dev->mtu = new_mtu;
1746 return 0; 2114 return 0;
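Note: the MTU check is now jumbo-aware. 68 stays as the floor (the historical IPv4
minimum), and the ceiling depends on whether a jumbo buffer stack was configured
via the jumbo_num module parameter. Equivalent single-expression form:

    static int change_mtu(struct net_device *dev, int new_mtu)
    {
            if (new_mtu < 68 ||
                new_mtu > (jumbo_num != 0 ? 9000 : 1500))
                    return -EINVAL;
            dev->mtu = new_mtu;
            return 0;
    }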
@@ -1772,9 +2140,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
1772 */ 2140 */
1773static void tile_net_netpoll(struct net_device *dev) 2141static void tile_net_netpoll(struct net_device *dev)
1774{ 2142{
1775 disable_percpu_irq(ingress_irq); 2143 int instance = mpipe_instance(dev);
1776 tile_net_handle_ingress_irq(ingress_irq, NULL); 2144 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1777 enable_percpu_irq(ingress_irq, 0); 2145 struct mpipe_data *md = &mpipe_data[instance];
2146
2147 disable_percpu_irq(md->ingress_irq);
2148 napi_schedule(&info->mpipe[instance].napi);
2149 enable_percpu_irq(md->ingress_irq, 0);
1778} 2150}
1779#endif 2151#endif
1780 2152
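Note: netpoll no longer calls the hard-IRQ handler directly; it masks the per-cpu
ingress interrupt and schedules the per-instance NAPI context, so polled packets
drain through the same path as normal traffic. The shape, condensed from the hunk:

    static void netpoll_sketch(struct net_device *dev)
    {
            int instance = mpipe_instance(dev);
            struct mpipe_data *md = &mpipe_data[instance];
            struct tile_net_info *info = &__get_cpu_var(per_cpu_info);

            disable_percpu_irq(md->ingress_irq);        /* hw stays quiet */
            napi_schedule(&info->mpipe[instance].napi); /* drain via poll */
            enable_percpu_irq(md->ingress_irq, 0);
    }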
@@ -1784,7 +2156,6 @@ static const struct net_device_ops tile_net_ops = {
1784 .ndo_start_xmit = tile_net_tx, 2156 .ndo_start_xmit = tile_net_tx,
1785 .ndo_select_queue = tile_net_select_queue, 2157 .ndo_select_queue = tile_net_select_queue,
1786 .ndo_do_ioctl = tile_net_ioctl, 2158 .ndo_do_ioctl = tile_net_ioctl,
1787 .ndo_get_stats = tile_net_get_stats,
1788 .ndo_change_mtu = tile_net_change_mtu, 2159 .ndo_change_mtu = tile_net_change_mtu,
1789 .ndo_tx_timeout = tile_net_tx_timeout, 2160 .ndo_tx_timeout = tile_net_tx_timeout,
1790 .ndo_set_mac_address = tile_net_set_mac_address, 2161 .ndo_set_mac_address = tile_net_set_mac_address,
@@ -1800,14 +2171,21 @@ static const struct net_device_ops tile_net_ops = {
1800 */ 2171 */
1801static void tile_net_setup(struct net_device *dev) 2172static void tile_net_setup(struct net_device *dev)
1802{ 2173{
2174 netdev_features_t features = 0;
2175
1803 ether_setup(dev); 2176 ether_setup(dev);
1804 dev->netdev_ops = &tile_net_ops; 2177 dev->netdev_ops = &tile_net_ops;
1805 dev->watchdog_timeo = TILE_NET_TIMEOUT; 2178 dev->watchdog_timeo = TILE_NET_TIMEOUT;
1806 dev->features |= NETIF_F_LLTX;
1807 dev->features |= NETIF_F_HW_CSUM;
1808 dev->features |= NETIF_F_SG;
1809 dev->features |= NETIF_F_TSO;
1810 dev->mtu = 1500; 2179 dev->mtu = 1500;
2180
2181 features |= NETIF_F_HW_CSUM;
2182 features |= NETIF_F_SG;
2183 features |= NETIF_F_TSO;
2184 features |= NETIF_F_TSO6;
2185
2186 dev->hw_features |= features;
2187 dev->vlan_features |= features;
2188 dev->features |= features;
1811} 2189}
1812 2190
1813/* Allocate the device structure, register the device, and obtain the 2191/* Allocate the device structure, register the device, and obtain the
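Note: features are now built once in a local netdev_features_t and applied to all
three masks: dev->features (on by default), dev->hw_features (toggleable through
ethtool), and dev->vlan_features (inherited by stacked VLAN devices). NETIF_F_TSO6
pairs with the new IPv6 TSO path, and NETIF_F_LLTX is dropped along the way:

    netdev_features_t features = 0;

    features |= NETIF_F_HW_CSUM | NETIF_F_SG;
    features |= NETIF_F_TSO | NETIF_F_TSO6;  /* v4 and v6 TSO */

    dev->hw_features |= features;    /* user-toggleable via ethtool */
    dev->vlan_features |= features;  /* propagated to VLAN uppers */
    dev->features |= features;       /* enabled from the start */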
@@ -1842,6 +2220,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
1842 priv->channel = -1; 2220 priv->channel = -1;
1843 priv->loopify_channel = -1; 2221 priv->loopify_channel = -1;
1844 priv->echannel = -1; 2222 priv->echannel = -1;
2223 init_ptp_dev(priv);
1845 2224
1846 /* Get the MAC address and set it in the device struct; this must 2225 /* Get the MAC address and set it in the device struct; this must
1847 * be done before the device is opened. If the MAC is all zeroes, 2226 * be done before the device is opened. If the MAC is all zeroes,
@@ -1871,9 +2250,12 @@ static void tile_net_init_module_percpu(void *unused)
1871{ 2250{
1872 struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 2251 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1873 int my_cpu = smp_processor_id(); 2252 int my_cpu = smp_processor_id();
2253 int instance;
1874 2254
1875 info->has_iqueue = false; 2255 for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
1876 2256 info->mpipe[instance].has_iqueue = false;
2257 info->mpipe[instance].instance = instance;
2258 }
1877 info->my_cpu = my_cpu; 2259 info->my_cpu = my_cpu;
1878 2260
1879 /* Initialize the egress timer. */ 2261 /* Initialize the egress timer. */
@@ -1890,6 +2272,8 @@ static int __init tile_net_init_module(void)
1890 2272
1891 pr_info("Tilera Network Driver\n"); 2273 pr_info("Tilera Network Driver\n");
1892 2274
2275 BUILD_BUG_ON(NR_MPIPE_MAX != 2);
2276
1893 mutex_init(&tile_net_devs_for_channel_mutex); 2277 mutex_init(&tile_net_devs_for_channel_mutex);
1894 2278
1895 /* Initialize each CPU. */ 2279 /* Initialize each CPU. */
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 36435499814b..106be47716e7 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -31,6 +31,7 @@
31#include <linux/in6.h> 31#include <linux/in6.h>
32#include <linux/timer.h> 32#include <linux/timer.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/u64_stats_sync.h>
34#include <asm/checksum.h> 35#include <asm/checksum.h>
35#include <asm/homecache.h> 36#include <asm/homecache.h>
36 37
@@ -88,13 +89,6 @@
88/* ISSUE: This has not been thoroughly tested (except at 1500). */ 89/* ISSUE: This has not been thoroughly tested (except at 1500). */
89#define TILE_NET_MTU 1500 90#define TILE_NET_MTU 1500
90 91
91/* HACK: Define to support GSO. */
92/* ISSUE: This may actually hurt performance of the TCP blaster. */
93/* #define TILE_NET_GSO */
94
95/* Define this to collapse "duplicate" acks. */
96/* #define IGNORE_DUP_ACKS */
97
98/* HACK: Define this to verify incoming packets. */ 92/* HACK: Define this to verify incoming packets. */
99/* #define TILE_NET_VERIFY_INGRESS */ 93/* #define TILE_NET_VERIFY_INGRESS */
100 94
@@ -156,10 +150,13 @@ struct tile_netio_queue {
156 * Statistics counters for a specific cpu and device. 150 * Statistics counters for a specific cpu and device.
157 */ 151 */
158struct tile_net_stats_t { 152struct tile_net_stats_t {
159 u32 rx_packets; 153 struct u64_stats_sync syncp;
160 u32 rx_bytes; 154 u64 rx_packets; /* total packets received */
161 u32 tx_packets; 155 u64 tx_packets; /* total packets transmitted */
162 u32 tx_bytes; 156 u64 rx_bytes; /* total bytes received */
157 u64 tx_bytes; /* total bytes transmitted */
158 u64 rx_errors; /* packets truncated or marked bad by hw */
159 u64 rx_dropped; /* packets not for us or intf not up */
163}; 160};
164 161
165 162
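Note: widening the tilepro per-cpu counters to u64 needs u64_stats_sync so 32-bit
readers cannot observe torn halves; on 64-bit the begin/end pair compiles away.
Writer-side pattern, with a trimmed stand-in struct:

    #include <linux/u64_stats_sync.h>

    struct pcpu_stats {                     /* trimmed stand-in */
            struct u64_stats_sync syncp;
            u64 rx_packets;
            u64 rx_bytes;
    };

    static void account_rx(struct pcpu_stats *s, unsigned int len)
    {
            u64_stats_update_begin(&s->syncp);  /* seqcount on 32-bit */
            s->rx_packets++;
            s->rx_bytes += len;
            u64_stats_update_end(&s->syncp);
    }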
@@ -218,8 +215,6 @@ struct tile_net_priv {
218 int network_cpus_count; 215 int network_cpus_count;
219 /* Credits per network cpu. */ 216 /* Credits per network cpu. */
220 int network_cpus_credits; 217 int network_cpus_credits;
221 /* Network stats. */
222 struct net_device_stats stats;
223 /* For NetIO bringup retries. */ 218 /* For NetIO bringup retries. */
224 struct delayed_work retry_work; 219 struct delayed_work retry_work;
225 /* Quick access to per cpu data. */ 220 /* Quick access to per cpu data. */
@@ -627,79 +622,6 @@ static void tile_net_handle_egress_timer(unsigned long arg)
627} 622}
628 623
629 624
630#ifdef IGNORE_DUP_ACKS
631
632/*
633 * Help detect "duplicate" ACKs. These are sequential packets (for a
634 * given flow) which are exactly 66 bytes long, sharing everything but
635 * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
636 * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
637 * +N, and the Tstamps are usually identical.
638 *
639 * NOTE: Apparently truly duplicate acks (with identical "ack" values),
640 * should not be collapsed, as they are used for some kind of flow control.
641 */
642static bool is_dup_ack(char *s1, char *s2, unsigned int len)
643{
644 int i;
645
646 unsigned long long ignorable = 0;
647
648 /* Identification. */
649 ignorable |= (1ULL << 0x12);
650 ignorable |= (1ULL << 0x13);
651
652 /* Header checksum. */
653 ignorable |= (1ULL << 0x18);
654 ignorable |= (1ULL << 0x19);
655
656 /* ACK. */
657 ignorable |= (1ULL << 0x2a);
658 ignorable |= (1ULL << 0x2b);
659 ignorable |= (1ULL << 0x2c);
660 ignorable |= (1ULL << 0x2d);
661
662 /* WinSize. */
663 ignorable |= (1ULL << 0x30);
664 ignorable |= (1ULL << 0x31);
665
666 /* Checksum. */
667 ignorable |= (1ULL << 0x32);
668 ignorable |= (1ULL << 0x33);
669
670 for (i = 0; i < len; i++, ignorable >>= 1) {
671
672 if ((ignorable & 1) || (s1[i] == s2[i]))
673 continue;
674
675#ifdef TILE_NET_DEBUG
676 /* HACK: Mention non-timestamp diffs. */
677 if (i < 0x38 && i != 0x2f &&
678 net_ratelimit())
679 pr_info("Diff at 0x%x\n", i);
680#endif
681
682 return false;
683 }
684
685#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
686 /* HACK: Do not suppress truly duplicate ACKs. */
687 /* ISSUE: Is this actually necessary or helpful? */
688 if (s1[0x2a] == s2[0x2a] &&
689 s1[0x2b] == s2[0x2b] &&
690 s1[0x2c] == s2[0x2c] &&
691 s1[0x2d] == s2[0x2d]) {
692 return false;
693 }
694#endif
695
696 return true;
697}
698
699#endif
700
701
702
703static void tile_net_discard_aux(struct tile_net_cpu *info, int index) 625static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
704{ 626{
705 struct tile_netio_queue *queue = &info->queue; 627 struct tile_netio_queue *queue = &info->queue;
@@ -774,6 +696,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
774 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); 696 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
775 697
776 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); 698 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
699 netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);
777 700
778 /* Extract the packet size. FIXME: Shouldn't the second line */ 701 /* Extract the packet size. FIXME: Shouldn't the second line */
779 /* get subtracted? Mostly moot, since it should be "zero". */ 702 /* get subtracted? Mostly moot, since it should be "zero". */
@@ -806,40 +729,25 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
806#endif /* TILE_NET_DUMP_PACKETS */ 729#endif /* TILE_NET_DUMP_PACKETS */
807 730
808#ifdef TILE_NET_VERIFY_INGRESS 731#ifdef TILE_NET_VERIFY_INGRESS
809 if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && 732 if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
810 NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
811 /* Bug 6624: Includes UDP packets with a "zero" checksum. */
812 pr_warning("Bad L4 checksum on %d byte packet.\n", len);
813 }
814 if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
815 NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
816 dump_packet(buf, len, "rx"); 733 dump_packet(buf, len, "rx");
817 panic("Bad L3 checksum."); 734 panic("Unexpected OVERSIZE.");
818 }
819 switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
820 case NETIO_PKT_STATUS_OVERSIZE:
821 if (len >= 64) {
822 dump_packet(buf, len, "rx");
823 panic("Unexpected OVERSIZE.");
824 }
825 break;
826 case NETIO_PKT_STATUS_BAD:
827 pr_warning("Unexpected BAD %ld byte packet.\n", len);
828 } 735 }
829#endif 736#endif
830 737
831 filter = 0; 738 filter = 0;
832 739
833 /* ISSUE: Filter TCP packets with "bad" checksums? */ 740 if (pkt_status == NETIO_PKT_STATUS_BAD) {
834 741 /* Handle CRC error and hardware truncation. */
835 if (!(dev->flags & IFF_UP)) { 742 filter = 2;
743 } else if (!(dev->flags & IFF_UP)) {
836 /* Filter packets received before we're up. */ 744 /* Filter packets received before we're up. */
837 filter = 1; 745 filter = 1;
838 } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) { 746 } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
747 pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
839 /* Filter "truncated" packets. */ 748 /* Filter "truncated" packets. */
840 filter = 1; 749 filter = 2;
841 } else if (!(dev->flags & IFF_PROMISC)) { 750 } else if (!(dev->flags & IFF_PROMISC)) {
842 /* FIXME: Implement HW multicast filter. */
843 if (!is_multicast_ether_addr(buf)) { 751 if (!is_multicast_ether_addr(buf)) {
844 /* Filter packets not for our address. */ 752 /* Filter packets not for our address. */
845 const u8 *mine = dev->dev_addr; 753 const u8 *mine = dev->dev_addr;
@@ -847,9 +755,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
847 } 755 }
848 } 756 }
849 757
850 if (filter) { 758 u64_stats_update_begin(&stats->syncp);
851 759
852 /* ISSUE: Update "drop" statistics? */ 760 if (filter != 0) {
761
762 if (filter == 1)
763 stats->rx_dropped++;
764 else
765 stats->rx_errors++;
853 766
854 tile_net_provide_linux_buffer(info, va, small); 767 tile_net_provide_linux_buffer(info, va, small);
855 768
@@ -881,6 +794,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
881 stats->rx_bytes += len; 794 stats->rx_bytes += len;
882 } 795 }
883 796
797 u64_stats_update_end(&stats->syncp);
798
884 /* ISSUE: It would be nice to defer this until the packet has */ 799 /* ISSUE: It would be nice to defer this until the packet has */
885 /* actually been processed. */ 800 /* actually been processed. */
886 tile_net_return_credit(info); 801 tile_net_return_credit(info);
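Note: the filter value now encodes why a packet was rejected, driven by the
hardware status word instead of being re-derived: 1 means "legitimately not ours"
and feeds rx_dropped, 2 means "damaged in hardware" and feeds rx_errors. In
outline:

    if (pkt_status == NETIO_PKT_STATUS_BAD)
            filter = 2;             /* CRC error or hw truncation */
    else if (!(dev->flags & IFF_UP))
            filter = 1;             /* not up yet: a plain drop */
    /* ... */
    if (filter == 1)
            stats->rx_dropped++;
    else if (filter == 2)
            stats->rx_errors++;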
@@ -1907,8 +1822,10 @@ busy:
1907 kfree_skb(olds[i]); 1822 kfree_skb(olds[i]);
1908 1823
1909 /* Update stats. */ 1824 /* Update stats. */
1825 u64_stats_update_begin(&stats->syncp);
1910 stats->tx_packets += num_segs; 1826 stats->tx_packets += num_segs;
1911 stats->tx_bytes += (num_segs * sh_len) + d_len; 1827 stats->tx_bytes += (num_segs * sh_len) + d_len;
1828 u64_stats_update_end(&stats->syncp);
1912 1829
1913 /* Make sure the egress timer is scheduled. */ 1830 /* Make sure the egress timer is scheduled. */
1914 tile_net_schedule_egress_timer(info); 1831 tile_net_schedule_egress_timer(info);
@@ -1936,7 +1853,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1936 1853
1937 unsigned int csum_start = skb_checksum_start_offset(skb); 1854 unsigned int csum_start = skb_checksum_start_offset(skb);
1938 1855
1939 lepp_frag_t frags[LEPP_MAX_FRAGS]; 1856 lepp_frag_t frags[1 + MAX_SKB_FRAGS];
1940 1857
1941 unsigned int num_frags; 1858 unsigned int num_frags;
1942 1859
@@ -1951,7 +1868,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1951 unsigned int cmd_head, cmd_tail, cmd_next; 1868 unsigned int cmd_head, cmd_tail, cmd_next;
1952 unsigned int comp_tail; 1869 unsigned int comp_tail;
1953 1870
1954 lepp_cmd_t cmds[LEPP_MAX_FRAGS]; 1871 lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];
1955 1872
1956 1873
1957 /* 1874 /*
@@ -2089,8 +2006,10 @@ busy:
2089 kfree_skb(olds[i]); 2006 kfree_skb(olds[i]);
2090 2007
2091 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ 2008 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
2009 u64_stats_update_begin(&stats->syncp);
2092 stats->tx_packets++; 2010 stats->tx_packets++;
2093 stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); 2011 stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
2012 u64_stats_update_end(&stats->syncp);
2094 2013
2095 /* Make sure the egress timer is scheduled. */ 2014 /* Make sure the egress timer is scheduled. */
2096 tile_net_schedule_egress_timer(info); 2015 tile_net_schedule_egress_timer(info);
@@ -2127,30 +2046,51 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2127 * 2046 *
2128 * Returns the address of the device statistics structure. 2047 * Returns the address of the device statistics structure.
2129 */ 2048 */
2130static struct net_device_stats *tile_net_get_stats(struct net_device *dev) 2049static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
2050 struct rtnl_link_stats64 *stats)
2131{ 2051{
2132 struct tile_net_priv *priv = netdev_priv(dev); 2052 struct tile_net_priv *priv = netdev_priv(dev);
2133 u32 rx_packets = 0; 2053 u64 rx_packets = 0, tx_packets = 0;
2134 u32 tx_packets = 0; 2054 u64 rx_bytes = 0, tx_bytes = 0;
2135 u32 rx_bytes = 0; 2055 u64 rx_errors = 0, rx_dropped = 0;
2136 u32 tx_bytes = 0;
2137 int i; 2056 int i;
2138 2057
2139 for_each_online_cpu(i) { 2058 for_each_online_cpu(i) {
2140 if (priv->cpu[i]) { 2059 struct tile_net_stats_t *cpu_stats;
2141 rx_packets += priv->cpu[i]->stats.rx_packets; 2060 u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
2142 rx_bytes += priv->cpu[i]->stats.rx_bytes; 2061 u64 trx_errors, trx_dropped;
2143 tx_packets += priv->cpu[i]->stats.tx_packets; 2062 unsigned int start;
2144 tx_bytes += priv->cpu[i]->stats.tx_bytes; 2063
2145 } 2064 if (priv->cpu[i] == NULL)
2065 continue;
2066 cpu_stats = &priv->cpu[i]->stats;
2067
2068 do {
2069 start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
2070 trx_packets = cpu_stats->rx_packets;
2071 ttx_packets = cpu_stats->tx_packets;
2072 trx_bytes = cpu_stats->rx_bytes;
2073 ttx_bytes = cpu_stats->tx_bytes;
2074 trx_errors = cpu_stats->rx_errors;
2075 trx_dropped = cpu_stats->rx_dropped;
2076 } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
2077
2078 rx_packets += trx_packets;
2079 tx_packets += ttx_packets;
2080 rx_bytes += trx_bytes;
2081 tx_bytes += ttx_bytes;
2082 rx_errors += trx_errors;
2083 rx_dropped += trx_dropped;
2146 } 2084 }
2147 2085
2148 priv->stats.rx_packets = rx_packets; 2086 stats->rx_packets = rx_packets;
2149 priv->stats.rx_bytes = rx_bytes; 2087 stats->tx_packets = tx_packets;
2150 priv->stats.tx_packets = tx_packets; 2088 stats->rx_bytes = rx_bytes;
2151 priv->stats.tx_bytes = tx_bytes; 2089 stats->tx_bytes = tx_bytes;
2090 stats->rx_errors = rx_errors;
2091 stats->rx_dropped = rx_dropped;
2152 2092
2153 return &priv->stats; 2093 return stats;
2154} 2094}
2155 2095
2156 2096
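Note: the reader side pairs with the writer above. Per cpu, it snapshots the
counters inside a u64_stats_fetch_begin_bh()/retry_bh() loop and only accumulates a
snapshot taken under a stable sequence (the _bh variants match writers running in
softirq context in this era's API). Condensed to two of the six counters:

    for_each_online_cpu(i) {
            struct tile_net_stats_t *s;
            unsigned int start;
            u64 packets, bytes;

            if (priv->cpu[i] == NULL)
                    continue;
            s = &priv->cpu[i]->stats;
            do {
                    start = u64_stats_fetch_begin_bh(&s->syncp);
                    packets = s->rx_packets;    /* coherent snapshot */
                    bytes = s->rx_bytes;
            } while (u64_stats_fetch_retry_bh(&s->syncp, start));

            stats->rx_packets += packets;
            stats->rx_bytes += bytes;
    }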
@@ -2287,7 +2227,7 @@ static const struct net_device_ops tile_net_ops = {
2287 .ndo_stop = tile_net_stop, 2227 .ndo_stop = tile_net_stop,
2288 .ndo_start_xmit = tile_net_tx, 2228 .ndo_start_xmit = tile_net_tx,
2289 .ndo_do_ioctl = tile_net_ioctl, 2229 .ndo_do_ioctl = tile_net_ioctl,
2290 .ndo_get_stats = tile_net_get_stats, 2230 .ndo_get_stats64 = tile_net_get_stats64,
2291 .ndo_change_mtu = tile_net_change_mtu, 2231 .ndo_change_mtu = tile_net_change_mtu,
2292 .ndo_tx_timeout = tile_net_tx_timeout, 2232 .ndo_tx_timeout = tile_net_tx_timeout,
2293 .ndo_set_mac_address = tile_net_set_mac_address, 2233 .ndo_set_mac_address = tile_net_set_mac_address,
@@ -2305,39 +2245,30 @@ static const struct net_device_ops tile_net_ops = {
2305 */ 2245 */
2306static void tile_net_setup(struct net_device *dev) 2246static void tile_net_setup(struct net_device *dev)
2307{ 2247{
2308 PDEBUG("tile_net_setup()\n"); 2248 netdev_features_t features = 0;
2309 2249
2310 ether_setup(dev); 2250 ether_setup(dev);
2311
2312 dev->netdev_ops = &tile_net_ops; 2251 dev->netdev_ops = &tile_net_ops;
2313
2314 dev->watchdog_timeo = TILE_NET_TIMEOUT; 2252 dev->watchdog_timeo = TILE_NET_TIMEOUT;
2253 dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
2254 dev->mtu = TILE_NET_MTU;
2315 2255
2316 /* We want lockless xmit. */ 2256 features |= NETIF_F_HW_CSUM;
2317 dev->features |= NETIF_F_LLTX; 2257 features |= NETIF_F_SG;
2318
2319 /* We support hardware tx checksums. */
2320 dev->features |= NETIF_F_HW_CSUM;
2321
2322 /* We support scatter/gather. */
2323 dev->features |= NETIF_F_SG;
2324
2325 /* We support TSO. */
2326 dev->features |= NETIF_F_TSO;
2327 2258
2328#ifdef TILE_NET_GSO 2259 /* We support TSO iff the HV supports sufficient frags. */
2329 /* We support GSO. */ 2260 if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
2330 dev->features |= NETIF_F_GSO; 2261 features |= NETIF_F_TSO;
2331#endif
2332 2262
2263 /* We can't support HIGHDMA without hash_default, since we need
2264 * to be able to finv() with a VA if we don't have hash_default.
2265 */
2333 if (hash_default) 2266 if (hash_default)
2334 dev->features |= NETIF_F_HIGHDMA; 2267 features |= NETIF_F_HIGHDMA;
2335
2336 /* ISSUE: We should support NETIF_F_UFO. */
2337 2268
2338 dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; 2269 dev->hw_features |= features;
2339 2270 dev->vlan_features |= features;
2340 dev->mtu = TILE_NET_MTU; 2271 dev->features |= features;
2341} 2272}
2342 2273
2343 2274
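Note: gating TSO on LEPP_MAX_FRAGS replaces the old TILE_NET_GSO hack. A TSO skb
can need one LEPP command per page fragment plus one for the linear area, so the
feature is only advertised when the hypervisor queue can hold a worst-case skb:

    /* worst case: linear data + every page fragment */
    if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
            features |= NETIF_F_TSO;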
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01bdc6ca0755..c4dbf981804b 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,13 +1308,13 @@ static int tsi108_open(struct net_device *dev)
1308 data->id, dev->irq, dev->name); 1308 data->id, dev->irq, dev->name);
1309 } 1309 }
1310 1310
1311 data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma, 1311 data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma,
1312 GFP_KERNEL | __GFP_ZERO); 1312 GFP_KERNEL);
1313 if (!data->rxring) 1313 if (!data->rxring)
1314 return -ENOMEM; 1314 return -ENOMEM;
1315 1315
1316 data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma, 1316 data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
1317 GFP_KERNEL | __GFP_ZERO); 1317 GFP_KERNEL);
1318 if (!data->txring) { 1318 if (!data->txring) {
1319 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); 1319 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
1320 return -ENOMEM; 1320 return -ENOMEM;
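Note: dma_zalloc_coherent() is the then-current shorthand for dma_alloc_coherent()
plus __GFP_ZERO, part of a tree-wide cleanup; the arguments are otherwise
identical. Before/after (dev, size, and handle are placeholders):

    /* before */
    ring = dma_alloc_coherent(dev, size, &handle,
                              GFP_KERNEL | __GFP_ZERO);
    /* after: same semantics, zeroing implied */
    ring = dma_zalloc_coherent(dev, size, &handle, GFP_KERNEL);
    if (!ring)
            return -ENOMEM;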
@@ -1558,7 +1558,7 @@ tsi108_init_one(struct platform_device *pdev)
1558 hw_info *einfo; 1558 hw_info *einfo;
1559 int err = 0; 1559 int err = 0;
1560 1560
1561 einfo = pdev->dev.platform_data; 1561 einfo = dev_get_platdata(&pdev->dev);
1562 1562
1563 if (NULL == einfo) { 1563 if (NULL == einfo) {
1564 printk(KERN_ERR "tsi-eth %d: Missing additional data!\n", 1564 printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
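Note: dev_get_platdata() is the accessor form of dev.platform_data, introduced so
drivers stop reaching into struct device internals; the returned pointer is the
same. The one-line conversion repeated in the tsi108, wiznet, and ixp4xx hunks:

    struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
    /* identical to pdev->dev.platform_data, via the official accessor */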
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index b75eb9e0e867..c8f088ab5fdf 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2407,7 +2407,7 @@ static struct pci_driver rhine_driver = {
2407 .driver.pm = RHINE_PM_OPS, 2407 .driver.pm = RHINE_PM_OPS,
2408}; 2408};
2409 2409
2410static struct dmi_system_id __initdata rhine_dmi_table[] = { 2410static struct dmi_system_id rhine_dmi_table[] __initdata = {
2411 { 2411 {
2412 .ident = "EPIA-M", 2412 .ident = "EPIA-M",
2413 .matches = { 2413 .matches = {
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d01cacf8a7c2..d022bf936572 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2376,6 +2376,23 @@ out_0:
2376 return ret; 2376 return ret;
2377} 2377}
2378 2378
2379#ifdef CONFIG_NET_POLL_CONTROLLER
2380/**
2381 * velocity_poll_controller - Velocity Poll controller function
2382 * @dev: network device
2383 *
2384 *
2385 * Used by NETCONSOLE and other diagnostic tools to allow network I/O
2386 * with interrupts disabled.
2387 */
2388static void velocity_poll_controller(struct net_device *dev)
2389{
2390 disable_irq(dev->irq);
2391 velocity_intr(dev->irq, dev);
2392 enable_irq(dev->irq);
2393}
2394#endif
2395
2379/** 2396/**
2380 * velocity_mii_ioctl - MII ioctl handler 2397 * velocity_mii_ioctl - MII ioctl handler
2381 * @dev: network device 2398 * @dev: network device
@@ -2641,6 +2658,9 @@ static const struct net_device_ops velocity_netdev_ops = {
2641 .ndo_do_ioctl = velocity_ioctl, 2658 .ndo_do_ioctl = velocity_ioctl,
2642 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, 2659 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
2643 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, 2660 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
2661#ifdef CONFIG_NET_POLL_CONTROLLER
2662 .ndo_poll_controller = velocity_poll_controller,
2663#endif
2644}; 2664};
2645 2665
2646/** 2666/**
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 30fed08d1674..0df36c6ec7f4 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -622,7 +622,7 @@ static const struct net_device_ops w5100_netdev_ops = {
622 622
623static int w5100_hw_probe(struct platform_device *pdev) 623static int w5100_hw_probe(struct platform_device *pdev)
624{ 624{
625 struct wiznet_platform_data *data = pdev->dev.platform_data; 625 struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
626 struct net_device *ndev = platform_get_drvdata(pdev); 626 struct net_device *ndev = platform_get_drvdata(pdev);
627 struct w5100_priv *priv = netdev_priv(ndev); 627 struct w5100_priv *priv = netdev_priv(ndev);
628 const char *name = netdev_name(ndev); 628 const char *name = netdev_name(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index e92884564e1e..71c27b3292f1 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -542,7 +542,7 @@ static const struct net_device_ops w5300_netdev_ops = {
542 542
543static int w5300_hw_probe(struct platform_device *pdev) 543static int w5300_hw_probe(struct platform_device *pdev)
544{ 544{
545 struct wiznet_platform_data *data = pdev->dev.platform_data; 545 struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
546 struct net_device *ndev = platform_get_drvdata(pdev); 546 struct net_device *ndev = platform_get_drvdata(pdev);
547 struct w5300_priv *priv = netdev_priv(ndev); 547 struct w5300_priv *priv = netdev_priv(ndev);
548 const char *name = netdev_name(ndev); 548 const char *name = netdev_name(ndev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 58eb4488beff..b88121f240ca 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
243 243
244 /* allocate the tx and rx ring buffer descriptors. */ 244 /* allocate the tx and rx ring buffer descriptors. */
245 /* returns a virtual address and a physical address. */ 245 /* returns a virtual address and a physical address. */
246 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 246 lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
247 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 247 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
248 &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO); 248 &lp->tx_bd_p, GFP_KERNEL);
249 if (!lp->tx_bd_v) 249 if (!lp->tx_bd_v)
250 goto out; 250 goto out;
251 251
252 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 252 lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
253 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 253 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
254 &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO); 254 &lp->rx_bd_p, GFP_KERNEL);
255 if (!lp->rx_bd_v) 255 if (!lp->rx_bd_v)
256 goto out; 256 goto out;
257 257
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fb7d1c28a2ea..b2ff038d6d20 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -201,17 +201,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
201 /* 201 /*
202 * Allocate the Tx and Rx buffer descriptors. 202 * Allocate the Tx and Rx buffer descriptors.
203 */ 203 */
204 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 204 lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
205 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 205 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
206 &lp->tx_bd_p, 206 &lp->tx_bd_p, GFP_KERNEL);
207 GFP_KERNEL | __GFP_ZERO);
208 if (!lp->tx_bd_v) 207 if (!lp->tx_bd_v)
209 goto out; 208 goto out;
210 209
211 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 210 lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
212 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 211 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
213 &lp->rx_bd_p, 212 &lp->rx_bd_p, GFP_KERNEL);
214 GFP_KERNEL | __GFP_ZERO);
215 if (!lp->rx_bd_v) 213 if (!lp->rx_bd_v)
216 goto out; 214 goto out;
217 215
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fd4dbdae5331..4c619ea5189f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1230,8 +1230,7 @@ error:
1230 */ 1230 */
1231static int xemaclite_of_remove(struct platform_device *of_dev) 1231static int xemaclite_of_remove(struct platform_device *of_dev)
1232{ 1232{
1233 struct device *dev = &of_dev->dev; 1233 struct net_device *ndev = platform_get_drvdata(of_dev);
1234 struct net_device *ndev = dev_get_drvdata(dev);
1235 1234
1236 struct net_local *lp = netdev_priv(ndev); 1235 struct net_local *lp = netdev_priv(ndev);
1237 1236
@@ -1250,7 +1249,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
1250 lp->phy_node = NULL; 1249 lp->phy_node = NULL;
1251 1250
1252 xemaclite_remove_ndev(ndev, of_dev); 1251 xemaclite_remove_ndev(ndev, of_dev);
1253 dev_set_drvdata(dev, NULL);
1254 1252
1255 return 0; 1253 return 0;
1256} 1254}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3d689fcb7917..e78802e75ea6 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1384,7 +1384,7 @@ static int eth_init_one(struct platform_device *pdev)
1384{ 1384{
1385 struct port *port; 1385 struct port *port;
1386 struct net_device *dev; 1386 struct net_device *dev;
1387 struct eth_plat_info *plat = pdev->dev.platform_data; 1387 struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
1388 u32 regs_phys; 1388 u32 regs_phys;
1389 char phy_id[MII_BUS_ID_SIZE + 3]; 1389 char phy_id[MII_BUS_ID_SIZE + 3];
1390 int err; 1390 int err;