aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/magic-number.txt2
-rw-r--r--Documentation/networking/LICENSE.qlge328
-rw-r--r--Documentation/networking/phy.txt3
-rw-r--r--Documentation/zh_CN/magic-number.txt2
-rw-r--r--MAINTAINERS9
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/infiniband/core/addr.c14
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c11
-rw-r--r--drivers/isdn/hardware/eicon/capi20.h60
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c16
-rw-r--r--drivers/net/bonding/bond_alb.c12
-rw-r--r--drivers/net/caif/caif_hsi.c145
-rw-r--r--drivers/net/can/slcan.c6
-rw-r--r--drivers/net/ethernet/3com/3c59x.c5
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c5
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c5
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.h2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c1
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/ni65.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c8
-rw-r--r--drivers/net/ethernet/apple/mace.c5
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c51
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c36
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h55
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c323
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h6
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c21
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c42
-rw-r--r--drivers/net/ethernet/cadence/macb.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c58
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c4
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/21142.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c5
-rw-r--r--drivers/net/ethernet/dnet.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c73
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h46
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c64
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c46
-rw-r--r--drivers/net/ethernet/ethoc.c3
-rw-r--r--drivers/net/ethernet/freescale/fec.c17
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.h2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c1
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c2
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c25
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_hw.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c3
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c8
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c7
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c7
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c7
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h2
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c4
-rw-r--r--drivers/net/ethernet/icplus/ipg.c1
-rw-r--r--drivers/net/ethernet/intel/e100.c5
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile5
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h10
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h33
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c189
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c163
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c (renamed from drivers/net/ethernet/intel/e1000e/lib.c)1048
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c377
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c495
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c647
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c50
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c6
-rw-r--r--drivers/net/ethernet/jme.c1
-rw-r--r--drivers/net/ethernet/korina.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c10
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c6
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c3
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c1
-rw-r--r--drivers/net/ethernet/netx-eth.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c11
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c5
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h437
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c296
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c109
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c629
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c15
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c132
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c17
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c86
-rw-r--r--drivers/net/ethernet/rdc/r6040.c1
-rw-r--r--drivers/net/ethernet/realtek/8139too.c5
-rw-r--r--drivers/net/ethernet/realtek/Kconfig10
-rw-r--r--drivers/net/ethernet/realtek/r8169.c569
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c1
-rw-r--r--drivers/net/ethernet/s6gmac.c6
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/sfc/Kconfig13
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h18
-rw-r--r--drivers/net/ethernet/sfc/efx.c198
-rw-r--r--drivers/net/ethernet/sfc/efx.h10
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c138
-rw-r--r--drivers/net/ethernet/sfc/falcon.c42
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c12
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c15
-rw-r--r--drivers/net/ethernet/sfc/mac.h21
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c115
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h34
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mac.c61
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c415
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3542
-rw-r--r--drivers/net/ethernet/sfc/mcdi_phy.c36
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.c2
-rw-r--r--drivers/net/ethernet/sfc/mtd.c13
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h189
-rw-r--r--drivers/net/ethernet/sfc/nic.c69
-rw-r--r--drivers/net/ethernet/sfc/nic.h20
-rw-r--r--drivers/net/ethernet/sfc/qt202x_phy.c6
-rw-r--r--drivers/net/ethernet/sfc/rx.c119
-rw-r--r--drivers/net/ethernet/sfc/selftest.c110
-rw-r--r--drivers/net/ethernet/sfc/selftest.h1
-rw-r--r--drivers/net/ethernet/sfc/siena.c33
-rw-r--r--drivers/net/ethernet/sfc/spi.h2
-rw-r--r--drivers/net/ethernet/sfc/tenxpress.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c4
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c5
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c13
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c1
-rw-r--r--drivers/net/ethernet/ti/cpmac.c5
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c1
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c5
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c5
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c4
-rw-r--r--drivers/net/ethernet/via/via-rhine.c1
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig8
-rw-r--r--drivers/net/ethernet/xilinx/Makefile2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c5
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h508
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c1680
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c238
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hippi/rrunner.c8
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/netconsole.c8
-rw-r--r--drivers/net/plip/plip.c4
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/tokenring/3c359.c4
-rw-r--r--drivers/net/tokenring/madgemc.c1
-rw-r--r--drivers/net/tokenring/tms380tr.c179
-rw-r--r--drivers/net/usb/Kconfig22
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c228
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c44
-rw-r--r--drivers/net/wan/c101.c4
-rw-r--r--drivers/net/wan/dscc4.c8
-rw-r--r--drivers/net/wan/lmc/lmc_main.c1
-rw-r--r--drivers/net/wan/n2.c4
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c5
-rw-r--r--drivers/net/wireless/atmel.c5
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c18
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c5
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c5
-rw-r--r--drivers/net/wireless/libertas/if_cs.c5
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c4
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c4
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c4
-rw-r--r--drivers/net/wireless/orinoco/main.c6
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c5
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/net/xen-netfront.c62
-rw-r--r--drivers/s390/net/qeth_l3_main.c35
-rw-r--r--include/linux/netlink.h18
-rw-r--r--include/linux/snmp.h1
-rw-r--r--include/linux/tcp.h5
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/caif/caif_hsi.h1
-rw-r--r--include/net/genetlink.h31
-rw-r--r--include/net/ndisc.h1
-rw-r--r--include/net/netlink.h35
-rw-r--r--include/net/tcp.h66
-rw-r--r--net/atm/clip.c17
-rw-r--r--net/caif/caif_socket.c113
-rw-r--r--net/caif/chnl_net.c12
-rw-r--r--net/core/dev.c94
-rw-r--r--net/core/neighbour.c90
-rw-r--r--net/core/netpoll.c71
-rw-r--r--net/decnet/dn_neigh.c24
-rw-r--r--net/ipv4/ip_gre.c19
-rw-r--r--net/ipv4/ipip.c3
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/route.c5
-rw-r--r--net/ipv4/tcp_ipv4.c296
-rw-r--r--net/ipv4/tcp_minisocks.c12
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv6/anycast.c29
-rw-r--r--net/ipv6/ip6_fib.c19
-rw-r--r--net/ipv6/ip6_output.c10
-rw-r--r--net/ipv6/ndisc.c30
-rw-r--r--net/ipv6/reassembly.c7
-rw-r--r--net/ipv6/route.c15
-rw-r--r--net/ipv6/sit.c20
-rw-r--r--net/ipv6/tcp_ipv6.c227
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/netlink/af_netlink.c18
-rw-r--r--net/netlink/genetlink.c31
322 files changed, 11241 insertions, 5873 deletions
diff --git a/Documentation/magic-number.txt b/Documentation/magic-number.txt
index abf481f780ec..82761a31d64d 100644
--- a/Documentation/magic-number.txt
+++ b/Documentation/magic-number.txt
@@ -89,7 +89,7 @@ TTY_DRIVER_MAGIC 0x5402 tty_driver include/linux/tty_driver.h
89MGSLPC_MAGIC 0x5402 mgslpc_info drivers/char/pcmcia/synclink_cs.c 89MGSLPC_MAGIC 0x5402 mgslpc_info drivers/char/pcmcia/synclink_cs.c
90TTY_LDISC_MAGIC 0x5403 tty_ldisc include/linux/tty_ldisc.h 90TTY_LDISC_MAGIC 0x5403 tty_ldisc include/linux/tty_ldisc.h
91USB_SERIAL_MAGIC 0x6702 usb_serial drivers/usb/serial/usb-serial.h 91USB_SERIAL_MAGIC 0x6702 usb_serial drivers/usb/serial/usb-serial.h
92FULL_DUPLEX_MAGIC 0x6969 drivers/net/tulip/de2104x.c 92FULL_DUPLEX_MAGIC 0x6969 drivers/net/ethernet/dec/tulip/de2104x.c
93USB_BLUETOOTH_MAGIC 0x6d02 usb_bluetooth drivers/usb/class/bluetty.c 93USB_BLUETOOTH_MAGIC 0x6d02 usb_bluetooth drivers/usb/class/bluetty.c
94RFCOMM_TTY_MAGIC 0x6d02 net/bluetooth/rfcomm/tty.c 94RFCOMM_TTY_MAGIC 0x6d02 net/bluetooth/rfcomm/tty.c
95USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port drivers/usb/serial/usb-serial.h 95USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port drivers/usb/serial/usb-serial.h
diff --git a/Documentation/networking/LICENSE.qlge b/Documentation/networking/LICENSE.qlge
index 123b6edd7f18..ce64e4d15b21 100644
--- a/Documentation/networking/LICENSE.qlge
+++ b/Documentation/networking/LICENSE.qlge
@@ -1,46 +1,288 @@
1Copyright (c) 2003-2008 QLogic Corporation 1Copyright (c) 2003-2011 QLogic Corporation
2QLogic Linux Networking HBA Driver 2QLogic Linux qlge NIC Driver
3 3
4This program includes a device driver for Linux 2.6 that may be
5distributed with QLogic hardware specific firmware binary file.
6You may modify and redistribute the device driver code under the 4You may modify and redistribute the device driver code under the
7GNU General Public License as published by the Free Software 5GNU General Public License (a copy of which is attached hereto as
8Foundation (version 2 or a later version). 6Exhibit A) published by the Free Software Foundation (version 2).
9
10You may redistribute the hardware specific firmware binary file
11under the following terms:
12
13 1. Redistribution of source code (only if applicable),
14 must retain the above copyright notice, this list of
15 conditions and the following disclaimer.
16
17 2. Redistribution in binary form must reproduce the above
18 copyright notice, this list of conditions and the
19 following disclaimer in the documentation and/or other
20 materials provided with the distribution.
21
22 3. The name of QLogic Corporation may not be used to
23 endorse or promote products derived from this software
24 without specific prior written permission
25
26REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
27THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
28EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
30PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
31BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
33TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
35ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38POSSIBILITY OF SUCH DAMAGE.
39
40USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
41CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
42OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
43TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
44ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
45COMBINATION WITH THIS PROGRAM.
46 7
8
9EXHIBIT A
10
11 GNU GENERAL PUBLIC LICENSE
12 Version 2, June 1991
13
14 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
15 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 Everyone is permitted to copy and distribute verbatim copies
17 of this license document, but changing it is not allowed.
18
19 Preamble
20
21 The licenses for most software are designed to take away your
22freedom to share and change it. By contrast, the GNU General Public
23License is intended to guarantee your freedom to share and change free
24software--to make sure the software is free for all its users. This
25General Public License applies to most of the Free Software
26Foundation's software and to any other program whose authors commit to
27using it. (Some other Free Software Foundation software is covered by
28the GNU Lesser General Public License instead.) You can apply it to
29your programs, too.
30
31 When we speak of free software, we are referring to freedom, not
32price. Our General Public Licenses are designed to make sure that you
33have the freedom to distribute copies of free software (and charge for
34this service if you wish), that you receive source code or can get it
35if you want it, that you can change the software or use pieces of it
36in new free programs; and that you know you can do these things.
37
38 To protect your rights, we need to make restrictions that forbid
39anyone to deny you these rights or to ask you to surrender the rights.
40These restrictions translate to certain responsibilities for you if you
41distribute copies of the software, or if you modify it.
42
43 For example, if you distribute copies of such a program, whether
44gratis or for a fee, you must give the recipients all the rights that
45you have. You must make sure that they, too, receive or can get the
46source code. And you must show them these terms so they know their
47rights.
48
49 We protect your rights with two steps: (1) copyright the software, and
50(2) offer you this license which gives you legal permission to copy,
51distribute and/or modify the software.
52
53 Also, for each author's protection and ours, we want to make certain
54that everyone understands that there is no warranty for this free
55software. If the software is modified by someone else and passed on, we
56want its recipients to know that what they have is not the original, so
57that any problems introduced by others will not reflect on the original
58authors' reputations.
59
60 Finally, any free program is threatened constantly by software
61patents. We wish to avoid the danger that redistributors of a free
62program will individually obtain patent licenses, in effect making the
63program proprietary. To prevent this, we have made it clear that any
64patent must be licensed for everyone's free use or not licensed at all.
65
66 The precise terms and conditions for copying, distribution and
67modification follow.
68
69 GNU GENERAL PUBLIC LICENSE
70 TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
71
72 0. This License applies to any program or other work which contains
73a notice placed by the copyright holder saying it may be distributed
74under the terms of this General Public License. The "Program", below,
75refers to any such program or work, and a "work based on the Program"
76means either the Program or any derivative work under copyright law:
77that is to say, a work containing the Program or a portion of it,
78either verbatim or with modifications and/or translated into another
79language. (Hereinafter, translation is included without limitation in
80the term "modification".) Each licensee is addressed as "you".
81
82Activities other than copying, distribution and modification are not
83covered by this License; they are outside its scope. The act of
84running the Program is not restricted, and the output from the Program
85is covered only if its contents constitute a work based on the
86Program (independent of having been made by running the Program).
87Whether that is true depends on what the Program does.
88
89 1. You may copy and distribute verbatim copies of the Program's
90source code as you receive it, in any medium, provided that you
91conspicuously and appropriately publish on each copy an appropriate
92copyright notice and disclaimer of warranty; keep intact all the
93notices that refer to this License and to the absence of any warranty;
94and give any other recipients of the Program a copy of this License
95along with the Program.
96
97You may charge a fee for the physical act of transferring a copy, and
98you may at your option offer warranty protection in exchange for a fee.
99
100 2. You may modify your copy or copies of the Program or any portion
101of it, thus forming a work based on the Program, and copy and
102distribute such modifications or work under the terms of Section 1
103above, provided that you also meet all of these conditions:
104
105 a) You must cause the modified files to carry prominent notices
106 stating that you changed the files and the date of any change.
107
108 b) You must cause any work that you distribute or publish, that in
109 whole or in part contains or is derived from the Program or any
110 part thereof, to be licensed as a whole at no charge to all third
111 parties under the terms of this License.
112
113 c) If the modified program normally reads commands interactively
114 when run, you must cause it, when started running for such
115 interactive use in the most ordinary way, to print or display an
116 announcement including an appropriate copyright notice and a
117 notice that there is no warranty (or else, saying that you provide
118 a warranty) and that users may redistribute the program under
119 these conditions, and telling the user how to view a copy of this
120 License. (Exception: if the Program itself is interactive but
121 does not normally print such an announcement, your work based on
122 the Program is not required to print an announcement.)
123
124These requirements apply to the modified work as a whole. If
125identifiable sections of that work are not derived from the Program,
126and can be reasonably considered independent and separate works in
127themselves, then this License, and its terms, do not apply to those
128sections when you distribute them as separate works. But when you
129distribute the same sections as part of a whole which is a work based
130on the Program, the distribution of the whole must be on the terms of
131this License, whose permissions for other licensees extend to the
132entire whole, and thus to each and every part regardless of who wrote it.
133
134Thus, it is not the intent of this section to claim rights or contest
135your rights to work written entirely by you; rather, the intent is to
136exercise the right to control the distribution of derivative or
137collective works based on the Program.
138
139In addition, mere aggregation of another work not based on the Program
140with the Program (or with a work based on the Program) on a volume of
141a storage or distribution medium does not bring the other work under
142the scope of this License.
143
144 3. You may copy and distribute the Program (or a work based on it,
145under Section 2) in object code or executable form under the terms of
146Sections 1 and 2 above provided that you also do one of the following:
147
148 a) Accompany it with the complete corresponding machine-readable
149 source code, which must be distributed under the terms of Sections
150 1 and 2 above on a medium customarily used for software interchange; or,
151
152 b) Accompany it with a written offer, valid for at least three
153 years, to give any third party, for a charge no more than your
154 cost of physically performing source distribution, a complete
155 machine-readable copy of the corresponding source code, to be
156 distributed under the terms of Sections 1 and 2 above on a medium
157 customarily used for software interchange; or,
158
159 c) Accompany it with the information you received as to the offer
160 to distribute corresponding source code. (This alternative is
161 allowed only for noncommercial distribution and only if you
162 received the program in object code or executable form with such
163 an offer, in accord with Subsection b above.)
164
165The source code for a work means the preferred form of the work for
166making modifications to it. For an executable work, complete source
167code means all the source code for all modules it contains, plus any
168associated interface definition files, plus the scripts used to
169control compilation and installation of the executable. However, as a
170special exception, the source code distributed need not include
171anything that is normally distributed (in either source or binary
172form) with the major components (compiler, kernel, and so on) of the
173operating system on which the executable runs, unless that component
174itself accompanies the executable.
175
176If distribution of executable or object code is made by offering
177access to copy from a designated place, then offering equivalent
178access to copy the source code from the same place counts as
179distribution of the source code, even though third parties are not
180compelled to copy the source along with the object code.
181
182 4. You may not copy, modify, sublicense, or distribute the Program
183except as expressly provided under this License. Any attempt
184otherwise to copy, modify, sublicense or distribute the Program is
185void, and will automatically terminate your rights under this License.
186However, parties who have received copies, or rights, from you under
187this License will not have their licenses terminated so long as such
188parties remain in full compliance.
189
190 5. You are not required to accept this License, since you have not
191signed it. However, nothing else grants you permission to modify or
192distribute the Program or its derivative works. These actions are
193prohibited by law if you do not accept this License. Therefore, by
194modifying or distributing the Program (or any work based on the
195Program), you indicate your acceptance of this License to do so, and
196all its terms and conditions for copying, distributing or modifying
197the Program or works based on it.
198
199 6. Each time you redistribute the Program (or any work based on the
200Program), the recipient automatically receives a license from the
201original licensor to copy, distribute or modify the Program subject to
202these terms and conditions. You may not impose any further
203restrictions on the recipients' exercise of the rights granted herein.
204You are not responsible for enforcing compliance by third parties to
205this License.
206
207 7. If, as a consequence of a court judgment or allegation of patent
208infringement or for any other reason (not limited to patent issues),
209conditions are imposed on you (whether by court order, agreement or
210otherwise) that contradict the conditions of this License, they do not
211excuse you from the conditions of this License. If you cannot
212distribute so as to satisfy simultaneously your obligations under this
213License and any other pertinent obligations, then as a consequence you
214may not distribute the Program at all. For example, if a patent
215license would not permit royalty-free redistribution of the Program by
216all those who receive copies directly or indirectly through you, then
217the only way you could satisfy both it and this License would be to
218refrain entirely from distribution of the Program.
219
220If any portion of this section is held invalid or unenforceable under
221any particular circumstance, the balance of the section is intended to
222apply and the section as a whole is intended to apply in other
223circumstances.
224
225It is not the purpose of this section to induce you to infringe any
226patents or other property right claims or to contest validity of any
227such claims; this section has the sole purpose of protecting the
228integrity of the free software distribution system, which is
229implemented by public license practices. Many people have made
230generous contributions to the wide range of software distributed
231through that system in reliance on consistent application of that
232system; it is up to the author/donor to decide if he or she is willing
233to distribute software through any other system and a licensee cannot
234impose that choice.
235
236This section is intended to make thoroughly clear what is believed to
237be a consequence of the rest of this License.
238
239 8. If the distribution and/or use of the Program is restricted in
240certain countries either by patents or by copyrighted interfaces, the
241original copyright holder who places the Program under this License
242may add an explicit geographical distribution limitation excluding
243those countries, so that distribution is permitted only in or among
244countries not thus excluded. In such case, this License incorporates
245the limitation as if written in the body of this License.
246
247 9. The Free Software Foundation may publish revised and/or new versions
248of the General Public License from time to time. Such new versions will
249be similar in spirit to the present version, but may differ in detail to
250address new problems or concerns.
251
252Each version is given a distinguishing version number. If the Program
253specifies a version number of this License which applies to it and "any
254later version", you have the option of following the terms and conditions
255either of that version or of any later version published by the Free
256Software Foundation. If the Program does not specify a version number of
257this License, you may choose any version ever published by the Free Software
258Foundation.
259
260 10. If you wish to incorporate parts of the Program into other free
261programs whose distribution conditions are different, write to the author
262to ask for permission. For software which is copyrighted by the Free
263Software Foundation, write to the Free Software Foundation; we sometimes
264make exceptions for this. Our decision will be guided by the two goals
265of preserving the free status of all derivatives of our free software and
266of promoting the sharing and reuse of software generally.
267
268 NO WARRANTY
269
270 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
271FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
272OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
273PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
274OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
275MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
276TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
277PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
278REPAIR OR CORRECTION.
279
280 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
281WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
282REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
283INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
284OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
285TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
286YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
287PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
288POSSIBILITY OF SUCH DAMAGES.
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 9eb1ba52013d..95e5f5985a2a 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -62,7 +62,8 @@ The MDIO bus
62 5) The bus must also be declared somewhere as a device, and registered. 62 5) The bus must also be declared somewhere as a device, and registered.
63 63
64 As an example for how one driver implemented an mdio bus driver, see 64 As an example for how one driver implemented an mdio bus driver, see
65 drivers/net/gianfar_mii.c and arch/ppc/syslib/mpc85xx_devices.c 65 drivers/net/ethernet/freescale/fsl_pq_mdio.c and an associated DTS file
66 for one of the users. (e.g. "git grep fsl,.*-mdio arch/powerpc/boot/dts/")
66 67
67Connecting to a PHY 68Connecting to a PHY
68 69
diff --git a/Documentation/zh_CN/magic-number.txt b/Documentation/zh_CN/magic-number.txt
index c278f412dc65..f606ba8598cf 100644
--- a/Documentation/zh_CN/magic-number.txt
+++ b/Documentation/zh_CN/magic-number.txt
@@ -89,7 +89,7 @@ TTY_DRIVER_MAGIC 0x5402 tty_driver include/linux/tty_driver.h
89MGSLPC_MAGIC 0x5402 mgslpc_info drivers/char/pcmcia/synclink_cs.c 89MGSLPC_MAGIC 0x5402 mgslpc_info drivers/char/pcmcia/synclink_cs.c
90TTY_LDISC_MAGIC 0x5403 tty_ldisc include/linux/tty_ldisc.h 90TTY_LDISC_MAGIC 0x5403 tty_ldisc include/linux/tty_ldisc.h
91USB_SERIAL_MAGIC 0x6702 usb_serial drivers/usb/serial/usb-serial.h 91USB_SERIAL_MAGIC 0x6702 usb_serial drivers/usb/serial/usb-serial.h
92FULL_DUPLEX_MAGIC 0x6969 drivers/net/tulip/de2104x.c 92FULL_DUPLEX_MAGIC 0x6969 drivers/net/ethernet/dec/tulip/de2104x.c
93USB_BLUETOOTH_MAGIC 0x6d02 usb_bluetooth drivers/usb/class/bluetty.c 93USB_BLUETOOTH_MAGIC 0x6d02 usb_bluetooth drivers/usb/class/bluetty.c
94RFCOMM_TTY_MAGIC 0x6d02 net/bluetooth/rfcomm/tty.c 94RFCOMM_TTY_MAGIC 0x6d02 net/bluetooth/rfcomm/tty.c
95USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port drivers/usb/serial/usb-serial.h 95USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port drivers/usb/serial/usb-serial.h
diff --git a/MAINTAINERS b/MAINTAINERS
index a1fce9a3ab20..b4568a211d26 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1804,7 +1804,8 @@ F: Documentation/zh_CN/
1804CISCO VIC ETHERNET NIC DRIVER 1804CISCO VIC ETHERNET NIC DRIVER
1805M: Christian Benvenuti <benve@cisco.com> 1805M: Christian Benvenuti <benve@cisco.com>
1806M: Roopa Prabhu <roprabhu@cisco.com> 1806M: Roopa Prabhu <roprabhu@cisco.com>
1807M: David Wang <dwang2@cisco.com> 1807M: Neel Patel <neepatel@cisco.com>
1808M: Nishank Trivedi <nistrive@cisco.com>
1808S: Supported 1809S: Supported
1809F: drivers/net/ethernet/cisco/enic/ 1810F: drivers/net/ethernet/cisco/enic/
1810 1811
@@ -7467,6 +7468,12 @@ S: Supported
7467F: Documentation/filesystems/xfs.txt 7468F: Documentation/filesystems/xfs.txt
7468F: fs/xfs/ 7469F: fs/xfs/
7469 7470
7471XILINX AXI ETHERNET DRIVER
7472M: Ariane Keller <ariane.keller@tik.ee.ethz.ch>
7473M: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
7474S: Maintained
7475F: drivers/net/ethernet/xilinx/xilinx_axienet*
7476
7470XILINX SYSTEMACE DRIVER 7477XILINX SYSTEMACE DRIVER
7471M: Grant Likely <grant.likely@secretlab.ca> 7478M: Grant Likely <grant.likely@secretlab.ca>
7472W: http://www.secretlab.ca/ 7479W: http://www.secretlab.ca/
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index f5569699f31c..68c758871812 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1572,7 +1572,7 @@ static inline void host_vcc_unbind(struct lanai_dev *lanai,
1572 1572
1573static void lanai_reset(struct lanai_dev *lanai) 1573static void lanai_reset(struct lanai_dev *lanai)
1574{ 1574{
1575 printk(KERN_CRIT DEV_LABEL "(itf %d): *NOT* reseting - not " 1575 printk(KERN_CRIT DEV_LABEL "(itf %d): *NOT* resetting - not "
1576 "implemented\n", lanai->number); 1576 "implemented\n", lanai->number);
1577 /* TODO */ 1577 /* TODO */
1578 /* The following is just a hack until we write the real 1578 /* The following is just a hack until we write the real
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1612cfd50f39..6ef660c1332f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -178,22 +178,26 @@ static void queue_req(struct addr_req *req)
178 mutex_unlock(&lock); 178 mutex_unlock(&lock);
179} 179}
180 180
181static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr) 181static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, void *daddr)
182{ 182{
183 struct neighbour *n; 183 struct neighbour *n;
184 int ret; 184 int ret;
185 185
186 n = dst_neigh_lookup(dst, daddr);
187
186 rcu_read_lock(); 188 rcu_read_lock();
187 n = dst_get_neighbour_noref(dst);
188 if (!n || !(n->nud_state & NUD_VALID)) { 189 if (!n || !(n->nud_state & NUD_VALID)) {
189 if (n) 190 if (n)
190 neigh_event_send(n, NULL); 191 neigh_event_send(n, NULL);
191 ret = -ENODATA; 192 ret = -ENODATA;
192 } else { 193 } else {
193 ret = rdma_copy_addr(addr, dst->dev, n->ha); 194 ret = rdma_copy_addr(dev_addr, dst->dev, n->ha);
194 } 195 }
195 rcu_read_unlock(); 196 rcu_read_unlock();
196 197
198 if (n)
199 neigh_release(n);
200
197 return ret; 201 return ret;
198} 202}
199 203
@@ -232,7 +236,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
232 goto put; 236 goto put;
233 } 237 }
234 238
235 ret = dst_fetch_ha(&rt->dst, addr); 239 ret = dst_fetch_ha(&rt->dst, addr, &fl4.daddr);
236put: 240put:
237 ip_rt_put(rt); 241 ip_rt_put(rt);
238out: 242out:
@@ -280,7 +284,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
280 goto put; 284 goto put;
281 } 285 }
282 286
283 ret = dst_fetch_ha(dst, addr); 287 ret = dst_fetch_ha(dst, addr, &fl6.daddr);
284put: 288put:
285 dst_release(dst); 289 dst_release(dst);
286 return ret; 290 return ret;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0668bb3472d0..0cf61554f176 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1562,11 +1562,11 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
1562 struct neighbour *n; 1562 struct neighbour *n;
1563 int err, step; 1563 int err, step;
1564 1564
1565 rcu_read_lock(); 1565 n = dst_neigh_lookup(dst, &peer_ip);
1566 n = dst_get_neighbour_noref(dst);
1567 err = -ENODEV;
1568 if (!n) 1566 if (!n)
1569 goto out; 1567 return -ENODEV;
1568
1569 rcu_read_lock();
1570 err = -ENOMEM; 1570 err = -ENOMEM;
1571 if (n->dev->flags & IFF_LOOPBACK) { 1571 if (n->dev->flags & IFF_LOOPBACK) {
1572 struct net_device *pdev; 1572 struct net_device *pdev;
@@ -1614,6 +1614,8 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
1614out: 1614out:
1615 rcu_read_unlock(); 1615 rcu_read_unlock();
1616 1616
1617 neigh_release(n);
1618
1617 return err; 1619 return err;
1618} 1620}
1619 1621
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 425065b36b8c..fc5192ee928b 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1348,8 +1348,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1348 else 1348 else
1349 netdev = nesvnic->netdev; 1349 netdev = nesvnic->netdev;
1350 1350
1351 neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
1352
1351 rcu_read_lock(); 1353 rcu_read_lock();
1352 neigh = dst_get_neighbour_noref(&rt->dst);
1353 if (neigh) { 1354 if (neigh) {
1354 if (neigh->nud_state & NUD_VALID) { 1355 if (neigh->nud_state & NUD_VALID) {
1355 nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X" 1356 nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1360,8 +1361,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1360 if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, 1361 if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
1361 neigh->ha, ETH_ALEN)) { 1362 neigh->ha, ETH_ALEN)) {
1362 /* Mac address same as in nes_arp_table */ 1363 /* Mac address same as in nes_arp_table */
1363 ip_rt_put(rt); 1364 goto out;
1364 return rc;
1365 } 1365 }
1366 1366
1367 nes_manage_arp_cache(nesvnic->netdev, 1367 nes_manage_arp_cache(nesvnic->netdev,
@@ -1377,7 +1377,12 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1377 neigh_event_send(neigh, NULL); 1377 neigh_event_send(neigh, NULL);
1378 } 1378 }
1379 } 1379 }
1380out:
1380 rcu_read_unlock(); 1381 rcu_read_unlock();
1382
1383 if (neigh)
1384 neigh_release(neigh);
1385
1381 ip_rt_put(rt); 1386 ip_rt_put(rt);
1382 return rc; 1387 return rc;
1383} 1388}
diff --git a/drivers/isdn/hardware/eicon/capi20.h b/drivers/isdn/hardware/eicon/capi20.h
index 7ebcccda74d8..27ecd61888de 100644
--- a/drivers/isdn/hardware/eicon/capi20.h
+++ b/drivers/isdn/hardware/eicon/capi20.h
@@ -117,7 +117,7 @@ typedef struct api_profile_s {
117/*------------------------------------------------------------------*/ 117/*------------------------------------------------------------------*/
118 /* ALERT-REQUEST */ 118 /* ALERT-REQUEST */
119typedef struct { 119typedef struct {
120 byte structs[1]; /* Additional Info */ 120 byte structs[0]; /* Additional Info */
121} _ALT_REQP; 121} _ALT_REQP;
122 /* ALERT-CONFIRM */ 122 /* ALERT-CONFIRM */
123typedef struct { 123typedef struct {
@@ -126,7 +126,7 @@ typedef struct {
126 /* CONNECT-REQUEST */ 126 /* CONNECT-REQUEST */
127typedef struct { 127typedef struct {
128 word CIP_Value; 128 word CIP_Value;
129 byte structs[1]; /* Called party number, 129 byte structs[0]; /* Called party number,
130 Called party subaddress, 130 Called party subaddress,
131 Calling party number, 131 Calling party number,
132 Calling party subaddress, 132 Calling party subaddress,
@@ -143,7 +143,7 @@ typedef struct {
143 /* CONNECT-INDICATION */ 143 /* CONNECT-INDICATION */
144typedef struct { 144typedef struct {
145 word CIP_Value; 145 word CIP_Value;
146 byte structs[1]; /* Called party number, 146 byte structs[0]; /* Called party number,
147 Called party subaddress, 147 Called party subaddress,
148 Calling party number, 148 Calling party number,
149 Calling party subaddress, 149 Calling party subaddress,
@@ -155,24 +155,24 @@ typedef struct {
155 /* CONNECT-RESPONSE */ 155 /* CONNECT-RESPONSE */
156typedef struct { 156typedef struct {
157 word Accept; 157 word Accept;
158 byte structs[1]; /* B_protocol, 158 byte structs[0]; /* B_protocol,
159 Connected party number, 159 Connected party number,
160 Connected party subaddress, 160 Connected party subaddress,
161 LLC */ 161 LLC */
162} _CON_RESP; 162} _CON_RESP;
163 /* CONNECT-ACTIVE-INDICATION */ 163 /* CONNECT-ACTIVE-INDICATION */
164typedef struct { 164typedef struct {
165 byte structs[1]; /* Connected party number, 165 byte structs[0]; /* Connected party number,
166 Connected party subaddress, 166 Connected party subaddress,
167 LLC */ 167 LLC */
168} _CON_A_INDP; 168} _CON_A_INDP;
169 /* CONNECT-ACTIVE-RESPONSE */ 169 /* CONNECT-ACTIVE-RESPONSE */
170typedef struct { 170typedef struct {
171 byte structs[1]; /* empty */ 171 byte structs[0]; /* empty */
172} _CON_A_RESP; 172} _CON_A_RESP;
173 /* DISCONNECT-REQUEST */ 173 /* DISCONNECT-REQUEST */
174typedef struct { 174typedef struct {
175 byte structs[1]; /* Additional Info */ 175 byte structs[0]; /* Additional Info */
176} _DIS_REQP; 176} _DIS_REQP;
177 /* DISCONNECT-CONFIRM */ 177 /* DISCONNECT-CONFIRM */
178typedef struct { 178typedef struct {
@@ -184,13 +184,13 @@ typedef struct {
184} _DIS_INDP; 184} _DIS_INDP;
185 /* DISCONNECT-RESPONSE */ 185 /* DISCONNECT-RESPONSE */
186typedef struct { 186typedef struct {
187 byte structs[1]; /* empty */ 187 byte structs[0]; /* empty */
188} _DIS_RESP; 188} _DIS_RESP;
189 /* LISTEN-REQUEST */ 189 /* LISTEN-REQUEST */
190typedef struct { 190typedef struct {
191 dword Info_Mask; 191 dword Info_Mask;
192 dword CIP_Mask; 192 dword CIP_Mask;
193 byte structs[1]; /* Calling party number, 193 byte structs[0]; /* Calling party number,
194 Calling party subaddress */ 194 Calling party subaddress */
195} _LIS_REQP; 195} _LIS_REQP;
196 /* LISTEN-CONFIRM */ 196 /* LISTEN-CONFIRM */
@@ -199,7 +199,7 @@ typedef struct {
199} _LIS_CONP; 199} _LIS_CONP;
200 /* INFO-REQUEST */ 200 /* INFO-REQUEST */
201typedef struct { 201typedef struct {
202 byte structs[1]; /* Called party number, 202 byte structs[0]; /* Called party number,
203 Additional Info */ 203 Additional Info */
204} _INF_REQP; 204} _INF_REQP;
205 /* INFO-CONFIRM */ 205 /* INFO-CONFIRM */
@@ -209,15 +209,15 @@ typedef struct {
209 /* INFO-INDICATION */ 209 /* INFO-INDICATION */
210typedef struct { 210typedef struct {
211 word Number; 211 word Number;
212 byte structs[1]; /* Info element */ 212 byte structs[0]; /* Info element */
213} _INF_INDP; 213} _INF_INDP;
214 /* INFO-RESPONSE */ 214 /* INFO-RESPONSE */
215typedef struct { 215typedef struct {
216 byte structs[1]; /* empty */ 216 byte structs[0]; /* empty */
217} _INF_RESP; 217} _INF_RESP;
218 /* SELECT-B-REQUEST */ 218 /* SELECT-B-REQUEST */
219typedef struct { 219typedef struct {
220 byte structs[1]; /* B-protocol */ 220 byte structs[0]; /* B-protocol */
221} _SEL_B_REQP; 221} _SEL_B_REQP;
222 /* SELECT-B-CONFIRM */ 222 /* SELECT-B-CONFIRM */
223typedef struct { 223typedef struct {
@@ -226,7 +226,7 @@ typedef struct {
226 /* FACILITY-REQUEST */ 226 /* FACILITY-REQUEST */
227typedef struct { 227typedef struct {
228 word Selector; 228 word Selector;
229 byte structs[1]; /* Facility parameters */ 229 byte structs[0]; /* Facility parameters */
230} _FAC_REQP; 230} _FAC_REQP;
231 /* FACILITY-CONFIRM STRUCT FOR SUPPLEMENT. SERVICES */ 231 /* FACILITY-CONFIRM STRUCT FOR SUPPLEMENT. SERVICES */
232typedef struct { 232typedef struct {
@@ -240,21 +240,21 @@ typedef struct {
240typedef struct { 240typedef struct {
241 word Info; 241 word Info;
242 word Selector; 242 word Selector;
243 byte structs[1]; /* Facility parameters */ 243 byte structs[0]; /* Facility parameters */
244} _FAC_CONP; 244} _FAC_CONP;
245 /* FACILITY-INDICATION */ 245 /* FACILITY-INDICATION */
246typedef struct { 246typedef struct {
247 word Selector; 247 word Selector;
248 byte structs[1]; /* Facility parameters */ 248 byte structs[0]; /* Facility parameters */
249} _FAC_INDP; 249} _FAC_INDP;
250 /* FACILITY-RESPONSE */ 250 /* FACILITY-RESPONSE */
251typedef struct { 251typedef struct {
252 word Selector; 252 word Selector;
253 byte structs[1]; /* Facility parameters */ 253 byte structs[0]; /* Facility parameters */
254} _FAC_RESP; 254} _FAC_RESP;
255 /* CONNECT-B3-REQUEST */ 255 /* CONNECT-B3-REQUEST */
256typedef struct { 256typedef struct {
257 byte structs[1]; /* NCPI */ 257 byte structs[0]; /* NCPI */
258} _CON_B3_REQP; 258} _CON_B3_REQP;
259 /* CONNECT-B3-CONFIRM */ 259 /* CONNECT-B3-CONFIRM */
260typedef struct { 260typedef struct {
@@ -262,24 +262,24 @@ typedef struct {
262} _CON_B3_CONP; 262} _CON_B3_CONP;
263 /* CONNECT-B3-INDICATION */ 263 /* CONNECT-B3-INDICATION */
264typedef struct { 264typedef struct {
265 byte structs[1]; /* NCPI */ 265 byte structs[0]; /* NCPI */
266} _CON_B3_INDP; 266} _CON_B3_INDP;
267 /* CONNECT-B3-RESPONSE */ 267 /* CONNECT-B3-RESPONSE */
268typedef struct { 268typedef struct {
269 word Accept; 269 word Accept;
270 byte structs[1]; /* NCPI */ 270 byte structs[0]; /* NCPI */
271} _CON_B3_RESP; 271} _CON_B3_RESP;
272 /* CONNECT-B3-ACTIVE-INDICATION */ 272 /* CONNECT-B3-ACTIVE-INDICATION */
273typedef struct { 273typedef struct {
274 byte structs[1]; /* NCPI */ 274 byte structs[0]; /* NCPI */
275} _CON_B3_A_INDP; 275} _CON_B3_A_INDP;
276 /* CONNECT-B3-ACTIVE-RESPONSE */ 276 /* CONNECT-B3-ACTIVE-RESPONSE */
277typedef struct { 277typedef struct {
278 byte structs[1]; /* empty */ 278 byte structs[0]; /* empty */
279} _CON_B3_A_RESP; 279} _CON_B3_A_RESP;
280 /* DISCONNECT-B3-REQUEST */ 280 /* DISCONNECT-B3-REQUEST */
281typedef struct { 281typedef struct {
282 byte structs[1]; /* NCPI */ 282 byte structs[0]; /* NCPI */
283} _DIS_B3_REQP; 283} _DIS_B3_REQP;
284 /* DISCONNECT-B3-CONFIRM */ 284 /* DISCONNECT-B3-CONFIRM */
285typedef struct { 285typedef struct {
@@ -288,11 +288,11 @@ typedef struct {
288 /* DISCONNECT-B3-INDICATION */ 288 /* DISCONNECT-B3-INDICATION */
289typedef struct { 289typedef struct {
290 word Info; 290 word Info;
291 byte structs[1]; /* NCPI */ 291 byte structs[0]; /* NCPI */
292} _DIS_B3_INDP; 292} _DIS_B3_INDP;
293 /* DISCONNECT-B3-RESPONSE */ 293 /* DISCONNECT-B3-RESPONSE */
294typedef struct { 294typedef struct {
295 byte structs[1]; /* empty */ 295 byte structs[0]; /* empty */
296} _DIS_B3_RESP; 296} _DIS_B3_RESP;
297 /* DATA-B3-REQUEST */ 297 /* DATA-B3-REQUEST */
298typedef struct { 298typedef struct {
@@ -335,7 +335,7 @@ typedef struct {
335} _DAT_B3_RESP; 335} _DAT_B3_RESP;
336 /* RESET-B3-REQUEST */ 336 /* RESET-B3-REQUEST */
337typedef struct { 337typedef struct {
338 byte structs[1]; /* NCPI */ 338 byte structs[0]; /* NCPI */
339} _RES_B3_REQP; 339} _RES_B3_REQP;
340 /* RESET-B3-CONFIRM */ 340 /* RESET-B3-CONFIRM */
341typedef struct { 341typedef struct {
@@ -343,20 +343,20 @@ typedef struct {
343} _RES_B3_CONP; 343} _RES_B3_CONP;
344 /* RESET-B3-INDICATION */ 344 /* RESET-B3-INDICATION */
345typedef struct { 345typedef struct {
346 byte structs[1]; /* NCPI */ 346 byte structs[0]; /* NCPI */
347} _RES_B3_INDP; 347} _RES_B3_INDP;
348 /* RESET-B3-RESPONSE */ 348 /* RESET-B3-RESPONSE */
349typedef struct { 349typedef struct {
350 byte structs[1]; /* empty */ 350 byte structs[0]; /* empty */
351} _RES_B3_RESP; 351} _RES_B3_RESP;
352 /* CONNECT-B3-T90-ACTIVE-INDICATION */ 352 /* CONNECT-B3-T90-ACTIVE-INDICATION */
353typedef struct { 353typedef struct {
354 byte structs[1]; /* NCPI */ 354 byte structs[0]; /* NCPI */
355} _CON_B3_T90_A_INDP; 355} _CON_B3_T90_A_INDP;
356 /* CONNECT-B3-T90-ACTIVE-RESPONSE */ 356 /* CONNECT-B3-T90-ACTIVE-RESPONSE */
357typedef struct { 357typedef struct {
358 word Reject; 358 word Reject;
359 byte structs[1]; /* NCPI */ 359 byte structs[0]; /* NCPI */
360} _CON_B3_T90_A_RESP; 360} _CON_B3_T90_A_RESP;
361/*------------------------------------------------------------------*/ 361/*------------------------------------------------------------------*/
362/* message structure */ 362/* message structure */
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 22f8ec8b9247..04f115a9c43e 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1112,7 +1112,7 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
1112 struct l1oip *hc = bch->hw; 1112 struct l1oip *hc = bch->hw;
1113 int ret = -EINVAL; 1113 int ret = -EINVAL;
1114 struct mISDNhead *hh = mISDN_HEAD_P(skb); 1114 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1115 int l, ll, i; 1115 int l, ll;
1116 unsigned char *p; 1116 unsigned char *p;
1117 1117
1118 switch (hh->prim) { 1118 switch (hh->prim) {
@@ -1128,13 +1128,8 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
1128 break; 1128 break;
1129 } 1129 }
1130 /* check for AIS / ulaw-silence */ 1130 /* check for AIS / ulaw-silence */
1131 p = skb->data;
1132 l = skb->len; 1131 l = skb->len;
1133 for (i = 0; i < l; i++) { 1132 if (!memchr_inv(skb->data, 0xff, l)) {
1134 if (*p++ != 0xff)
1135 break;
1136 }
1137 if (i == l) {
1138 if (debug & DEBUG_L1OIP_MSG) 1133 if (debug & DEBUG_L1OIP_MSG)
1139 printk(KERN_DEBUG "%s: got AIS, not sending, " 1134 printk(KERN_DEBUG "%s: got AIS, not sending, "
1140 "but counting\n", __func__); 1135 "but counting\n", __func__);
@@ -1144,13 +1139,8 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
1144 return 0; 1139 return 0;
1145 } 1140 }
1146 /* check for silence */ 1141 /* check for silence */
1147 p = skb->data;
1148 l = skb->len; 1142 l = skb->len;
1149 for (i = 0; i < l; i++) { 1143 if (!memchr_inv(skb->data, 0x2a, l)) {
1150 if (*p++ != 0x2a)
1151 break;
1152 }
1153 if (i == l) {
1154 if (debug & DEBUG_L1OIP_MSG) 1144 if (debug & DEBUG_L1OIP_MSG)
1155 printk(KERN_DEBUG "%s: got silence, not sending" 1145 printk(KERN_DEBUG "%s: got silence, not sending"
1156 ", but counting\n", __func__); 1146 ", but counting\n", __func__);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f820b26b9db3..9abfde479316 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -180,11 +180,9 @@ static int tlb_initialize(struct bonding *bond)
180 int i; 180 int i;
181 181
182 new_hashtbl = kzalloc(size, GFP_KERNEL); 182 new_hashtbl = kzalloc(size, GFP_KERNEL);
183 if (!new_hashtbl) { 183 if (!new_hashtbl)
184 pr_err("%s: Error: Failed to allocate TLB hash table\n",
185 bond->dev->name);
186 return -1; 184 return -1;
187 } 185
188 _lock_tx_hashtbl_bh(bond); 186 _lock_tx_hashtbl_bh(bond);
189 187
190 bond_info->tx_hashtbl = new_hashtbl; 188 bond_info->tx_hashtbl = new_hashtbl;
@@ -784,11 +782,9 @@ static int rlb_initialize(struct bonding *bond)
784 int i; 782 int i;
785 783
786 new_hashtbl = kmalloc(size, GFP_KERNEL); 784 new_hashtbl = kmalloc(size, GFP_KERNEL);
787 if (!new_hashtbl) { 785 if (!new_hashtbl)
788 pr_err("%s: Error: Failed to allocate RLB hash table\n",
789 bond->dev->name);
790 return -1; 786 return -1;
791 } 787
792 _lock_rx_hashtbl_bh(bond); 788 _lock_rx_hashtbl_bh(bond);
793 789
794 bond_info->rx_hashtbl = new_hashtbl; 790 bond_info->rx_hashtbl = new_hashtbl;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 0a4fc62a381d..c8afd62239e9 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -426,6 +426,35 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
426 return xfer_sz; 426 return xfer_sz;
427} 427}
428 428
429static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
430{
431 int xfer_sz = 0;
432 int nfrms = 0;
433 u16 *plen;
434
435 if ((desc->header & ~CFHSI_PIGGY_DESC) ||
436 (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
437
438 pr_err("Invalid descriptor. %x %x\n", desc->header,
439 desc->offset);
440 return -EPROTO;
441 }
442
443 /* Calculate transfer length. */
444 plen = desc->cffrm_len;
445 while (nfrms < CFHSI_MAX_PKTS && *plen) {
446 xfer_sz += *plen;
447 plen++;
448 nfrms++;
449 }
450
451 if (xfer_sz % 4) {
452 pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
453 return -EPROTO;
454 }
455 return xfer_sz;
456}
457
429static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) 458static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
430{ 459{
431 int rx_sz = 0; 460 int rx_sz = 0;
@@ -517,8 +546,10 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
517static void cfhsi_rx_done(struct cfhsi *cfhsi) 546static void cfhsi_rx_done(struct cfhsi *cfhsi)
518{ 547{
519 int res; 548 int res;
520 int desc_pld_len = 0; 549 int desc_pld_len = 0, rx_len, rx_state;
521 struct cfhsi_desc *desc = NULL; 550 struct cfhsi_desc *desc = NULL;
551 u8 *rx_ptr, *rx_buf;
552 struct cfhsi_desc *piggy_desc = NULL;
522 553
523 desc = (struct cfhsi_desc *)cfhsi->rx_buf; 554 desc = (struct cfhsi_desc *)cfhsi->rx_buf;
524 555
@@ -534,65 +565,71 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
534 spin_unlock_bh(&cfhsi->lock); 565 spin_unlock_bh(&cfhsi->lock);
535 566
536 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { 567 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
537 desc_pld_len = cfhsi_rx_desc(desc, cfhsi); 568 desc_pld_len = cfhsi_rx_desc_len(desc);
538 if (desc_pld_len == -ENOMEM) 569
539 goto restart; 570 if (desc_pld_len < 0)
540 if (desc_pld_len == -EPROTO)
541 goto out_of_sync; 571 goto out_of_sync;
572
573 rx_buf = cfhsi->rx_buf;
574 rx_len = desc_pld_len;
575 if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
576 rx_len += CFHSI_DESC_SZ;
577 if (desc_pld_len == 0)
578 rx_buf = cfhsi->rx_flip_buf;
542 } else { 579 } else {
543 int pld_len; 580 rx_buf = cfhsi->rx_flip_buf;
544 581
545 if (!cfhsi->rx_state.piggy_desc) { 582 rx_len = CFHSI_DESC_SZ;
546 pld_len = cfhsi_rx_pld(desc, cfhsi); 583 if (cfhsi->rx_state.pld_len > 0 &&
547 if (pld_len == -ENOMEM) 584 (desc->header & CFHSI_PIGGY_DESC)) {
548 goto restart;
549 if (pld_len == -EPROTO)
550 goto out_of_sync;
551 cfhsi->rx_state.pld_len = pld_len;
552 } else {
553 pld_len = cfhsi->rx_state.pld_len;
554 }
555 585
556 if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
557 struct cfhsi_desc *piggy_desc;
558 piggy_desc = (struct cfhsi_desc *) 586 piggy_desc = (struct cfhsi_desc *)
559 (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ + 587 (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
560 pld_len); 588 cfhsi->rx_state.pld_len);
589
561 cfhsi->rx_state.piggy_desc = true; 590 cfhsi->rx_state.piggy_desc = true;
562 591
563 /* Extract piggy-backed descriptor. */ 592 /* Extract payload len from piggy-backed descriptor. */
564 desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi); 593 desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
565 if (desc_pld_len == -ENOMEM) 594 if (desc_pld_len < 0)
566 goto restart; 595 goto out_of_sync;
596
597 if (desc_pld_len > 0)
598 rx_len = desc_pld_len;
599
600 if (desc_pld_len > 0 &&
601 (piggy_desc->header & CFHSI_PIGGY_DESC))
602 rx_len += CFHSI_DESC_SZ;
567 603
568 /* 604 /*
569 * Copy needed information from the piggy-backed 605 * Copy needed information from the piggy-backed
570 * descriptor to the descriptor in the start. 606 * descriptor to the descriptor in the start.
571 */ 607 */
572 memcpy((u8 *)desc, (u8 *)piggy_desc, 608 memcpy(rx_buf, (u8 *)piggy_desc,
573 CFHSI_DESC_SHORT_SZ); 609 CFHSI_DESC_SHORT_SZ);
574 610 /* Mark no embedded frame here */
611 piggy_desc->offset = 0;
575 if (desc_pld_len == -EPROTO) 612 if (desc_pld_len == -EPROTO)
576 goto out_of_sync; 613 goto out_of_sync;
577 } 614 }
578 } 615 }
579 616
580 memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
581 if (desc_pld_len) { 617 if (desc_pld_len) {
582 cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD; 618 rx_state = CFHSI_RX_STATE_PAYLOAD;
583 cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ; 619 rx_ptr = rx_buf + CFHSI_DESC_SZ;
584 cfhsi->rx_len = desc_pld_len;
585 } else { 620 } else {
586 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; 621 rx_state = CFHSI_RX_STATE_DESC;
587 cfhsi->rx_ptr = cfhsi->rx_buf; 622 rx_ptr = rx_buf;
588 cfhsi->rx_len = CFHSI_DESC_SZ; 623 rx_len = CFHSI_DESC_SZ;
589 } 624 }
590 625
626 /* Initiate next read */
591 if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { 627 if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
592 /* Set up new transfer. */ 628 /* Set up new transfer. */
593 dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", 629 dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
594 __func__); 630 __func__);
595 res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, 631
632 res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
596 cfhsi->dev); 633 cfhsi->dev);
597 if (WARN_ON(res < 0)) { 634 if (WARN_ON(res < 0)) {
598 dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n", 635 dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
@@ -601,16 +638,32 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
601 cfhsi->ndev->stats.rx_dropped++; 638 cfhsi->ndev->stats.rx_dropped++;
602 } 639 }
603 } 640 }
604 return;
605 641
606restart: 642 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
607 if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) { 643 /* Extract payload from descriptor */
608 dev_err(&cfhsi->ndev->dev, "%s: No memory available " 644 if (cfhsi_rx_desc(desc, cfhsi) < 0)
609 "in %d iterations.\n", 645 goto out_of_sync;
610 __func__, CFHSI_MAX_RX_RETRIES); 646 } else {
611 BUG(); 647 /* Extract payload */
648 if (cfhsi_rx_pld(desc, cfhsi) < 0)
649 goto out_of_sync;
650 if (piggy_desc) {
651 /* Extract any payload in piggyback descriptor. */
652 if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
653 goto out_of_sync;
654 }
612 } 655 }
613 mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1); 656
657 /* Update state info */
658 memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
659 cfhsi->rx_state.state = rx_state;
660 cfhsi->rx_ptr = rx_ptr;
661 cfhsi->rx_len = rx_len;
662 cfhsi->rx_state.pld_len = desc_pld_len;
663 cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
664
665 if (rx_buf != cfhsi->rx_buf)
666 swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
614 return; 667 return;
615 668
616out_of_sync: 669out_of_sync:
@@ -1040,6 +1093,12 @@ int cfhsi_probe(struct platform_device *pdev)
1040 goto err_alloc_rx; 1093 goto err_alloc_rx;
1041 } 1094 }
1042 1095
1096 cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1097 if (!cfhsi->rx_flip_buf) {
1098 res = -ENODEV;
1099 goto err_alloc_rx_flip;
1100 }
1101
1043 /* Pre-calculate inactivity timeout. */ 1102 /* Pre-calculate inactivity timeout. */
1044 if (inactivity_timeout != -1) { 1103 if (inactivity_timeout != -1) {
1045 cfhsi->inactivity_timeout = 1104 cfhsi->inactivity_timeout =
@@ -1138,6 +1197,8 @@ int cfhsi_probe(struct platform_device *pdev)
1138 err_activate: 1197 err_activate:
1139 destroy_workqueue(cfhsi->wq); 1198 destroy_workqueue(cfhsi->wq);
1140 err_create_wq: 1199 err_create_wq:
1200 kfree(cfhsi->rx_flip_buf);
1201 err_alloc_rx_flip:
1141 kfree(cfhsi->rx_buf); 1202 kfree(cfhsi->rx_buf);
1142 err_alloc_rx: 1203 err_alloc_rx:
1143 kfree(cfhsi->tx_buf); 1204 kfree(cfhsi->tx_buf);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 3f1ebcc2cb83..98a5a7d867f5 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * slcan.c - serial line CAN interface driver (using tty line discipline) 2 * slcan.c - serial line CAN interface driver (using tty line discipline)
3 * 3 *
4 * This file is derived from linux/drivers/net/slip.c 4 * This file is derived from linux/drivers/net/slip/slip.c
5 * 5 *
6 * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk> 6 * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
7 * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org> 7 * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
@@ -639,10 +639,8 @@ static int __init slcan_init(void)
639 printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev); 639 printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
640 640
641 slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL); 641 slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
642 if (!slcan_devs) { 642 if (!slcan_devs)
643 printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
644 return -ENOMEM; 643 return -ENOMEM;
645 }
646 644
647 /* Fill in our line protocol discipline, and register it */ 645 /* Fill in our line protocol discipline, and register it */
648 status = tty_register_ldisc(N_SLCAN, &slc_ldisc); 646 status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8153a3e0a1a4..dc51d9218e6d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1121,10 +1121,9 @@ static int __devinit vortex_probe1(struct device *gendev,
1121 1121
1122 dev = alloc_etherdev(sizeof(*vp)); 1122 dev = alloc_etherdev(sizeof(*vp));
1123 retval = -ENOMEM; 1123 retval = -ENOMEM;
1124 if (!dev) { 1124 if (!dev)
1125 pr_err(PFX "unable to allocate etherdev, aborting\n");
1126 goto out; 1125 goto out;
1127 } 1126
1128 SET_NETDEV_DEV(dev, gendev); 1127 SET_NETDEV_DEV(dev, gendev);
1129 vp = netdev_priv(dev); 1128 vp = netdev_priv(dev);
1130 1129
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index a8bb30cf512d..bad4fa6815c5 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -97,7 +97,7 @@ config VORTEX
97 available from <http://www.tldp.org/docs.html#howto>. More 97 available from <http://www.tldp.org/docs.html#howto>. More
98 specific information is in 98 specific information is in
99 <file:Documentation/networking/vortex.txt> and in the comments at 99 <file:Documentation/networking/vortex.txt> and in the comments at
100 the beginning of <file:drivers/net/3c59x.c>. 100 the beginning of <file:drivers/net/ethernet/3com/3c59x.c>.
101 101
102 To compile this support as a module, choose M here. 102 To compile this support as a module, choose M here.
103 103
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 0f92e3567f68..c30adcc9828a 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -1,4 +1,4 @@
1/* drivers/net/ax88796.c 1/* drivers/net/ethernet/8390/ax88796.c
2 * 2 *
3 * Copyright 2005,2007 Simtec Electronics 3 * Copyright 2005,2007 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk> 4 * Ben Dooks <ben@simtec.co.uk>
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index bba51cdc74a1..5de394368ff3 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -192,7 +192,7 @@ static int get_prom(struct pcmcia_device *link)
192 unsigned int ioaddr = dev->base_addr; 192 unsigned int ioaddr = dev->base_addr;
193 int i, j; 193 int i, j;
194 194
195 /* This is based on drivers/net/ne.c */ 195 /* This is based on drivers/net/ethernet/8390/ne.c */
196 struct { 196 struct {
197 u_char value, offset; 197 u_char value, offset;
198 } program_seq[] = { 198 } program_seq[] = {
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 053b2551a72d..f2a4e5de18c4 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -326,7 +326,7 @@ static hw_info_t *get_prom(struct pcmcia_device *link)
326 u_char prom[32]; 326 u_char prom[32];
327 int i, j; 327 int i, j;
328 328
329 /* This is lifted straight from drivers/net/ne.c */ 329 /* This is lifted straight from drivers/net/ethernet/8390/ne.c */
330 struct { 330 struct {
331 u_char value, offset; 331 u_char value, offset;
332 } program_seq[] = { 332 } program_seq[] = {
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index cb4f38a17f20..11fc2eccb0fd 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -686,10 +686,9 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
686 } 686 }
687 687
688 dev = alloc_etherdev(sizeof(*np)); 688 dev = alloc_etherdev(sizeof(*np));
689 if (!dev) { 689 if (!dev)
690 printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
691 return -ENOMEM; 690 return -ENOMEM;
692 } 691
693 SET_NETDEV_DEV(dev, &pdev->dev); 692 SET_NETDEV_DEV(dev, &pdev->dev);
694 693
695 irq = pdev->irq; 694 irq = pdev->irq;
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index d812a103e032..525a9768bb54 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1467,10 +1467,8 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1467 int rc; 1467 int rc;
1468 1468
1469 ndev = alloc_etherdev(sizeof(struct bfin_mac_local)); 1469 ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
1470 if (!ndev) { 1470 if (!ndev)
1471 dev_err(&pdev->dev, "Cannot allocate net device!\n");
1472 return -ENOMEM; 1471 return -ENOMEM;
1473 }
1474 1472
1475 SET_NETDEV_DEV(ndev, &pdev->dev); 1473 SET_NETDEV_DEV(ndev, &pdev->dev);
1476 platform_set_drvdata(pdev, ndev); 1474 platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index f872748ab4e6..6c3b1c0adaa0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -463,11 +463,8 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
463 static int boards_found; 463 static int boards_found;
464 464
465 dev = alloc_etherdev(sizeof(struct ace_private)); 465 dev = alloc_etherdev(sizeof(struct ace_private));
466 if (dev == NULL) { 466 if (dev == NULL)
467 printk(KERN_ERR "acenic: Unable to allocate "
468 "net_device structure!\n");
469 return -ENOMEM; 467 return -ENOMEM;
470 }
471 468
472 SET_NETDEV_DEV(dev, &pdev->dev); 469 SET_NETDEV_DEV(dev, &pdev->dev);
473 470
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 238b537b68fe..8350f4b37a8a 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -113,7 +113,7 @@ config DEPCA
113 If you have a network (Ethernet) card of this type, say Y and read 113 If you have a network (Ethernet) card of this type, say Y and read
114 the Ethernet-HOWTO, available from 114 the Ethernet-HOWTO, available from
115 <http://www.tldp.org/docs.html#howto> as well as 115 <http://www.tldp.org/docs.html#howto> as well as
116 <file:drivers/net/depca.c>. 116 <file:drivers/net/ethernet/amd/depca.c>.
117 117
118 To compile this driver as a module, choose M here. The module 118 To compile this driver as a module, choose M here. The module
119 will be called depca. 119 will be called depca.
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 7d5ded80d2d7..216ae87b9459 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/am79c961.c 2 * linux/drivers/net/ethernet/amd/am79c961a.c
3 * 3 *
4 * by Russell King <rmk@arm.linux.org.uk> 1995-2001. 4 * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
5 * 5 *
diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
index fd634d32756b..9f384b79507b 100644
--- a/drivers/net/ethernet/amd/am79c961a.h
+++ b/drivers/net/ethernet/amd/am79c961a.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/arm/am79c961a.h 2 * linux/drivers/net/ethernet/amd/am79c961a.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 33e0a8c20f6b..b8306a589558 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1859,7 +1859,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1859 1859
1860 dev = alloc_etherdev(sizeof(struct amd8111e_priv)); 1860 dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1861 if (!dev) { 1861 if (!dev) {
1862 printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
1863 err = -ENOMEM; 1862 err = -ENOMEM;
1864 goto err_free_reg; 1863 goto err_free_reg;
1865 } 1864 }
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 8b95dd314253..a81c871aeacf 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1077,7 +1077,6 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1077 1077
1078 dev = alloc_etherdev(sizeof(struct au1000_private)); 1078 dev = alloc_etherdev(sizeof(struct au1000_private));
1079 if (!dev) { 1079 if (!dev) {
1080 dev_err(&pdev->dev, "alloc_etherdev failed\n");
1081 err = -ENOMEM; 1080 err = -ENOMEM;
1082 goto err_alloc; 1081 goto err_alloc;
1083 } 1082 }
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 73f8d4fa682d..dd82ee2f8d21 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1052,8 +1052,6 @@ static int __devinit dec_lance_probe(struct device *bdev, const int type)
1052 1052
1053 dev = alloc_etherdev(sizeof(struct lance_private)); 1053 dev = alloc_etherdev(sizeof(struct lance_private));
1054 if (!dev) { 1054 if (!dev) {
1055 printk(KERN_ERR "%s: Unable to allocate etherdev, aborting.\n",
1056 name);
1057 ret = -ENOMEM; 1055 ret = -ENOMEM;
1058 goto err_out; 1056 goto err_out;
1059 } 1057 }
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 6e6aa7213aab..735c213798b1 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -621,10 +621,8 @@ static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
621 } 621 }
622 else { 622 else {
623 ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA); 623 ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
624 if(!ret) { 624 if(!ret)
625 printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
626 return NULL; 625 return NULL;
627 }
628 } 626 }
629 if( (u32) virt_to_phys(ptr+size) > 0x1000000) { 627 if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
630 printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what); 628 printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 20e6dab0186c..1bb388f62e5b 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1649,8 +1649,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1649 1649
1650 dev = alloc_etherdev(sizeof(*lp)); 1650 dev = alloc_etherdev(sizeof(*lp));
1651 if (!dev) { 1651 if (!dev) {
1652 if (pcnet32_debug & NETIF_MSG_PROBE)
1653 pr_err("Memory allocation failed\n");
1654 ret = -ENOMEM; 1652 ret = -ENOMEM;
1655 goto err_release_region; 1653 goto err_release_region;
1656 } 1654 }
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index d070b229dbf7..ebc0dba5ba33 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1269,10 +1269,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
1269 memcpy(addr, prop_addr, sizeof(addr)); 1269 memcpy(addr, prop_addr, sizeof(addr));
1270 1270
1271 dev = alloc_etherdev(PRIV_BYTES); 1271 dev = alloc_etherdev(PRIV_BYTES);
1272 if (!dev) { 1272 if (!dev)
1273 printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
1274 return -ENOMEM; 1273 return -ENOMEM;
1275 }
1276 1274
1277 bp = netdev_priv(dev); 1275 bp = netdev_priv(dev);
1278 SET_NETDEV_DEV(dev, &mdev->ofdev.dev); 1276 SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
@@ -1660,10 +1658,8 @@ static int __init bmac_init(void)
1660{ 1658{
1661 if (bmac_emergency_rxbuf == NULL) { 1659 if (bmac_emergency_rxbuf == NULL) {
1662 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL); 1660 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1663 if (bmac_emergency_rxbuf == NULL) { 1661 if (bmac_emergency_rxbuf == NULL)
1664 printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
1665 return -ENOMEM; 1662 return -ENOMEM;
1666 }
1667 } 1663 }
1668 1664
1669 return macio_register_driver(&bmac_driver); 1665 return macio_register_driver(&bmac_driver);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index bec87bd9195c..bd5555dbe027 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -136,10 +136,8 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
136 */ 136 */
137 if (dummy_buf == NULL) { 137 if (dummy_buf == NULL) {
138 dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL); 138 dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
139 if (dummy_buf == NULL) { 139 if (dummy_buf == NULL)
140 printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
141 return -ENOMEM; 140 return -ENOMEM;
142 }
143 } 141 }
144 142
145 if (macio_request_resources(mdev, "mace")) { 143 if (macio_request_resources(mdev, "mace")) {
@@ -149,7 +147,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
149 147
150 dev = alloc_etherdev(PRIV_BYTES); 148 dev = alloc_etherdev(PRIV_BYTES);
151 if (!dev) { 149 if (!dev) {
152 printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
153 rc = -ENOMEM; 150 rc = -ENOMEM;
154 goto err_release; 151 goto err_release;
155 } 152 }
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index b8591246eb4c..7ee4aacb01a4 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2689,7 +2689,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2689 netdev = alloc_etherdev(sizeof(struct atl1c_adapter)); 2689 netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
2690 if (netdev == NULL) { 2690 if (netdev == NULL) {
2691 err = -ENOMEM; 2691 err = -ENOMEM;
2692 dev_err(&pdev->dev, "etherdev alloc failed\n");
2693 goto err_alloc_etherdev; 2692 goto err_alloc_etherdev;
2694 } 2693 }
2695 2694
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index c915c0873810..93ff2b231284 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2300,7 +2300,6 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2300 netdev = alloc_etherdev(sizeof(struct atl1e_adapter)); 2300 netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
2301 if (netdev == NULL) { 2301 if (netdev == NULL) {
2302 err = -ENOMEM; 2302 err = -ENOMEM;
2303 dev_err(&pdev->dev, "etherdev alloc failed\n");
2304 goto err_alloc_etherdev; 2303 goto err_alloc_etherdev;
2305 } 2304 }
2306 2305
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3fb66d09ece5..66f53c797e3a 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2138,7 +2138,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2138 2138
2139 dev = alloc_etherdev(sizeof(*bp)); 2139 dev = alloc_etherdev(sizeof(*bp));
2140 if (!dev) { 2140 if (!dev) {
2141 dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
2142 err = -ENOMEM; 2141 err = -ENOMEM;
2143 goto out; 2142 goto out;
2144 } 2143 }
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 021fb818007a..0a4c5405dcf4 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2625,10 +2625,8 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2625 u32 val; 2625 u32 val;
2626 2626
2627 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); 2627 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2628 if (good_mbuf == NULL) { 2628 if (good_mbuf == NULL)
2629 pr_err("Failed to allocate memory in %s\n", __func__);
2630 return -ENOMEM; 2629 return -ENOMEM;
2631 }
2632 2630
2633 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 2631 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2634 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE); 2632 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 8c73d34b2ff1..7d184fbffaf6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
1/* bnx2x.h: Broadcom Everest network driver. 1/* bnx2x.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -23,8 +23,8 @@
23 * (you will need to reboot afterwards) */ 23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */ 24/* #define BNX2X_STOP_ON_ERROR */
25 25
26#define DRV_MODULE_VERSION "1.70.35-0" 26#define DRV_MODULE_VERSION "1.72.00-0"
27#define DRV_MODULE_RELDATE "2011/11/10" 27#define DRV_MODULE_RELDATE "2012/01/26"
28#define BNX2X_BC_VER 0x040200 28#define BNX2X_BC_VER 0x040200
29 29
30#if defined(CONFIG_DCB) 30#if defined(CONFIG_DCB)
@@ -1088,7 +1088,8 @@ enum bnx2x_recovery_state {
1088 BNX2X_RECOVERY_DONE, 1088 BNX2X_RECOVERY_DONE,
1089 BNX2X_RECOVERY_INIT, 1089 BNX2X_RECOVERY_INIT,
1090 BNX2X_RECOVERY_WAIT, 1090 BNX2X_RECOVERY_WAIT,
1091 BNX2X_RECOVERY_FAILED 1091 BNX2X_RECOVERY_FAILED,
1092 BNX2X_RECOVERY_NIC_LOADING
1092}; 1093};
1093 1094
1094/* 1095/*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 03f3935fd8c2..6e6a684359b5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
1/* bnx2x_cmn.c: Broadcom Everest network driver. 1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -1767,12 +1767,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1767 1767
1768 bnx2x_napi_enable(bp); 1768 bnx2x_napi_enable(bp);
1769 1769
1770 /* set pf load just before approaching the MCP */
1771 bnx2x_set_pf_load(bp);
1772
1770 /* Send LOAD_REQUEST command to MCP 1773 /* Send LOAD_REQUEST command to MCP
1771 * Returns the type of LOAD command: 1774 * Returns the type of LOAD command:
1772 * if it is the first port to be initialized 1775 * if it is the first port to be initialized
1773 * common blocks should be initialized, otherwise - not 1776 * common blocks should be initialized, otherwise - not
1774 */ 1777 */
1775 if (!BP_NOMCP(bp)) { 1778 if (!BP_NOMCP(bp)) {
1779 /* init fw_seq */
1780 bp->fw_seq =
1781 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1782 DRV_MSG_SEQ_NUMBER_MASK);
1783 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1784
1785 /* Get current FW pulse sequence */
1786 bp->fw_drv_pulse_wr_seq =
1787 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1788 DRV_PULSE_SEQ_MASK);
1789 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1790
1776 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); 1791 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1777 if (!load_code) { 1792 if (!load_code) {
1778 BNX2X_ERR("MCP response failure, aborting\n"); 1793 BNX2X_ERR("MCP response failure, aborting\n");
@@ -1783,6 +1798,29 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1783 rc = -EBUSY; /* other port in diagnostic mode */ 1798 rc = -EBUSY; /* other port in diagnostic mode */
1784 LOAD_ERROR_EXIT(bp, load_error1); 1799 LOAD_ERROR_EXIT(bp, load_error1);
1785 } 1800 }
1801 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1802 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
1803 /* build FW version dword */
1804 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1805 (BCM_5710_FW_MINOR_VERSION << 8) +
1806 (BCM_5710_FW_REVISION_VERSION << 16) +
1807 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1808
1809 /* read loaded FW from chip */
1810 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1811
1812 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
1813 loaded_fw, my_fw);
1814
1815 /* abort nic load if version mismatch */
1816 if (my_fw != loaded_fw) {
1817 BNX2X_ERR("bnx2x with FW %x already loaded, "
1818 "which mismatches my %x FW. aborting",
1819 loaded_fw, my_fw);
1820 rc = -EBUSY;
1821 LOAD_ERROR_EXIT(bp, load_error2);
1822 }
1823 }
1786 1824
1787 } else { 1825 } else {
1788 int path = BP_PATH(bp); 1826 int path = BP_PATH(bp);
@@ -1949,7 +1987,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1949 if (bp->state == BNX2X_STATE_OPEN) 1987 if (bp->state == BNX2X_STATE_OPEN)
1950 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 1988 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1951#endif 1989#endif
1952 bnx2x_inc_load_cnt(bp);
1953 1990
1954 /* Wait for all pending SP commands to complete */ 1991 /* Wait for all pending SP commands to complete */
1955 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { 1992 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
@@ -1989,6 +2026,8 @@ load_error2:
1989 bp->port.pmf = 0; 2026 bp->port.pmf = 0;
1990load_error1: 2027load_error1:
1991 bnx2x_napi_disable(bp); 2028 bnx2x_napi_disable(bp);
2029 /* clear pf_load status, as it was already set */
2030 bnx2x_clear_pf_load(bp);
1992load_error0: 2031load_error0:
1993 bnx2x_free_mem(bp); 2032 bnx2x_free_mem(bp);
1994 2033
@@ -2109,7 +2148,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2109 /* The last driver must disable a "close the gate" if there is no 2148 /* The last driver must disable a "close the gate" if there is no
2110 * parity attention or "process kill" pending. 2149 * parity attention or "process kill" pending.
2111 */ 2150 */
2112 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) 2151 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2113 bnx2x_disable_close_the_gate(bp); 2152 bnx2x_disable_close_the_gate(bp);
2114 2153
2115 return 0; 2154 return 0;
@@ -3415,7 +3454,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3415 struct bnx2x *bp = netdev_priv(dev); 3454 struct bnx2x *bp = netdev_priv(dev);
3416 3455
3417 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 3456 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3418 pr_err("Handling parity error recovery. Try again later\n"); 3457 netdev_err(dev, "Handling parity error recovery. Try again later\n");
3419 return -EAGAIN; 3458 return -EAGAIN;
3420 } 3459 }
3421 3460
@@ -3542,7 +3581,7 @@ int bnx2x_resume(struct pci_dev *pdev)
3542 bp = netdev_priv(dev); 3581 bp = netdev_priv(dev);
3543 3582
3544 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 3583 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3545 pr_err("Handling parity error recovery. Try again later\n"); 3584 netdev_err(dev, "Handling parity error recovery. Try again later\n");
3546 return -EAGAIN; 3585 return -EAGAIN;
3547 } 3586 }
3548 3587
@@ -3558,8 +3597,6 @@ int bnx2x_resume(struct pci_dev *pdev)
3558 bnx2x_set_power_state(bp, PCI_D0); 3597 bnx2x_set_power_state(bp, PCI_D0);
3559 netif_device_attach(dev); 3598 netif_device_attach(dev);
3560 3599
3561 /* Since the chip was reset, clear the FW sequence number */
3562 bp->fw_seq = 0;
3563 rc = bnx2x_nic_load(bp, LOAD_OPEN); 3600 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3564 3601
3565 rtnl_unlock(); 3602 rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index bf27c54ff2e0..c7c7bf1e573a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
1/* bnx2x_cmn.h: Broadcom Everest network driver. 1/* bnx2x_cmn.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -379,8 +379,8 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
379 unsigned long ramrod_flags); 379 unsigned long ramrod_flags);
380 380
381/* Parity errors related */ 381/* Parity errors related */
382void bnx2x_inc_load_cnt(struct bnx2x *bp); 382void bnx2x_set_pf_load(struct bnx2x *bp);
383u32 bnx2x_dec_load_cnt(struct bnx2x *bp); 383bool bnx2x_clear_pf_load(struct bnx2x *bp);
384bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); 384bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
385bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); 385bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
386void bnx2x_set_reset_in_progress(struct bnx2x *bp); 386void bnx2x_set_reset_in_progress(struct bnx2x *bp);
@@ -984,10 +984,11 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
984 /* Function parameters */ 984 /* Function parameters */
985 start_params->mf_mode = bp->mf_mode; 985 start_params->mf_mode = bp->mf_mode;
986 start_params->sd_vlan_tag = bp->mf_ov; 986 start_params->sd_vlan_tag = bp->mf_ov;
987 if (CHIP_IS_E1x(bp)) 987
988 start_params->network_cos_mode = OVERRIDE_COS; 988 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
989 else
990 start_params->network_cos_mode = STATIC_COS; 989 start_params->network_cos_mode = STATIC_COS;
990 else /* CHIP_IS_E1X */
991 start_params->network_cos_mode = FW_WRR;
991 992
992 return bnx2x_func_state_change(bp, &func_params); 993 return bnx2x_func_state_change(bp, &func_params);
993} 994}
@@ -1539,7 +1540,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
1539{ 1540{
1540 if (SHMEM2_HAS(bp, drv_flags)) { 1541 if (SHMEM2_HAS(bp, drv_flags)) {
1541 u32 drv_flags; 1542 u32 drv_flags;
1542 bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); 1543 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
1543 drv_flags = SHMEM2_RD(bp, drv_flags); 1544 drv_flags = SHMEM2_RD(bp, drv_flags);
1544 1545
1545 if (set) 1546 if (set)
@@ -1549,7 +1550,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
1549 1550
1550 SHMEM2_WR(bp, drv_flags, drv_flags); 1551 SHMEM2_WR(bp, drv_flags, drv_flags);
1551 DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); 1552 DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
1552 bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); 1553 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
1553 } 1554 }
1554} 1555}
1555 1556
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 5051cf3deb20..9a9bd3ab4793 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
1/* bnx2x_dcb.c: Broadcom Everest network driver. 1/* bnx2x_dcb.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright 2009-2011 Broadcom Corporation 3 * Copyright 2009-2012 Broadcom Corporation
4 * 4 *
5 * Unless you and Broadcom execute a separate written software license 5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you 6 * agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2ab9254e2d5e..06c7a0435948 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
1/* bnx2x_dcb.h: Broadcom Everest network driver. 1/* bnx2x_dcb.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright 2009-2011 Broadcom Corporation 3 * Copyright 2009-2012 Broadcom Corporation
4 * 4 *
5 * Unless you and Broadcom execute a separate written software license 5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you 6 * agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index b983825d0ee9..3e4cff9b1ebe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,6 +1,6 @@
1/* bnx2x_dump.h: Broadcom Everest network driver. 1/* bnx2x_dump.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2011 Broadcom Corporation 3 * Copyright (c) 2012 Broadcom Corporation
4 * 4 *
5 * Unless you and Broadcom execute a separate written software license 5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you 6 * agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 31a8b38ab15e..137968d33d1e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
1/* bnx2x_ethtool.c: Broadcom Everest network driver. 1/* bnx2x_ethtool.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -175,7 +175,11 @@ static const struct {
175 { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 175 { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
176 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, 176 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
177 { STATS_OFFSET32(total_tpa_bytes_hi), 177 { STATS_OFFSET32(total_tpa_bytes_hi),
178 8, STATS_FLAGS_FUNC, "tpa_bytes"} 178 8, STATS_FLAGS_FUNC, "tpa_bytes"},
179 { STATS_OFFSET32(recoverable_error),
180 4, STATS_FLAGS_FUNC, "recoverable_errors" },
181 { STATS_OFFSET32(unrecoverable_error),
182 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
179}; 183};
180 184
181#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) 185#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -882,11 +886,27 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
882 return bp->common.flash_size; 886 return bp->common.flash_size;
883} 887}
884 888
889/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had
890 * we done things the other way around, if two pfs from the same port would
891 * attempt to access nvram at the same time, we could run into a scenario such
892 * as:
893 * pf A takes the port lock.
894 * pf B succeeds in taking the same lock since they are from the same port.
895 * pf A takes the per pf misc lock. Performs eeprom access.
896 * pf A finishes. Unlocks the per pf misc lock.
897 * Pf B takes the lock and proceeds to perform it's own access.
898 * pf A unlocks the per port lock, while pf B is still working (!).
899 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
900 * acess corrupted by pf B).*
901 */
885static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) 902static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
886{ 903{
887 int port = BP_PORT(bp); 904 int port = BP_PORT(bp);
888 int count, i; 905 int count, i;
889 u32 val = 0; 906 u32 val;
907
908 /* acquire HW lock: protect against other PFs in PF Direct Assignment */
909 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
890 910
891 /* adjust timeout for emulation/FPGA */ 911 /* adjust timeout for emulation/FPGA */
892 count = BNX2X_NVRAM_TIMEOUT_COUNT; 912 count = BNX2X_NVRAM_TIMEOUT_COUNT;
@@ -917,7 +937,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp)
917{ 937{
918 int port = BP_PORT(bp); 938 int port = BP_PORT(bp);
919 int count, i; 939 int count, i;
920 u32 val = 0; 940 u32 val;
921 941
922 /* adjust timeout for emulation/FPGA */ 942 /* adjust timeout for emulation/FPGA */
923 count = BNX2X_NVRAM_TIMEOUT_COUNT; 943 count = BNX2X_NVRAM_TIMEOUT_COUNT;
@@ -941,6 +961,8 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp)
941 return -EBUSY; 961 return -EBUSY;
942 } 962 }
943 963
964 /* release HW lock: protect against other PFs in PF Direct Assignment */
965 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
944 return 0; 966 return 0;
945} 967}
946 968
@@ -1370,7 +1392,8 @@ static int bnx2x_set_ringparam(struct net_device *dev,
1370 struct bnx2x *bp = netdev_priv(dev); 1392 struct bnx2x *bp = netdev_priv(dev);
1371 1393
1372 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 1394 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1373 pr_err("Handling parity error recovery. Try again later\n"); 1395 netdev_err(dev, "Handling parity error recovery. "
1396 "Try again later\n");
1374 return -EAGAIN; 1397 return -EAGAIN;
1375 } 1398 }
1376 1399
@@ -2024,7 +2047,8 @@ static void bnx2x_self_test(struct net_device *dev,
2024 struct bnx2x *bp = netdev_priv(dev); 2047 struct bnx2x *bp = netdev_priv(dev);
2025 u8 is_serdes; 2048 u8 is_serdes;
2026 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 2049 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2027 pr_err("Handling parity error recovery. Try again later\n"); 2050 netdev_err(bp->dev, "Handling parity error recovery. "
2051 "Try again later\n");
2028 etest->flags |= ETH_TEST_FL_FAILED; 2052 etest->flags |= ETH_TEST_FL_FAILED;
2029 return; 2053 return;
2030 } 2054 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 998652a1b858..e5c5982ae06d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
1/* bnx2x_fw_defs.h: Broadcom Everest network driver. 1/* bnx2x_fw_defs.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f4a07fbaed05..4bed52ba300d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
1/* bnx2x_fw_file_hdr.h: FW binary file header structure. 1/* bnx2x_fw_file_hdr.h: FW binary file header structure.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 3e30c8642c26..78b77de728b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
1/* bnx2x_hsi.h: Broadcom Everest network driver. 1/* bnx2x_hsi.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 4d748e77d1ac..29f5c3cca31a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
1/* bnx2x_init.h: Broadcom Everest network driver. 1/* bnx2x_init.h: Broadcom Everest network driver.
2 * Structures and macroes needed during the initialization. 2 * Structures and macroes needed during the initialization.
3 * 3 *
4 * Copyright (c) 2007-2011 Broadcom Corporation 4 * Copyright (c) 2007-2012 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 7ec1724753ad..fe66d902dc62 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
2 * Static functions needed during the initialization. 2 * Static functions needed during the initialization.
3 * This file is "included" in bnx2x_main.c. 3 * This file is "included" in bnx2x_main.c.
4 * 4 *
5 * Copyright (c) 2007-2011 Broadcom Corporation 5 * Copyright (c) 2007-2012 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -69,12 +69,12 @@ static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
69{ 69{
70 if (bp->dmae_ready) 70 if (bp->dmae_ready)
71 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); 71 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
72 else if (wb) 72
73 /* 73 /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
74 * Wide bus registers with no dmae need to be written 74 else if (wb && CHIP_IS_E1(bp))
75 * using indirect write.
76 */
77 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); 75 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
76
77 /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
78 else 78 else
79 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); 79 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
80} 80}
@@ -99,8 +99,14 @@ static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
99{ 99{
100 if (bp->dmae_ready) 100 if (bp->dmae_ready)
101 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); 101 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
102 else 102
103 /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
104 else if (CHIP_IS_E1(bp))
103 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); 105 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
106
107 /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
108 else
109 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
104} 110}
105 111
106static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, 112static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
@@ -177,8 +183,14 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
177{ 183{
178 if (bp->dmae_ready) 184 if (bp->dmae_ready)
179 VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); 185 VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
180 else 186
187 /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
188 else if (CHIP_IS_E1(bp))
181 bnx2x_init_ind_wr(bp, addr, data, len); 189 bnx2x_init_ind_wr(bp, addr, data, len);
190
191 /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
192 else
193 bnx2x_init_str_wr(bp, addr, data, len);
182} 194}
183 195
184static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, 196static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
@@ -840,25 +852,15 @@ static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
840 } 852 }
841} 853}
842 854
843static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count) 855static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count,
856 u32 base_reg, u32 reg)
844{ 857{
845 int i; 858 int i;
846 u32 wb_data[2]; 859 u32 wb_data[2] = {0, 0};
847
848 wb_data[0] = wb_data[1] = 0;
849
850 for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { 860 for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
851 REG_WR(bp, QM_REG_BASEADDR + i*4, 861 REG_WR(bp, base_reg + i*4,
852 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); 862 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
853 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, 863 bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2);
854 wb_data, 2);
855
856 if (CHIP_IS_E1H(bp)) {
857 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
858 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
859 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
860 wb_data, 2);
861 }
862 } 864 }
863} 865}
864 866
@@ -873,7 +875,12 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
873 case INITOP_INIT: 875 case INITOP_INIT:
874 /* set in the init-value array */ 876 /* set in the init-value array */
875 case INITOP_SET: 877 case INITOP_SET:
876 bnx2x_qm_set_ptr_table(bp, qm_cid_count); 878 bnx2x_qm_set_ptr_table(bp, qm_cid_count,
879 QM_REG_BASEADDR, QM_REG_PTRTBL);
880 if (CHIP_IS_E1H(bp))
881 bnx2x_qm_set_ptr_table(bp, qm_cid_count,
882 QM_REG_BASEADDR_EXT_A,
883 QM_REG_PTRTBL_EXT_A);
877 break; 884 break;
878 case INITOP_CLEAR: 885 case INITOP_CLEAR:
879 break; 886 break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 2091e5dbbcdd..2102ad593c14 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
1/* Copyright 2008-2011 Broadcom Corporation 1/* Copyright 2008-2012 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index e02a68a7fb85..9cc7bafb3dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
1/* Copyright 2008-2011 Broadcom Corporation 1/* Copyright 2008-2012 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1e3f978ee6da..ff19c3cf4409 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
1/* bnx2x_main.c: Broadcom Everest network driver. 1/* bnx2x_main.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -468,7 +468,9 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
468 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 468 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
469 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); 469 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
470 470
471 if (!cnt) { 471 if (!cnt ||
472 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
473 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
472 BNX2X_ERR("DMAE timeout!\n"); 474 BNX2X_ERR("DMAE timeout!\n");
473 rc = DMAE_TIMEOUT; 475 rc = DMAE_TIMEOUT;
474 goto unlock; 476 goto unlock;
@@ -498,9 +500,13 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
498 if (!bp->dmae_ready) { 500 if (!bp->dmae_ready) {
499 u32 *data = bnx2x_sp(bp, wb_data[0]); 501 u32 *data = bnx2x_sp(bp, wb_data[0]);
500 502
501 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" 503 DP(BNX2X_MSG_OFF,
502 " using indirect\n", dst_addr, len32); 504 "DMAE is not ready (dst_addr %08x len32 %d) using indirect\n",
503 bnx2x_init_ind_wr(bp, dst_addr, data, len32); 505 dst_addr, len32);
506 if (CHIP_IS_E1(bp))
507 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
508 else
509 bnx2x_init_str_wr(bp, dst_addr, data, len32);
504 return; 510 return;
505 } 511 }
506 512
@@ -528,10 +534,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
528 u32 *data = bnx2x_sp(bp, wb_data[0]); 534 u32 *data = bnx2x_sp(bp, wb_data[0]);
529 int i; 535 int i;
530 536
531 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)" 537 if (CHIP_IS_E1(bp)) {
532 " using indirect\n", src_addr, len32); 538 DP(BNX2X_MSG_OFF,
533 for (i = 0; i < len32; i++) 539 "DMAE is not ready (src_addr %08x len32 %d) using indirect\n",
534 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); 540 src_addr, len32);
541 for (i = 0; i < len32; i++)
542 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
543 } else
544 for (i = 0; i < len32; i++)
545 data[i] = REG_RD(bp, src_addr + i*4);
546
535 return; 547 return;
536 } 548 }
537 549
@@ -772,6 +784,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
772#endif 784#endif
773 785
774 bp->stats_state = STATS_STATE_DISABLED; 786 bp->stats_state = STATS_STATE_DISABLED;
787 bp->eth_stats.unrecoverable_error++;
775 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 788 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
776 789
777 BNX2X_ERR("begin crash dump -----------------\n"); 790 BNX2X_ERR("begin crash dump -----------------\n");
@@ -1007,8 +1020,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
1007 * initialization. 1020 * initialization.
1008 */ 1021 */
1009#define FLR_WAIT_USEC 10000 /* 10 miliseconds */ 1022#define FLR_WAIT_USEC 10000 /* 10 miliseconds */
1010#define FLR_WAIT_INTERAVAL 50 /* usec */ 1023#define FLR_WAIT_INTERVAL 50 /* usec */
1011#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */ 1024#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
1012 1025
1013struct pbf_pN_buf_regs { 1026struct pbf_pN_buf_regs {
1014 int pN; 1027 int pN;
@@ -1041,7 +1054,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1041 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < 1054 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1042 (init_crd - crd_start))) { 1055 (init_crd - crd_start))) {
1043 if (cur_cnt--) { 1056 if (cur_cnt--) {
1044 udelay(FLR_WAIT_INTERAVAL); 1057 udelay(FLR_WAIT_INTERVAL);
1045 crd = REG_RD(bp, regs->crd); 1058 crd = REG_RD(bp, regs->crd);
1046 crd_freed = REG_RD(bp, regs->crd_freed); 1059 crd_freed = REG_RD(bp, regs->crd_freed);
1047 } else { 1060 } else {
@@ -1055,7 +1068,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1055 } 1068 }
1056 } 1069 }
1057 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", 1070 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1058 poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); 1071 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1059} 1072}
1060 1073
1061static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, 1074static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
@@ -1073,7 +1086,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1073 1086
1074 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { 1087 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1075 if (cur_cnt--) { 1088 if (cur_cnt--) {
1076 udelay(FLR_WAIT_INTERAVAL); 1089 udelay(FLR_WAIT_INTERVAL);
1077 occup = REG_RD(bp, regs->lines_occup); 1090 occup = REG_RD(bp, regs->lines_occup);
1078 freed = REG_RD(bp, regs->lines_freed); 1091 freed = REG_RD(bp, regs->lines_freed);
1079 } else { 1092 } else {
@@ -1087,7 +1100,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1087 } 1100 }
1088 } 1101 }
1089 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", 1102 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1090 poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); 1103 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1091} 1104}
1092 1105
1093static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, 1106static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
@@ -1097,7 +1110,7 @@ static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1097 u32 val; 1110 u32 val;
1098 1111
1099 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) 1112 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1100 udelay(FLR_WAIT_INTERAVAL); 1113 udelay(FLR_WAIT_INTERVAL);
1101 1114
1102 return val; 1115 return val;
1103} 1116}
@@ -1210,7 +1223,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1210 int ret = 0; 1223 int ret = 0;
1211 1224
1212 if (REG_RD(bp, comp_addr)) { 1225 if (REG_RD(bp, comp_addr)) {
1213 BNX2X_ERR("Cleanup complete is not 0\n"); 1226 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1214 return 1; 1227 return 1;
1215 } 1228 }
1216 1229
@@ -1219,7 +1232,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1219 op_gen.command |= OP_GEN_AGG_VECT(clnup_func); 1232 op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
1220 op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 1233 op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1221 1234
1222 DP(BNX2X_MSG_SP, "FW Final cleanup\n"); 1235 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1223 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); 1236 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
1224 1237
1225 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { 1238 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
@@ -1334,6 +1347,7 @@ static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1334 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 1347 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1335 1348
1336 /* Poll HW usage counters */ 1349 /* Poll HW usage counters */
1350 DP(BNX2X_MSG_SP, "Polling usage counters\n");
1337 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) 1351 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1338 return -EBUSY; 1352 return -EBUSY;
1339 1353
@@ -3713,11 +3727,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3713 */ 3727 */
3714void bnx2x_set_reset_global(struct bnx2x *bp) 3728void bnx2x_set_reset_global(struct bnx2x *bp)
3715{ 3729{
3716 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3730 u32 val;
3717 3731 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3732 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3718 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); 3733 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
3719 barrier(); 3734 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3720 mmiowb();
3721} 3735}
3722 3736
3723/* 3737/*
@@ -3727,11 +3741,11 @@ void bnx2x_set_reset_global(struct bnx2x *bp)
3727 */ 3741 */
3728static inline void bnx2x_clear_reset_global(struct bnx2x *bp) 3742static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
3729{ 3743{
3730 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3744 u32 val;
3731 3745 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3746 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3732 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); 3747 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
3733 barrier(); 3748 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3734 mmiowb();
3735} 3749}
3736 3750
3737/* 3751/*
@@ -3754,15 +3768,17 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
3754 */ 3768 */
3755static inline void bnx2x_set_reset_done(struct bnx2x *bp) 3769static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3756{ 3770{
3757 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3771 u32 val;
3758 u32 bit = BP_PATH(bp) ? 3772 u32 bit = BP_PATH(bp) ?
3759 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 3773 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3774 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3775 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3760 3776
3761 /* Clear the bit */ 3777 /* Clear the bit */
3762 val &= ~bit; 3778 val &= ~bit;
3763 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3779 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3764 barrier(); 3780
3765 mmiowb(); 3781 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3766} 3782}
3767 3783
3768/* 3784/*
@@ -3772,15 +3788,16 @@ static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3772 */ 3788 */
3773void bnx2x_set_reset_in_progress(struct bnx2x *bp) 3789void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3774{ 3790{
3775 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3791 u32 val;
3776 u32 bit = BP_PATH(bp) ? 3792 u32 bit = BP_PATH(bp) ?
3777 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 3793 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3794 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3795 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3778 3796
3779 /* Set the bit */ 3797 /* Set the bit */
3780 val |= bit; 3798 val |= bit;
3781 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3799 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3782 barrier(); 3800 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3783 mmiowb();
3784} 3801}
3785 3802
3786/* 3803/*
@@ -3798,25 +3815,28 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
3798} 3815}
3799 3816
3800/* 3817/*
3801 * Increment the load counter for the current engine. 3818 * set pf load for the current pf.
3802 * 3819 *
3803 * should be run under rtnl lock 3820 * should be run under rtnl lock
3804 */ 3821 */
3805void bnx2x_inc_load_cnt(struct bnx2x *bp) 3822void bnx2x_set_pf_load(struct bnx2x *bp)
3806{ 3823{
3807 u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3824 u32 val1, val;
3808 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 3825 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3809 BNX2X_PATH0_LOAD_CNT_MASK; 3826 BNX2X_PATH0_LOAD_CNT_MASK;
3810 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3827 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
3811 BNX2X_PATH0_LOAD_CNT_SHIFT; 3828 BNX2X_PATH0_LOAD_CNT_SHIFT;
3812 3829
3830 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3831 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3832
3813 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); 3833 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3814 3834
3815 /* get the current counter value */ 3835 /* get the current counter value */
3816 val1 = (val & mask) >> shift; 3836 val1 = (val & mask) >> shift;
3817 3837
3818 /* increment... */ 3838 /* set bit of that PF */
3819 val1++; 3839 val1 |= (1 << bp->pf_num);
3820 3840
3821 /* clear the old value */ 3841 /* clear the old value */
3822 val &= ~mask; 3842 val &= ~mask;
@@ -3825,34 +3845,35 @@ void bnx2x_inc_load_cnt(struct bnx2x *bp)
3825 val |= ((val1 << shift) & mask); 3845 val |= ((val1 << shift) & mask);
3826 3846
3827 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3847 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3828 barrier(); 3848 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3829 mmiowb();
3830} 3849}
3831 3850
3832/** 3851/**
3833 * bnx2x_dec_load_cnt - decrement the load counter 3852 * bnx2x_clear_pf_load - clear pf load mark
3834 * 3853 *
3835 * @bp: driver handle 3854 * @bp: driver handle
3836 * 3855 *
3837 * Should be run under rtnl lock. 3856 * Should be run under rtnl lock.
3838 * Decrements the load counter for the current engine. Returns 3857 * Decrements the load counter for the current engine. Returns
3839 * the new counter value. 3858 * whether other functions are still loaded
3840 */ 3859 */
3841u32 bnx2x_dec_load_cnt(struct bnx2x *bp) 3860bool bnx2x_clear_pf_load(struct bnx2x *bp)
3842{ 3861{
3843 u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3862 u32 val1, val;
3844 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 3863 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3845 BNX2X_PATH0_LOAD_CNT_MASK; 3864 BNX2X_PATH0_LOAD_CNT_MASK;
3846 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 3865 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
3847 BNX2X_PATH0_LOAD_CNT_SHIFT; 3866 BNX2X_PATH0_LOAD_CNT_SHIFT;
3848 3867
3868 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3869 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3849 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); 3870 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3850 3871
3851 /* get the current counter value */ 3872 /* get the current counter value */
3852 val1 = (val & mask) >> shift; 3873 val1 = (val & mask) >> shift;
3853 3874
3854 /* decrement... */ 3875 /* clear bit of that PF */
3855 val1--; 3876 val1 &= ~(1 << bp->pf_num);
3856 3877
3857 /* clear the old value */ 3878 /* clear the old value */
3858 val &= ~mask; 3879 val &= ~mask;
@@ -3861,18 +3882,16 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3861 val |= ((val1 << shift) & mask); 3882 val |= ((val1 << shift) & mask);
3862 3883
3863 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 3884 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3864 barrier(); 3885 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3865 mmiowb(); 3886 return val1 != 0;
3866
3867 return val1;
3868} 3887}
3869 3888
3870/* 3889/*
3871 * Read the load counter for the current engine. 3890 * Read the load status for the current engine.
3872 * 3891 *
3873 * should be run under rtnl lock 3892 * should be run under rtnl lock
3874 */ 3893 */
3875static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine) 3894static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
3876{ 3895{
3877 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 3896 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
3878 BNX2X_PATH0_LOAD_CNT_MASK); 3897 BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3884,23 +3903,23 @@ static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
3884 3903
3885 val = (val & mask) >> shift; 3904 val = (val & mask) >> shift;
3886 3905
3887 DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val); 3906 DP(NETIF_MSG_HW, "load mask for engine %d = 0x%x\n", engine, val);
3888 3907
3889 return val; 3908 return val != 0;
3890} 3909}
3891 3910
3892/* 3911/*
3893 * Reset the load counter for the current engine. 3912 * Reset the load status for the current engine.
3894 *
3895 * should be run under rtnl lock
3896 */ 3913 */
3897static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) 3914static inline void bnx2x_clear_load_status(struct bnx2x *bp)
3898{ 3915{
3899 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3916 u32 val;
3900 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 3917 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3901 BNX2X_PATH0_LOAD_CNT_MASK); 3918 BNX2X_PATH0_LOAD_CNT_MASK);
3902 3919 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3920 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3903 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); 3921 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
3922 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3904} 3923}
3905 3924
3906static inline void _print_next_block(int idx, const char *blk) 3925static inline void _print_next_block(int idx, const char *blk)
@@ -5423,6 +5442,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5423 5442
5424 /* init shortcut */ 5443 /* init shortcut */
5425 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); 5444 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
5445
5426 /* Setup SB indicies */ 5446 /* Setup SB indicies */
5427 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 5447 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5428 5448
@@ -6687,13 +6707,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
6687 u16 cdu_ilt_start; 6707 u16 cdu_ilt_start;
6688 u32 addr, val; 6708 u32 addr, val;
6689 u32 main_mem_base, main_mem_size, main_mem_prty_clr; 6709 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
6690 int i, main_mem_width; 6710 int i, main_mem_width, rc;
6691 6711
6692 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); 6712 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6693 6713
6694 /* FLR cleanup - hmmm */ 6714 /* FLR cleanup - hmmm */
6695 if (!CHIP_IS_E1x(bp)) 6715 if (!CHIP_IS_E1x(bp)) {
6696 bnx2x_pf_flr_clnup(bp); 6716 rc = bnx2x_pf_flr_clnup(bp);
6717 if (rc)
6718 return rc;
6719 }
6697 6720
6698 /* set MSI reconfigure capability */ 6721 /* set MSI reconfigure capability */
6699 if (bp->common.int_block == INT_BLOCK_HC) { 6722 if (bp->common.int_block == INT_BLOCK_HC) {
@@ -8458,13 +8481,38 @@ int bnx2x_leader_reset(struct bnx2x *bp)
8458{ 8481{
8459 int rc = 0; 8482 int rc = 0;
8460 bool global = bnx2x_reset_is_global(bp); 8483 bool global = bnx2x_reset_is_global(bp);
8484 u32 load_code;
8485
8486 /* if not going to reset MCP - load "fake" driver to reset HW while
8487 * driver is owner of the HW
8488 */
8489 if (!global && !BP_NOMCP(bp)) {
8490 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
8491 if (!load_code) {
8492 BNX2X_ERR("MCP response failure, aborting\n");
8493 rc = -EAGAIN;
8494 goto exit_leader_reset;
8495 }
8496 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
8497 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
8498 BNX2X_ERR("MCP unexpected resp, aborting\n");
8499 rc = -EAGAIN;
8500 goto exit_leader_reset2;
8501 }
8502 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
8503 if (!load_code) {
8504 BNX2X_ERR("MCP response failure, aborting\n");
8505 rc = -EAGAIN;
8506 goto exit_leader_reset2;
8507 }
8508 }
8461 8509
8462 /* Try to recover after the failure */ 8510 /* Try to recover after the failure */
8463 if (bnx2x_process_kill(bp, global)) { 8511 if (bnx2x_process_kill(bp, global)) {
8464 netdev_err(bp->dev, "Something bad had happen on engine %d! " 8512 netdev_err(bp->dev, "Something bad had happen on engine %d! "
8465 "Aii!\n", BP_PATH(bp)); 8513 "Aii!\n", BP_PATH(bp));
8466 rc = -EAGAIN; 8514 rc = -EAGAIN;
8467 goto exit_leader_reset; 8515 goto exit_leader_reset2;
8468 } 8516 }
8469 8517
8470 /* 8518 /*
@@ -8475,6 +8523,12 @@ int bnx2x_leader_reset(struct bnx2x *bp)
8475 if (global) 8523 if (global)
8476 bnx2x_clear_reset_global(bp); 8524 bnx2x_clear_reset_global(bp);
8477 8525
8526exit_leader_reset2:
8527 /* unload "fake driver" if it was loaded */
8528 if (!global && !BP_NOMCP(bp)) {
8529 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
8530 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
8531 }
8478exit_leader_reset: 8532exit_leader_reset:
8479 bp->is_leader = 0; 8533 bp->is_leader = 0;
8480 bnx2x_release_leader_lock(bp); 8534 bnx2x_release_leader_lock(bp);
@@ -8511,13 +8565,16 @@ static inline void bnx2x_recovery_failed(struct bnx2x *bp)
8511static void bnx2x_parity_recover(struct bnx2x *bp) 8565static void bnx2x_parity_recover(struct bnx2x *bp)
8512{ 8566{
8513 bool global = false; 8567 bool global = false;
8568 u32 error_recovered, error_unrecovered;
8569 bool is_parity;
8514 8570
8515 DP(NETIF_MSG_HW, "Handling parity\n"); 8571 DP(NETIF_MSG_HW, "Handling parity\n");
8516 while (1) { 8572 while (1) {
8517 switch (bp->recovery_state) { 8573 switch (bp->recovery_state) {
8518 case BNX2X_RECOVERY_INIT: 8574 case BNX2X_RECOVERY_INIT:
8519 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); 8575 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8520 bnx2x_chk_parity_attn(bp, &global, false); 8576 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
8577 WARN_ON(!is_parity);
8521 8578
8522 /* Try to get a LEADER_LOCK HW lock */ 8579 /* Try to get a LEADER_LOCK HW lock */
8523 if (bnx2x_trylock_leader_lock(bp)) { 8580 if (bnx2x_trylock_leader_lock(bp)) {
@@ -8541,15 +8598,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
8541 8598
8542 bp->recovery_state = BNX2X_RECOVERY_WAIT; 8599 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8543 8600
8544 /*
8545 * Reset MCP command sequence number and MCP mail box
8546 * sequence as we are going to reset the MCP.
8547 */
8548 if (global) {
8549 bp->fw_seq = 0;
8550 bp->fw_drv_pulse_wr_seq = 0;
8551 }
8552
8553 /* Ensure "is_leader", MCP command sequence and 8601 /* Ensure "is_leader", MCP command sequence and
8554 * "recovery_state" update values are seen on other 8602 * "recovery_state" update values are seen on other
8555 * CPUs. 8603 * CPUs.
@@ -8561,10 +8609,10 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
8561 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); 8609 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8562 if (bp->is_leader) { 8610 if (bp->is_leader) {
8563 int other_engine = BP_PATH(bp) ? 0 : 1; 8611 int other_engine = BP_PATH(bp) ? 0 : 1;
8564 u32 other_load_counter = 8612 bool other_load_status =
8565 bnx2x_get_load_cnt(bp, other_engine); 8613 bnx2x_get_load_status(bp, other_engine);
8566 u32 load_counter = 8614 bool load_status =
8567 bnx2x_get_load_cnt(bp, BP_PATH(bp)); 8615 bnx2x_get_load_status(bp, BP_PATH(bp));
8568 global = bnx2x_reset_is_global(bp); 8616 global = bnx2x_reset_is_global(bp);
8569 8617
8570 /* 8618 /*
@@ -8575,8 +8623,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
8575 * the the gates will remain closed for that 8623 * the the gates will remain closed for that
8576 * engine. 8624 * engine.
8577 */ 8625 */
8578 if (load_counter || 8626 if (load_status ||
8579 (global && other_load_counter)) { 8627 (global && other_load_status)) {
8580 /* Wait until all other functions get 8628 /* Wait until all other functions get
8581 * down. 8629 * down.
8582 */ 8630 */
@@ -8633,13 +8681,34 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
8633 return; 8681 return;
8634 } 8682 }
8635 8683
8636 if (bnx2x_nic_load(bp, LOAD_NORMAL)) 8684 error_recovered =
8637 bnx2x_recovery_failed(bp); 8685 bp->eth_stats.recoverable_error;
8638 else { 8686 error_unrecovered =
8687 bp->eth_stats.unrecoverable_error;
8688 bp->recovery_state =
8689 BNX2X_RECOVERY_NIC_LOADING;
8690 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
8691 error_unrecovered++;
8692 netdev_err(bp->dev,
8693 "Recovery failed. "
8694 "Power cycle "
8695 "needed\n");
8696 /* Disconnect this device */
8697 netif_device_detach(bp->dev);
8698 /* Shut down the power */
8699 bnx2x_set_power_state(
8700 bp, PCI_D3hot);
8701 smp_mb();
8702 } else {
8639 bp->recovery_state = 8703 bp->recovery_state =
8640 BNX2X_RECOVERY_DONE; 8704 BNX2X_RECOVERY_DONE;
8705 error_recovered++;
8641 smp_mb(); 8706 smp_mb();
8642 } 8707 }
8708 bp->eth_stats.recoverable_error =
8709 error_recovered;
8710 bp->eth_stats.unrecoverable_error =
8711 error_unrecovered;
8643 8712
8644 return; 8713 return;
8645 } 8714 }
@@ -8795,11 +8864,13 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8795{ 8864{
8796 u32 val; 8865 u32 val;
8797 8866
8798 /* Check if there is any driver already loaded */ 8867 /* possibly another driver is trying to reset the chip */
8799 val = REG_RD(bp, MISC_REG_UNPREPARED); 8868 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8800 if (val == 0x1) { 8869
8870 /* check if doorbell queue is reset */
8871 if (REG_RD(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET)
8872 & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
8801 8873
8802 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8803 /* 8874 /*
8804 * Check if it is the UNDI driver 8875 * Check if it is the UNDI driver
8805 * UNDI driver initializes CID offset for normal bell to 0x7 8876 * UNDI driver initializes CID offset for normal bell to 0x7
@@ -8887,14 +8958,11 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8887 8958
8888 /* restore our func and fw_seq */ 8959 /* restore our func and fw_seq */
8889 bp->pf_num = orig_pf_num; 8960 bp->pf_num = orig_pf_num;
8890 bp->fw_seq =
8891 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8892 DRV_MSG_SEQ_NUMBER_MASK);
8893 } 8961 }
8894
8895 /* now it's safe to release the lock */
8896 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8897 } 8962 }
8963
8964 /* now it's safe to release the lock */
8965 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8898} 8966}
8899 8967
8900static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 8968static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
@@ -9915,16 +9983,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9915 9983
9916 bnx2x_get_cnic_info(bp); 9984 bnx2x_get_cnic_info(bp);
9917 9985
9918 /* Get current FW pulse sequence */
9919 if (!BP_NOMCP(bp)) {
9920 int mb_idx = BP_FW_MB_IDX(bp);
9921
9922 bp->fw_drv_pulse_wr_seq =
9923 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
9924 DRV_PULSE_SEQ_MASK);
9925 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
9926 }
9927
9928 return rc; 9986 return rc;
9929} 9987}
9930 9988
@@ -10094,14 +10152,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10094 if (!BP_NOMCP(bp)) 10152 if (!BP_NOMCP(bp))
10095 bnx2x_undi_unload(bp); 10153 bnx2x_undi_unload(bp);
10096 10154
10097 /* init fw_seq after undi_unload! */
10098 if (!BP_NOMCP(bp)) {
10099 bp->fw_seq =
10100 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
10101 DRV_MSG_SEQ_NUMBER_MASK);
10102 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10103 }
10104
10105 if (CHIP_REV_IS_FPGA(bp)) 10155 if (CHIP_REV_IS_FPGA(bp))
10106 dev_err(&bp->pdev->dev, "FPGA detected\n"); 10156 dev_err(&bp->pdev->dev, "FPGA detected\n");
10107 10157
@@ -10183,14 +10233,14 @@ static int bnx2x_open(struct net_device *dev)
10183 struct bnx2x *bp = netdev_priv(dev); 10233 struct bnx2x *bp = netdev_priv(dev);
10184 bool global = false; 10234 bool global = false;
10185 int other_engine = BP_PATH(bp) ? 0 : 1; 10235 int other_engine = BP_PATH(bp) ? 0 : 1;
10186 u32 other_load_counter, load_counter; 10236 bool other_load_status, load_status;
10187 10237
10188 netif_carrier_off(dev); 10238 netif_carrier_off(dev);
10189 10239
10190 bnx2x_set_power_state(bp, PCI_D0); 10240 bnx2x_set_power_state(bp, PCI_D0);
10191 10241
10192 other_load_counter = bnx2x_get_load_cnt(bp, other_engine); 10242 other_load_status = bnx2x_get_load_status(bp, other_engine);
10193 load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp)); 10243 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
10194 10244
10195 /* 10245 /*
10196 * If parity had happen during the unload, then attentions 10246 * If parity had happen during the unload, then attentions
@@ -10216,8 +10266,8 @@ static int bnx2x_open(struct net_device *dev)
10216 * global blocks only the first in the chip should try 10266 * global blocks only the first in the chip should try
10217 * to recover. 10267 * to recover.
10218 */ 10268 */
10219 if ((!load_counter && 10269 if ((!load_status &&
10220 (!global || !other_load_counter)) && 10270 (!global || !other_load_status)) &&
10221 bnx2x_trylock_leader_lock(bp) && 10271 bnx2x_trylock_leader_lock(bp) &&
10222 !bnx2x_leader_reset(bp)) { 10272 !bnx2x_leader_reset(bp)) {
10223 netdev_info(bp->dev, "Recovered in open\n"); 10273 netdev_info(bp->dev, "Recovered in open\n");
@@ -10536,6 +10586,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10536{ 10586{
10537 struct bnx2x *bp; 10587 struct bnx2x *bp;
10538 int rc; 10588 int rc;
10589 u32 pci_cfg_dword;
10539 bool chip_is_e1x = (board_type == BCM57710 || 10590 bool chip_is_e1x = (board_type == BCM57710 ||
10540 board_type == BCM57711 || 10591 board_type == BCM57711 ||
10541 board_type == BCM57711E); 10592 board_type == BCM57711E);
@@ -10546,7 +10597,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10546 bp->dev = dev; 10597 bp->dev = dev;
10547 bp->pdev = pdev; 10598 bp->pdev = pdev;
10548 bp->flags = 0; 10599 bp->flags = 0;
10549 bp->pf_num = PCI_FUNC(pdev->devfn);
10550 10600
10551 rc = pci_enable_device(pdev); 10601 rc = pci_enable_device(pdev);
10552 if (rc) { 10602 if (rc) {
@@ -10613,6 +10663,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10613 goto err_out_release; 10663 goto err_out_release;
10614 } 10664 }
10615 10665
10666 /* In E1/E1H use pci device function given by kernel.
10667 * In E2/E3 read physical function from ME register since these chips
10668 * support Physical Device Assignment where kernel BDF maybe arbitrary
10669 * (depending on hypervisor).
10670 */
10671 if (chip_is_e1x)
10672 bp->pf_num = PCI_FUNC(pdev->devfn);
10673 else {/* chip is E2/3*/
10674 pci_read_config_dword(bp->pdev,
10675 PCICFG_ME_REGISTER, &pci_cfg_dword);
10676 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
10677 ME_REG_ABS_PF_NUM_SHIFT);
10678 }
10679 DP(BNX2X_MSG_SP, "me reg PF num: %d\n", bp->pf_num);
10680
10616 bnx2x_set_power_state(bp, PCI_D0); 10681 bnx2x_set_power_state(bp, PCI_D0);
10617 10682
10618 /* clean indirect addresses */ 10683 /* clean indirect addresses */
@@ -10642,7 +10707,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10642 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 10707 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
10643 10708
10644 /* Reset the load counter */ 10709 /* Reset the load counter */
10645 bnx2x_clear_load_cnt(bp); 10710 bnx2x_clear_load_status(bp);
10646 10711
10647 dev->watchdog_timeo = TX_TIMEOUT; 10712 dev->watchdog_timeo = TX_TIMEOUT;
10648 10713
@@ -10829,10 +10894,8 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
10829do { \ 10894do { \
10830 u32 len = be32_to_cpu(fw_hdr->arr.len); \ 10895 u32 len = be32_to_cpu(fw_hdr->arr.len); \
10831 bp->arr = kmalloc(len, GFP_KERNEL); \ 10896 bp->arr = kmalloc(len, GFP_KERNEL); \
10832 if (!bp->arr) { \ 10897 if (!bp->arr) \
10833 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
10834 goto lbl; \ 10898 goto lbl; \
10835 } \
10836 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ 10899 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
10837 (u8 *)bp->arr, len); \ 10900 (u8 *)bp->arr, len); \
10838} while (0) 10901} while (0)
@@ -11070,10 +11133,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11070 11133
11071 /* dev zeroed in init_etherdev */ 11134 /* dev zeroed in init_etherdev */
11072 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 11135 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
11073 if (!dev) { 11136 if (!dev)
11074 dev_err(&pdev->dev, "Cannot allocate net device\n");
11075 return -ENOMEM; 11137 return -ENOMEM;
11076 }
11077 11138
11078 bp = netdev_priv(dev); 11139 bp = netdev_priv(dev);
11079 11140
@@ -11295,13 +11356,6 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
11295 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 11356 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11296 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 11357 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11297 BNX2X_ERR("BAD MCP validity signature\n"); 11358 BNX2X_ERR("BAD MCP validity signature\n");
11298
11299 if (!BP_NOMCP(bp)) {
11300 bp->fw_seq =
11301 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11302 DRV_MSG_SEQ_NUMBER_MASK);
11303 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11304 }
11305} 11359}
11306 11360
11307/** 11361/**
@@ -11557,6 +11611,13 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
11557 return -EIO; 11611 return -EIO;
11558#endif 11612#endif
11559 11613
11614 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
11615 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
11616 netdev_err(dev, "Handling parity error recovery. Try again "
11617 "later\n");
11618 return -EAGAIN;
11619 }
11620
11560 spin_lock_bh(&bp->spq_lock); 11621 spin_lock_bh(&bp->spq_lock);
11561 11622
11562 for (i = 0; i < count; i++) { 11623 for (i = 0; i < count; i++) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index dddbcf6e154e..c95d9dcac6df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
1/* bnx2x_reg.h: Broadcom Everest network driver. 1/* bnx2x_reg.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -5731,6 +5731,7 @@
5731#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 5731#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
5732#define MISC_REGISTERS_GPIO_SET_POS 8 5732#define MISC_REGISTERS_GPIO_SET_POS 8
5733#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 5733#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
5734#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19)
5734#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29) 5735#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
5735#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) 5736#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
5736#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26) 5737#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
@@ -5783,15 +5784,17 @@
5783#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 5784#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
5784#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 5785#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
5785#define MISC_REGISTERS_SPIO_SET_POS 8 5786#define MISC_REGISTERS_SPIO_SET_POS 8
5786#define HW_LOCK_DRV_FLAGS 10
5787#define HW_LOCK_MAX_RESOURCE_VALUE 31 5787#define HW_LOCK_MAX_RESOURCE_VALUE 31
5788#define HW_LOCK_RESOURCE_DRV_FLAGS 10
5788#define HW_LOCK_RESOURCE_GPIO 1 5789#define HW_LOCK_RESOURCE_GPIO 1
5789#define HW_LOCK_RESOURCE_MDIO 0 5790#define HW_LOCK_RESOURCE_MDIO 0
5791#define HW_LOCK_RESOURCE_NVRAM 12
5790#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 5792#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
5791#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 5793#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
5792#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 5794#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
5793#define HW_LOCK_RESOURCE_SPIO 2 5795#define HW_LOCK_RESOURCE_RECOVERY_REG 11
5794#define HW_LOCK_RESOURCE_RESET 5 5796#define HW_LOCK_RESOURCE_RESET 5
5797#define HW_LOCK_RESOURCE_SPIO 2
5795#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) 5798#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
5796#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) 5799#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
5797#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) 5800#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6023,7 +6026,8 @@
6023#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23) 6026#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
6024#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24) 6027#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
6025#define PCICFG_GRC_ADDRESS 0x78 6028#define PCICFG_GRC_ADDRESS 0x78
6026#define PCICFG_GRC_DATA 0x80 6029#define PCICFG_GRC_DATA 0x80
6030#define PCICFG_ME_REGISTER 0x98
6027#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0 6031#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
6028#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16) 6032#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
6029#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27) 6033#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index cb6339c35571..ac15f747f8da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,6 +1,6 @@
1/* bnx2x_sp.c: Broadcom Everest network driver. 1/* bnx2x_sp.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright 2011 Broadcom Corporation 3 * Copyright (c) 2011-2012 Broadcom Corporation
4 * 4 *
5 * Unless you and Broadcom execute a separate written software license 5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you 6 * agreement governing use of this software, this software is licensed to you
@@ -1836,6 +1836,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1836 rc = exeq->remove(bp, exeq->owner, exeq_pos); 1836 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1837 if (rc) { 1837 if (rc) {
1838 BNX2X_ERR("Failed to remove command\n"); 1838 BNX2X_ERR("Failed to remove command\n");
1839 spin_unlock_bh(&exeq->lock);
1839 return rc; 1840 return rc;
1840 } 1841 }
1841 list_del(&exeq_pos->link); 1842 list_del(&exeq_pos->link);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 66da39f0c84a..71e039b618a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,6 +1,6 @@
1/* bnx2x_sp.h: Broadcom Everest network driver. 1/* bnx2x_sp.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright 2011 Broadcom Corporation 3 * Copyright (c) 2011-2012 Broadcom Corporation
4 * 4 *
5 * Unless you and Broadcom execute a separate written software license 5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you 6 * agreement governing use of this software, this software is licensed to you
@@ -803,10 +803,10 @@ enum bnx2x_q_type {
803}; 803};
804 804
805#define BNX2X_PRIMARY_CID_INDEX 0 805#define BNX2X_PRIMARY_CID_INDEX 0
806#define BNX2X_MULTI_TX_COS_E1X 1 806#define BNX2X_MULTI_TX_COS_E1X 3 /* QM only */
807#define BNX2X_MULTI_TX_COS_E2_E3A0 2 807#define BNX2X_MULTI_TX_COS_E2_E3A0 2
808#define BNX2X_MULTI_TX_COS_E3B0 3 808#define BNX2X_MULTI_TX_COS_E3B0 3
809#define BNX2X_MULTI_TX_COS BNX2X_MULTI_TX_COS_E3B0 809#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
810 810
811 811
812struct bnx2x_queue_init_params { 812struct bnx2x_queue_init_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index bc0121ac291e..7b9b304b9107 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
1/* bnx2x_stats.c: Broadcom Everest network driver. 1/* bnx2x_stats.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 683deb053109..7e979686cd68 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
1/* bnx2x_stats.h: Broadcom Everest network driver. 1/* bnx2x_stats.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -199,6 +199,10 @@ struct bnx2x_eth_stats {
199 u32 pfc_frames_received_lo; 199 u32 pfc_frames_received_lo;
200 u32 pfc_frames_sent_hi; 200 u32 pfc_frames_sent_hi;
201 u32 pfc_frames_sent_lo; 201 u32 pfc_frames_sent_lo;
202
203 /* Recovery */
204 u32 recoverable_error;
205 u32 unrecoverable_error;
202}; 206};
203 207
204 208
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 084904ceaa30..49e7a258da8a 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2623,8 +2623,6 @@ static int __devinit sbmac_probe(struct platform_device *pldev)
2623 */ 2623 */
2624 dev = alloc_etherdev(sizeof(struct sbmac_softc)); 2624 dev = alloc_etherdev(sizeof(struct sbmac_softc));
2625 if (!dev) { 2625 if (!dev) {
2626 printk(KERN_ERR "%s: unable to allocate etherdev\n",
2627 dev_name(&pldev->dev));
2628 err = -ENOMEM; 2626 err = -ENOMEM;
2629 goto out_unmap; 2627 goto out_unmap;
2630 } 2628 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a1f2e0fed78b..3bf3adca3695 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -15471,7 +15471,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
15471 15471
15472 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 15472 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15473 if (!dev) { 15473 if (!dev) {
15474 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15475 err = -ENOMEM; 15474 err = -ENOMEM;
15476 goto err_out_power_down; 15475 goto err_out_power_down;
15477 } 15476 }
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 871c6309334c..48f877337390 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -297,6 +297,7 @@ enum bfa_mode {
297#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */ 297#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
298#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */ 298#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
299#define BFA_TOTAL_FLASH_SIZE 0x400000 299#define BFA_TOTAL_FLASH_SIZE 0x400000
300#define BFA_FLASH_PART_FWIMG 2
300#define BFA_FLASH_PART_MFG 7 301#define BFA_FLASH_PART_MFG 7
301 302
302/* 303/*
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index be7d91e4b785..ff78f770dec9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3284,7 +3284,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3284 */ 3284 */
3285 netdev = alloc_etherdev(sizeof(struct bnad)); 3285 netdev = alloc_etherdev(sizeof(struct bnad));
3286 if (!netdev) { 3286 if (!netdev) {
3287 dev_err(&pdev->dev, "netdev allocation failed\n");
3288 err = -ENOMEM; 3287 err = -ENOMEM;
3289 return err; 3288 return err;
3290 } 3289 }
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 592ad3929f53..c9fdceb135f3 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -62,8 +62,6 @@ bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
62 if (!fw_debug->debug_buffer) { 62 if (!fw_debug->debug_buffer) {
63 kfree(fw_debug); 63 kfree(fw_debug);
64 fw_debug = NULL; 64 fw_debug = NULL;
65 pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
66 pci_name(bnad->pcidev));
67 return -ENOMEM; 65 return -ENOMEM;
68 } 66 }
69 67
@@ -105,8 +103,6 @@ bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
105 if (!fw_debug->debug_buffer) { 103 if (!fw_debug->debug_buffer) {
106 kfree(fw_debug); 104 kfree(fw_debug);
107 fw_debug = NULL; 105 fw_debug = NULL;
108 pr_warn("bna %s: Failed to allocate fwsave buffer\n",
109 pci_name(bnad->pcidev));
110 return -ENOMEM; 106 return -ENOMEM;
111 } 107 }
112 108
@@ -208,8 +204,6 @@ bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
208 if (!drv_info->debug_buffer) { 204 if (!drv_info->debug_buffer) {
209 kfree(drv_info); 205 kfree(drv_info);
210 drv_info = NULL; 206 drv_info = NULL;
211 pr_warn("bna %s: Failed to allocate drv info buffer\n",
212 pci_name(bnad->pcidev));
213 return -ENOMEM; 207 return -ENOMEM;
214 } 208 }
215 209
@@ -348,11 +342,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
348 342
349 /* Allocate memory to store the user space buf */ 343 /* Allocate memory to store the user space buf */
350 kern_buf = kzalloc(nbytes, GFP_KERNEL); 344 kern_buf = kzalloc(nbytes, GFP_KERNEL);
351 if (!kern_buf) { 345 if (!kern_buf)
352 pr_warn("bna %s: Failed to allocate user buffer\n",
353 pci_name(bnad->pcidev));
354 return -ENOMEM; 346 return -ENOMEM;
355 }
356 347
357 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { 348 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
358 kfree(kern_buf); 349 kfree(kern_buf);
@@ -373,11 +364,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
373 bnad->reglen = 0; 364 bnad->reglen = 0;
374 365
375 bnad->regdata = kzalloc(len << 2, GFP_KERNEL); 366 bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
376 if (!bnad->regdata) { 367 if (!bnad->regdata)
377 pr_warn("bna %s: Failed to allocate regrd buffer\n",
378 pci_name(bnad->pcidev));
379 return -ENOMEM; 368 return -ENOMEM;
380 }
381 369
382 bnad->reglen = len << 2; 370 bnad->reglen = len << 2;
383 rb = bfa_ioc_bar0(ioc); 371 rb = bfa_ioc_bar0(ioc);
@@ -421,11 +409,8 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
421 409
422 /* Allocate memory to store the user space buf */ 410 /* Allocate memory to store the user space buf */
423 kern_buf = kzalloc(nbytes, GFP_KERNEL); 411 kern_buf = kzalloc(nbytes, GFP_KERNEL);
424 if (!kern_buf) { 412 if (!kern_buf)
425 pr_warn("bna %s: Failed to allocate user buffer\n",
426 pci_name(bnad->pcidev));
427 return -ENOMEM; 413 return -ENOMEM;
428 }
429 414
430 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { 415 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
431 kfree(kern_buf); 416 kfree(kern_buf);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 9b44ec8096ba..a27c601af3d1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1072,6 +1072,47 @@ done:
1072 return ret; 1072 return ret;
1073} 1073}
1074 1074
1075static int
1076bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
1077{
1078 struct bnad *bnad = netdev_priv(netdev);
1079 struct bnad_iocmd_comp fcomp;
1080 const struct firmware *fw;
1081 int ret = 0;
1082
1083 ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
1084 if (ret) {
1085 pr_err("BNA: Can't locate firmware %s\n", eflash->data);
1086 goto out;
1087 }
1088
1089 fcomp.bnad = bnad;
1090 fcomp.comp_status = 0;
1091
1092 init_completion(&fcomp.comp);
1093 spin_lock_irq(&bnad->bna_lock);
1094 ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
1095 bnad->id, (u8 *)fw->data, fw->size, 0,
1096 bnad_cb_completion, &fcomp);
1097 if (ret != BFA_STATUS_OK) {
1098 pr_warn("BNA: Flash update failed with err: %d\n", ret);
1099 ret = -EIO;
1100 spin_unlock_irq(&bnad->bna_lock);
1101 goto out;
1102 }
1103
1104 spin_unlock_irq(&bnad->bna_lock);
1105 wait_for_completion(&fcomp.comp);
1106 if (fcomp.comp_status != BFA_STATUS_OK) {
1107 ret = -EIO;
1108 pr_warn("BNA: Firmware image update to flash failed with: %d\n",
1109 fcomp.comp_status);
1110 }
1111out:
1112 release_firmware(fw);
1113 return ret;
1114}
1115
1075static const struct ethtool_ops bnad_ethtool_ops = { 1116static const struct ethtool_ops bnad_ethtool_ops = {
1076 .get_settings = bnad_get_settings, 1117 .get_settings = bnad_get_settings,
1077 .set_settings = bnad_set_settings, 1118 .set_settings = bnad_set_settings,
@@ -1090,6 +1131,7 @@ static const struct ethtool_ops bnad_ethtool_ops = {
1090 .get_eeprom_len = bnad_get_eeprom_len, 1131 .get_eeprom_len = bnad_get_eeprom_len,
1091 .get_eeprom = bnad_get_eeprom, 1132 .get_eeprom = bnad_get_eeprom,
1092 .set_eeprom = bnad_set_eeprom, 1133 .set_eeprom = bnad_set_eeprom,
1134 .flash_device = bnad_flash_device,
1093}; 1135};
1094 1136
1095void 1137void
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 23200680d4c1..3c315f46859b 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1308,10 +1308,8 @@ static int __init macb_probe(struct platform_device *pdev)
1308 1308
1309 err = -ENOMEM; 1309 err = -ENOMEM;
1310 dev = alloc_etherdev(sizeof(*bp)); 1310 dev = alloc_etherdev(sizeof(*bp));
1311 if (!dev) { 1311 if (!dev)
1312 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
1313 goto err_out; 1312 goto err_out;
1314 }
1315 1313
1316 SET_NETDEV_DEV(dev, &pdev->dev); 1314 SET_NETDEV_DEV(dev, &pdev->dev);
1317 1315
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index e53365a71484..9045a451d4a9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2596,8 +2596,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2596 netdev = alloc_etherdev_mq(sizeof(struct port_info), 2596 netdev = alloc_etherdev_mq(sizeof(struct port_info),
2597 MAX_PORT_QSETS); 2597 MAX_PORT_QSETS);
2598 if (netdev == NULL) { 2598 if (netdev == NULL) {
2599 dev_err(&pdev->dev, "cannot allocate netdev for"
2600 " port %d\n", port_id);
2601 t4vf_free_vi(adapter, viid); 2599 t4vf_free_vi(adapter, viid);
2602 err = -ENOMEM; 2600 err = -ENOMEM;
2603 goto err_free_dev; 2601 goto err_free_dev;
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index ee93a2087fe6..54fd9c314b38 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,13 +32,13 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "2.1.1.31" 35#define DRV_VERSION "2.1.1.33"
36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
39 39
40#define ENIC_WQ_MAX 1 40#define ENIC_WQ_MAX 1
41#define ENIC_RQ_MAX 1 41#define ENIC_RQ_MAX 8
42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
44 44
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ab3f67f980d8..2838891a94ee 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2280,10 +2280,8 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2280 */ 2280 */
2281 2281
2282 netdev = alloc_etherdev(sizeof(struct enic)); 2282 netdev = alloc_etherdev(sizeof(struct enic));
2283 if (!netdev) { 2283 if (!netdev)
2284 pr_err("Etherdev alloc failed, aborting\n");
2285 return -ENOMEM; 2284 return -ENOMEM;
2286 }
2287 2285
2288 pci_set_drvdata(pdev, netdev); 2286 pci_set_drvdata(pdev, netdev);
2289 2287
@@ -2388,7 +2386,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2388 /* Allocate structure for port profiles */ 2386 /* Allocate structure for port profiles */
2389 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); 2387 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
2390 if (!enic->pp) { 2388 if (!enic->pp) {
2391 pr_err("port profile alloc failed, aborting\n");
2392 err = -ENOMEM; 2389 err = -ENOMEM;
2393 goto err_out_disable_sriov_pp; 2390 goto err_out_disable_sriov_pp;
2394 } 2391 }
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 31e7f9bc2067..298ad6f865be 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -439,11 +439,12 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
439 a1 = sizeof(struct vnic_devcmd_fw_info); 439 a1 = sizeof(struct vnic_devcmd_fw_info);
440 440
441 /* only get fw_info once and cache it */ 441 /* only get fw_info once and cache it */
442 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); 442 if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
443 if (err == ERR_ECMDUNKNOWN) { 443 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
444 &a0, &a1, wait);
445 else
444 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD, 446 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
445 &a0, &a1, wait); 447 &a0, &a1, wait);
446 }
447 } 448 }
448 449
449 *fw_info = vdev->fw_info; 450 *fw_info = vdev->fw_info;
@@ -504,13 +505,11 @@ int vnic_dev_enable_wait(struct vnic_dev *vdev)
504{ 505{
505 u64 a0 = 0, a1 = 0; 506 u64 a0 = 0, a1 = 0;
506 int wait = 1000; 507 int wait = 1000;
507 int err;
508 508
509 err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); 509 if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
510 if (err == ERR_ECMDUNKNOWN) 510 return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
511 else
511 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); 512 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
512
513 return err;
514} 513}
515 514
516int vnic_dev_disable(struct vnic_dev *vdev) 515int vnic_dev_disable(struct vnic_dev *vdev)
@@ -574,16 +573,15 @@ int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
574 int wait = 1000; 573 int wait = 1000;
575 int err; 574 int err;
576 575
577 err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait); 576 if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
578 if (err == ERR_ECMDUNKNOWN) { 577 return vnic_dev_cmd(vdev, CMD_HANG_RESET,
578 &a0, &a1, wait);
579 } else {
579 err = vnic_dev_soft_reset(vdev, arg); 580 err = vnic_dev_soft_reset(vdev, arg);
580 if (err) 581 if (err)
581 return err; 582 return err;
582
583 return vnic_dev_init(vdev, 0); 583 return vnic_dev_init(vdev, 0);
584 } 584 }
585
586 return err;
587} 585}
588 586
589int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done) 587int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
@@ -594,11 +592,13 @@ int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
594 592
595 *done = 0; 593 *done = 0;
596 594
597 err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait); 595 if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
598 if (err) { 596 err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
599 if (err == ERR_ECMDUNKNOWN) 597 &a0, &a1, wait);
600 return vnic_dev_soft_reset_done(vdev, done); 598 if (err)
601 return err; 599 return err;
600 } else {
601 return vnic_dev_soft_reset_done(vdev, done);
602 } 602 }
603 603
604 *done = (a0 == 0); 604 *done = (a0 == 0);
@@ -691,13 +691,12 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
691{ 691{
692 u64 a0 = ig_vlan_rewrite_mode, a1 = 0; 692 u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
693 int wait = 1000; 693 int wait = 1000;
694 int err;
695 694
696 err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait); 695 if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
697 if (err == ERR_ECMDUNKNOWN) 696 return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
697 &a0, &a1, wait);
698 else
698 return 0; 699 return 0;
699
700 return err;
701} 700}
702 701
703static int vnic_dev_notify_setcmd(struct vnic_dev *vdev, 702static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
@@ -835,7 +834,10 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
835 834
836 memset(vdev->args, 0, sizeof(vdev->args)); 835 memset(vdev->args, 0, sizeof(vdev->args));
837 836
838 err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait); 837 if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
838 err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
839 else
840 err = ERR_ECMDUNKNOWN;
839 841
840 /* Use defaults when firmware doesn't support the devcmd at all or 842 /* Use defaults when firmware doesn't support the devcmd at all or
841 * supports it for only specific hardware 843 * supports it for only specific hardware
@@ -848,9 +850,11 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
848 return 0; 850 return 0;
849 } 851 }
850 852
851 vdev->intr_coal_timer_info.mul = (u32) vdev->args[0]; 853 if (!err) {
852 vdev->intr_coal_timer_info.div = (u32) vdev->args[1]; 854 vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
853 vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2]; 855 vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
856 vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
857 }
854 858
855 return err; 859 return err;
856} 860}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 34105e0951a5..7e1488fc8ab2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -38,10 +38,8 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
38 38
39 for (i = 0; i < blks; i++) { 39 for (i = 0; i < blks; i++) {
40 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); 40 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
41 if (!rq->bufs[i]) { 41 if (!rq->bufs[i])
42 pr_err("Failed to alloc rq_bufs\n");
43 return -ENOMEM; 42 return -ENOMEM;
44 }
45 } 43 }
46 44
47 for (i = 0; i < blks; i++) { 45 for (i = 0; i < blks; i++) {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index df61bd932ea6..5e0d7a2be9bc 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -38,10 +38,8 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
38 38
39 for (i = 0; i < blks; i++) { 39 for (i = 0; i < blks; i++) {
40 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); 40 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
41 if (!wq->bufs[i]) { 41 if (!wq->bufs[i])
42 pr_err("Failed to alloc wq_bufs\n");
43 return -ENOMEM; 42 return -ENOMEM;
44 }
45 } 43 }
46 44
47 for (i = 0; i < blks; i++) { 45 for (i = 0; i < blks; i++) {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index f801754c71a7..493cc6202081 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1373,10 +1373,8 @@ dm9000_probe(struct platform_device *pdev)
1373 1373
1374 /* Init network device */ 1374 /* Init network device */
1375 ndev = alloc_etherdev(sizeof(struct board_info)); 1375 ndev = alloc_etherdev(sizeof(struct board_info));
1376 if (!ndev) { 1376 if (!ndev)
1377 dev_err(&pdev->dev, "could not allocate device.\n");
1378 return -ENOMEM; 1377 return -ENOMEM;
1379 }
1380 1378
1381 SET_NETDEV_DEV(ndev, &pdev->dev); 1379 SET_NETDEV_DEV(ndev, &pdev->dev);
1382 1380
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
index 25b8deedbef8..369858272650 100644
--- a/drivers/net/ethernet/dec/tulip/21142.c
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -1,5 +1,5 @@
1/* 1/*
2 drivers/net/tulip/21142.c 2 drivers/net/ethernet/dec/tulip/21142.c
3 3
4 Copyright 2000,2001 The Linux Kernel Team 4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker. 5 Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index 14d5b611783d..ed7d1dcd9566 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -1,5 +1,5 @@
1/* 1/*
2 drivers/net/tulip/eeprom.c 2 drivers/net/ethernet/dec/tulip/eeprom.c
3 3
4 Copyright 2000,2001 The Linux Kernel Team 4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker. 5 Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 4fb8c8c0a420..feaee7424bd9 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 drivers/net/tulip/interrupt.c 2 drivers/net/ethernet/dec/tulip/interrupt.c
3 3
4 Copyright 2000,2001 The Linux Kernel Team 4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker. 5 Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index beeb17b52ad4..ae937c6749e7 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -1,5 +1,5 @@
1/* 1/*
2 drivers/net/tulip/media.c 2 drivers/net/ethernet/dec/tulip/media.c
3 3
4 Copyright 2000,2001 The Linux Kernel Team 4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker. 5 Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 9c16e4ad02a6..5364563c4378 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -1,5 +1,5 @@
1/* 1/*
2 drivers/net/tulip/pnic.c 2 drivers/net/ethernet/dec/tulip/pnic.c
3 3
4 Copyright 2000,2001 The Linux Kernel Team 4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker. 5 Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 04a7e477eaff..5895fc43f6e0 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -1,5 +1,5 @@
1/* 1/*
2 drivers/net/tulip/pnic2.c 2 drivers/net/ethernet/dec/tulip/pnic2.c
3 3
4 Copyright 2000,2001 The Linux Kernel Team 4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker. 5 Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 19078d28ffb9..768379b8aee9 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -1,5 +1,5 @@
1/* 1/*
2 drivers/net/tulip/timer.c 2 drivers/net/ethernet/dec/tulip/timer.c
3 3
4 Copyright 2000,2001 The Linux Kernel Team 4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker. 5 Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index fb3887c18dc6..38431a155f09 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -1,5 +1,5 @@
1/* 1/*
2 drivers/net/tulip/tulip.h 2 drivers/net/ethernet/dec/tulip/tulip.h
3 3
4 Copyright 2000,2001 The Linux Kernel Team 4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker. 5 Written/copyright 1994-2001 by Donald Becker.
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 4eb0d76145c2..17ecb18341c9 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1424,10 +1424,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1424 1424
1425 /* alloc_etherdev ensures aligned and zeroed private structures */ 1425 /* alloc_etherdev ensures aligned and zeroed private structures */
1426 dev = alloc_etherdev (sizeof (*tp)); 1426 dev = alloc_etherdev (sizeof (*tp));
1427 if (!dev) { 1427 if (!dev)
1428 pr_err("ether device alloc failed, aborting\n");
1429 return -ENOMEM; 1428 return -ENOMEM;
1430 }
1431 1429
1432 SET_NETDEV_DEV(dev, &pdev->dev); 1430 SET_NETDEV_DEV(dev, &pdev->dev);
1433 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { 1431 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 988b8eb24d37..b7c73eefb54b 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -222,10 +222,9 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
222 is available. 222 is available.
223 */ 223 */
224 dev = alloc_etherdev(sizeof(struct xircom_private)); 224 dev = alloc_etherdev(sizeof(struct xircom_private));
225 if (!dev) { 225 if (!dev)
226 pr_err("%s: failed to allocate etherdev\n", __func__);
227 goto device_fail; 226 goto device_fail;
228 } 227
229 private = netdev_priv(dev); 228 private = netdev_priv(dev);
230 229
231 /* Allocate the send/receive buffers */ 230 /* Allocate the send/receive buffers */
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 925c9bafc9b9..fe48cb7dde21 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -854,10 +854,8 @@ static int __devinit dnet_probe(struct platform_device *pdev)
854 854
855 err = -ENOMEM; 855 err = -ENOMEM;
856 dev = alloc_etherdev(sizeof(*bp)); 856 dev = alloc_etherdev(sizeof(*bp));
857 if (!dev) { 857 if (!dev)
858 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
859 goto err_out_release_mem; 858 goto err_out_release_mem;
860 }
861 859
862 /* TODO: Actually, we have some interesting features... */ 860 /* TODO: Actually, we have some interesting features... */
863 dev->features |= 0; 861 dev->features |= 0;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cbdec2536da6..74aa14811977 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -74,6 +74,9 @@ static inline char *nic_name(struct pci_dev *pdev)
74 74
75/* Number of bytes of an RX frame that are copied to skb->data */ 75/* Number of bytes of an RX frame that are copied to skb->data */
76#define BE_HDR_LEN ((u16) 64) 76#define BE_HDR_LEN ((u16) 64)
77/* allocate extra space to allow tunneling decapsulation without head reallocation */
78#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)
79
77#define BE_MAX_JUMBO_FRAME_SIZE 9018 80#define BE_MAX_JUMBO_FRAME_SIZE 9018
78#define BE_MIN_MTU 256 81#define BE_MIN_MTU 256
79 82
@@ -262,7 +265,6 @@ struct be_drv_stats {
262 u32 rx_drops_no_erx_descr; 265 u32 rx_drops_no_erx_descr;
263 u32 rx_drops_no_tpre_descr; 266 u32 rx_drops_no_tpre_descr;
264 u32 rx_drops_too_many_frags; 267 u32 rx_drops_too_many_frags;
265 u32 rx_drops_invalid_ring;
266 u32 forwarded_packets; 268 u32 forwarded_packets;
267 u32 rx_drops_mtu; 269 u32 rx_drops_mtu;
268 u32 rx_crc_errors; 270 u32 rx_crc_errors;
@@ -273,7 +275,7 @@ struct be_drv_stats {
273 u32 rx_in_range_errors; 275 u32 rx_in_range_errors;
274 u32 rx_out_range_errors; 276 u32 rx_out_range_errors;
275 u32 rx_frame_too_long; 277 u32 rx_frame_too_long;
276 u32 rx_address_match_errors; 278 u32 rx_address_mismatch_drops;
277 u32 rx_dropped_too_small; 279 u32 rx_dropped_too_small;
278 u32 rx_dropped_too_short; 280 u32 rx_dropped_too_short;
279 u32 rx_dropped_header_too_small; 281 u32 rx_dropped_header_too_small;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0fcb45624796..29dff7de66b6 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1257,11 +1257,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1257 } 1257 }
1258 req = embedded_payload(wrb); 1258 req = embedded_payload(wrb);
1259 1259
1260 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1261 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1262
1260 if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) 1263 if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
1261 req->hdr.version = 1; 1264 req->hdr.version = 1;
1262 1265
1263 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1266 req->hdr.domain = dom;
1264 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1265 1267
1266 status = be_mcc_notify_wait(adapter); 1268 status = be_mcc_notify_wait(adapter);
1267 if (!status) { 1269 if (!status) {
@@ -2298,52 +2300,81 @@ err:
2298 2300
2299/* Uses synchronous MCCQ */ 2301/* Uses synchronous MCCQ */
2300int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, 2302int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
2301 u32 *pmac_id) 2303 bool *pmac_id_active, u32 *pmac_id, u8 *mac)
2302{ 2304{
2303 struct be_mcc_wrb *wrb; 2305 struct be_mcc_wrb *wrb;
2304 struct be_cmd_req_get_mac_list *req; 2306 struct be_cmd_req_get_mac_list *req;
2305 int status; 2307 int status;
2306 int mac_count; 2308 int mac_count;
2309 struct be_dma_mem get_mac_list_cmd;
2310 int i;
2311
2312 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2313 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2314 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2315 get_mac_list_cmd.size,
2316 &get_mac_list_cmd.dma);
2317
2318 if (!get_mac_list_cmd.va) {
2319 dev_err(&adapter->pdev->dev,
2320 "Memory allocation failure during GET_MAC_LIST\n");
2321 return -ENOMEM;
2322 }
2307 2323
2308 spin_lock_bh(&adapter->mcc_lock); 2324 spin_lock_bh(&adapter->mcc_lock);
2309 2325
2310 wrb = wrb_from_mccq(adapter); 2326 wrb = wrb_from_mccq(adapter);
2311 if (!wrb) { 2327 if (!wrb) {
2312 status = -EBUSY; 2328 status = -EBUSY;
2313 goto err; 2329 goto out;
2314 } 2330 }
2315 req = embedded_payload(wrb); 2331
2332 req = get_mac_list_cmd.va;
2316 2333
2317 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2334 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2318 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req), 2335 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2319 wrb, NULL); 2336 wrb, &get_mac_list_cmd);
2320 2337
2321 req->hdr.domain = domain; 2338 req->hdr.domain = domain;
2339 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2340 req->perm_override = 1;
2322 2341
2323 status = be_mcc_notify_wait(adapter); 2342 status = be_mcc_notify_wait(adapter);
2324 if (!status) { 2343 if (!status) {
2325 struct be_cmd_resp_get_mac_list *resp = 2344 struct be_cmd_resp_get_mac_list *resp =
2326 embedded_payload(wrb); 2345 get_mac_list_cmd.va;
2327 int i; 2346 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2328 u8 *ctxt = &resp->context[0][0]; 2347 /* Mac list returned could contain one or more active mac_ids
2329 status = -EIO; 2348 * or one or more pseudo permanent mac addresses. If an active
2330 mac_count = resp->mac_count; 2349 * mac_id is present, return first active mac_id found
2331 be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); 2350 */
2332 for (i = 0; i < mac_count; i++) { 2351 for (i = 0; i < mac_count; i++) {
2333 if (!AMAP_GET_BITS(struct amap_get_mac_list_context, 2352 struct get_list_macaddr *mac_entry;
2334 act, ctxt)) { 2353 u16 mac_addr_size;
2335 *pmac_id = AMAP_GET_BITS 2354 u32 mac_id;
2336 (struct amap_get_mac_list_context, 2355
2337 macid, ctxt); 2356 mac_entry = &resp->macaddr_list[i];
2338 status = 0; 2357 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2339 break; 2358 /* mac_id is a 32 bit value and mac_addr size
2359 * is 6 bytes
2360 */
2361 if (mac_addr_size == sizeof(u32)) {
2362 *pmac_id_active = true;
2363 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2364 *pmac_id = le32_to_cpu(mac_id);
2365 goto out;
2340 } 2366 }
2341 ctxt += sizeof(struct amap_get_mac_list_context) / 8;
2342 } 2367 }
2368 /* If no active mac_id found, return first pseudo mac addr */
2369 *pmac_id_active = false;
2370 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2371 ETH_ALEN);
2343 } 2372 }
2344 2373
2345err: 2374out:
2346 spin_unlock_bh(&adapter->mcc_lock); 2375 spin_unlock_bh(&adapter->mcc_lock);
2376 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2377 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2347 return status; 2378 return status;
2348} 2379}
2349 2380
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index dca89249088f..5bb66c80f05e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -592,8 +592,8 @@ struct be_port_rxf_stats_v0 {
592 u32 rx_in_range_errors; /* dword 10*/ 592 u32 rx_in_range_errors; /* dword 10*/
593 u32 rx_out_range_errors; /* dword 11*/ 593 u32 rx_out_range_errors; /* dword 11*/
594 u32 rx_frame_too_long; /* dword 12*/ 594 u32 rx_frame_too_long; /* dword 12*/
595 u32 rx_address_match_errors; /* dword 13*/ 595 u32 rx_address_mismatch_drops; /* dword 13*/
596 u32 rx_vlan_mismatch; /* dword 14*/ 596 u32 rx_vlan_mismatch_drops; /* dword 14*/
597 u32 rx_dropped_too_small; /* dword 15*/ 597 u32 rx_dropped_too_small; /* dword 15*/
598 u32 rx_dropped_too_short; /* dword 16*/ 598 u32 rx_dropped_too_short; /* dword 16*/
599 u32 rx_dropped_header_too_small; /* dword 17*/ 599 u32 rx_dropped_header_too_small; /* dword 17*/
@@ -799,8 +799,8 @@ struct lancer_pport_stats {
799 u32 rx_control_frames_unknown_opcode_hi; 799 u32 rx_control_frames_unknown_opcode_hi;
800 u32 rx_in_range_errors; 800 u32 rx_in_range_errors;
801 u32 rx_out_of_range_errors; 801 u32 rx_out_of_range_errors;
802 u32 rx_address_match_errors; 802 u32 rx_address_mismatch_drops;
803 u32 rx_vlan_mismatch_errors; 803 u32 rx_vlan_mismatch_drops;
804 u32 rx_dropped_too_small; 804 u32 rx_dropped_too_small;
805 u32 rx_dropped_too_short; 805 u32 rx_dropped_too_short;
806 u32 rx_dropped_header_too_small; 806 u32 rx_dropped_header_too_small;
@@ -1346,22 +1346,36 @@ struct be_cmd_resp_set_func_cap {
1346 1346
1347/******************** GET/SET_MACLIST **************************/ 1347/******************** GET/SET_MACLIST **************************/
1348#define BE_MAX_MAC 64 1348#define BE_MAX_MAC 64
1349struct amap_get_mac_list_context {
1350 u8 macid[31];
1351 u8 act;
1352} __packed;
1353
1354struct be_cmd_req_get_mac_list { 1349struct be_cmd_req_get_mac_list {
1355 struct be_cmd_req_hdr hdr; 1350 struct be_cmd_req_hdr hdr;
1356 u32 rsvd; 1351 u8 mac_type;
1352 u8 perm_override;
1353 u16 iface_id;
1354 u32 mac_id;
1355 u32 rsvd[3];
1356} __packed;
1357
1358struct get_list_macaddr {
1359 u16 mac_addr_size;
1360 union {
1361 u8 macaddr[6];
1362 struct {
1363 u8 rsvd[2];
1364 u32 mac_id;
1365 } __packed s_mac_id;
1366 } __packed mac_addr_id;
1357} __packed; 1367} __packed;
1358 1368
1359struct be_cmd_resp_get_mac_list { 1369struct be_cmd_resp_get_mac_list {
1360 struct be_cmd_resp_hdr hdr; 1370 struct be_cmd_resp_hdr hdr;
1361 u8 mac_count; 1371 struct get_list_macaddr fd_macaddr; /* Factory default mac */
1362 u8 rsvd1; 1372 struct get_list_macaddr macid_macaddr; /* soft mac */
1363 u16 rsvd2; 1373 u8 true_mac_count;
1364 u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC]; 1374 u8 pseudo_mac_count;
1375 u8 mac_list_size;
1376 u8 rsvd;
1377 /* perm override mac */
1378 struct get_list_macaddr macaddr_list[BE_MAX_MAC];
1365} __packed; 1379} __packed;
1366 1380
1367struct be_cmd_req_set_mac_list { 1381struct be_cmd_req_set_mac_list {
@@ -1384,7 +1398,7 @@ struct be_port_rxf_stats_v1 {
1384 u32 rx_in_range_errors; 1398 u32 rx_in_range_errors;
1385 u32 rx_out_range_errors; 1399 u32 rx_out_range_errors;
1386 u32 rx_frame_too_long; 1400 u32 rx_frame_too_long;
1387 u32 rx_address_match_errors; 1401 u32 rx_address_mismatch_drops;
1388 u32 rx_dropped_too_small; 1402 u32 rx_dropped_too_small;
1389 u32 rx_dropped_too_short; 1403 u32 rx_dropped_too_short;
1390 u32 rx_dropped_header_too_small; 1404 u32 rx_dropped_header_too_small;
@@ -1575,7 +1589,7 @@ extern int be_cmd_req_native_mode(struct be_adapter *adapter);
1575extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); 1589extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1576extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); 1590extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1577extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, 1591extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
1578 u32 *pmac_id); 1592 bool *pmac_id_active, u32 *pmac_id, u8 *mac);
1579extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 1593extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
1580 u8 mac_count, u32 domain); 1594 u8 mac_count, u32 domain);
1581 1595
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 802e5ddef8a8..dc1383c396c0 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -42,15 +42,42 @@ static const struct be_ethtool_stat et_stats[] = {
42 {DRVSTAT_INFO(rx_alignment_symbol_errors)}, 42 {DRVSTAT_INFO(rx_alignment_symbol_errors)},
43 {DRVSTAT_INFO(rx_pause_frames)}, 43 {DRVSTAT_INFO(rx_pause_frames)},
44 {DRVSTAT_INFO(rx_control_frames)}, 44 {DRVSTAT_INFO(rx_control_frames)},
45 /* Received packets dropped when the Ethernet length field
46 * is not equal to the actual Ethernet data length.
47 */
45 {DRVSTAT_INFO(rx_in_range_errors)}, 48 {DRVSTAT_INFO(rx_in_range_errors)},
49 /* Received packets dropped when their length field is >= 1501 bytes
50 * and <= 1535 bytes.
51 */
46 {DRVSTAT_INFO(rx_out_range_errors)}, 52 {DRVSTAT_INFO(rx_out_range_errors)},
53 /* Received packets dropped when they are longer than 9216 bytes */
47 {DRVSTAT_INFO(rx_frame_too_long)}, 54 {DRVSTAT_INFO(rx_frame_too_long)},
48 {DRVSTAT_INFO(rx_address_match_errors)}, 55 /* Received packets dropped when they don't pass the unicast or
56 * multicast address filtering.
57 */
58 {DRVSTAT_INFO(rx_address_mismatch_drops)},
59 /* Received packets dropped when IP packet length field is less than
60 * the IP header length field.
61 */
49 {DRVSTAT_INFO(rx_dropped_too_small)}, 62 {DRVSTAT_INFO(rx_dropped_too_small)},
63 /* Received packets dropped when IP length field is greater than
64 * the actual packet length.
65 */
50 {DRVSTAT_INFO(rx_dropped_too_short)}, 66 {DRVSTAT_INFO(rx_dropped_too_short)},
67 /* Received packets dropped when the IP header length field is less
68 * than 5.
69 */
51 {DRVSTAT_INFO(rx_dropped_header_too_small)}, 70 {DRVSTAT_INFO(rx_dropped_header_too_small)},
71 /* Received packets dropped when the TCP header length field is less
72 * than 5 or the TCP header length + IP header length is more
73 * than IP packet length.
74 */
52 {DRVSTAT_INFO(rx_dropped_tcp_length)}, 75 {DRVSTAT_INFO(rx_dropped_tcp_length)},
53 {DRVSTAT_INFO(rx_dropped_runt)}, 76 {DRVSTAT_INFO(rx_dropped_runt)},
77 /* Number of received packets dropped when a fifo for descriptors going
78 * into the packet demux block overflows. In normal operation, this
79 * fifo must never overflow.
80 */
54 {DRVSTAT_INFO(rxpp_fifo_overflow_drop)}, 81 {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
55 {DRVSTAT_INFO(rx_input_fifo_overflow_drop)}, 82 {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
56 {DRVSTAT_INFO(rx_ip_checksum_errs)}, 83 {DRVSTAT_INFO(rx_ip_checksum_errs)},
@@ -59,16 +86,35 @@ static const struct be_ethtool_stat et_stats[] = {
59 {DRVSTAT_INFO(tx_pauseframes)}, 86 {DRVSTAT_INFO(tx_pauseframes)},
60 {DRVSTAT_INFO(tx_controlframes)}, 87 {DRVSTAT_INFO(tx_controlframes)},
61 {DRVSTAT_INFO(rx_priority_pause_frames)}, 88 {DRVSTAT_INFO(rx_priority_pause_frames)},
89 /* Received packets dropped when an internal fifo going into
90 * main packet buffer tank (PMEM) overflows.
91 */
62 {DRVSTAT_INFO(pmem_fifo_overflow_drop)}, 92 {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
63 {DRVSTAT_INFO(jabber_events)}, 93 {DRVSTAT_INFO(jabber_events)},
94 /* Received packets dropped due to lack of available HW packet buffers
95 * used to temporarily hold the received packets.
96 */
64 {DRVSTAT_INFO(rx_drops_no_pbuf)}, 97 {DRVSTAT_INFO(rx_drops_no_pbuf)},
65 {DRVSTAT_INFO(rx_drops_no_txpb)}, 98 /* Received packets dropped due to input receive buffer
99 * descriptor fifo overflowing.
100 */
66 {DRVSTAT_INFO(rx_drops_no_erx_descr)}, 101 {DRVSTAT_INFO(rx_drops_no_erx_descr)},
102 /* Packets dropped because the internal FIFO to the offloaded TCP
103 * receive processing block is full. This could happen only for
 104 * offloaded iSCSI or FCoE traffic.
105 */
67 {DRVSTAT_INFO(rx_drops_no_tpre_descr)}, 106 {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
107 /* Received packets dropped when they need more than 8
108 * receive buffers. This cannot happen as the driver configures
109 * 2048 byte receive buffers.
110 */
68 {DRVSTAT_INFO(rx_drops_too_many_frags)}, 111 {DRVSTAT_INFO(rx_drops_too_many_frags)},
69 {DRVSTAT_INFO(rx_drops_invalid_ring)},
70 {DRVSTAT_INFO(forwarded_packets)}, 112 {DRVSTAT_INFO(forwarded_packets)},
113 /* Received packets dropped when the frame length
114 * is more than 9018 bytes
115 */
71 {DRVSTAT_INFO(rx_drops_mtu)}, 116 {DRVSTAT_INFO(rx_drops_mtu)},
117 /* Number of packets dropped due to random early drop function */
72 {DRVSTAT_INFO(eth_red_drops)}, 118 {DRVSTAT_INFO(eth_red_drops)},
73 {DRVSTAT_INFO(be_on_die_temperature)} 119 {DRVSTAT_INFO(be_on_die_temperature)}
74}; 120};
@@ -84,8 +130,15 @@ static const struct be_ethtool_stat et_rx_stats[] = {
84 {DRVSTAT_RX_INFO(rx_events)}, 130 {DRVSTAT_RX_INFO(rx_events)},
85 {DRVSTAT_RX_INFO(rx_compl)}, 131 {DRVSTAT_RX_INFO(rx_compl)},
86 {DRVSTAT_RX_INFO(rx_mcast_pkts)}, 132 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
133 /* Number of page allocation failures while posting receive buffers
134 * to HW.
135 */
87 {DRVSTAT_RX_INFO(rx_post_fail)}, 136 {DRVSTAT_RX_INFO(rx_post_fail)},
 137 /* Received packets dropped due to skb allocation failure */
88 {DRVSTAT_RX_INFO(rx_drops_no_skbs)}, 138 {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
139 /* Received packets dropped due to lack of available fetched buffers
140 * posted by the driver.
141 */
89 {DRVSTAT_RX_INFO(rx_drops_no_frags)} 142 {DRVSTAT_RX_INFO(rx_drops_no_frags)}
90}; 143};
91#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats)) 144#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
@@ -97,9 +150,14 @@ static const struct be_ethtool_stat et_tx_stats[] = {
97 {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */ 150 {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
98 {DRVSTAT_TX_INFO(tx_bytes)}, 151 {DRVSTAT_TX_INFO(tx_bytes)},
99 {DRVSTAT_TX_INFO(tx_pkts)}, 152 {DRVSTAT_TX_INFO(tx_pkts)},
 153 /* Number of skbs queued for transmission by the driver */
100 {DRVSTAT_TX_INFO(tx_reqs)}, 154 {DRVSTAT_TX_INFO(tx_reqs)},
155 /* Number of TX work request blocks DMAed to HW */
101 {DRVSTAT_TX_INFO(tx_wrbs)}, 156 {DRVSTAT_TX_INFO(tx_wrbs)},
102 {DRVSTAT_TX_INFO(tx_compl)}, 157 {DRVSTAT_TX_INFO(tx_compl)},
158 /* Number of times the TX queue was stopped due to lack
 159 * of space in the TXQ.
160 */
103 {DRVSTAT_TX_INFO(tx_stops)} 161 {DRVSTAT_TX_INFO(tx_stops)}
104}; 162};
105#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) 163#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e703d64434f8..780498784d8e 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -286,7 +286,9 @@ static void populate_be2_stats(struct be_adapter *adapter)
286 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow; 286 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
287 drvs->rx_dropped_header_too_small = 287 drvs->rx_dropped_header_too_small =
288 port_stats->rx_dropped_header_too_small; 288 port_stats->rx_dropped_header_too_small;
289 drvs->rx_address_match_errors = port_stats->rx_address_match_errors; 289 drvs->rx_address_mismatch_drops =
290 port_stats->rx_address_mismatch_drops +
291 port_stats->rx_vlan_mismatch_drops;
290 drvs->rx_alignment_symbol_errors = 292 drvs->rx_alignment_symbol_errors =
291 port_stats->rx_alignment_symbol_errors; 293 port_stats->rx_alignment_symbol_errors;
292 294
@@ -298,9 +300,7 @@ static void populate_be2_stats(struct be_adapter *adapter)
298 else 300 else
299 drvs->jabber_events = rxf_stats->port0_jabber_events; 301 drvs->jabber_events = rxf_stats->port0_jabber_events;
300 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; 302 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
301 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
302 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; 303 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
303 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
304 drvs->forwarded_packets = rxf_stats->forwarded_packets; 304 drvs->forwarded_packets = rxf_stats->forwarded_packets;
305 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; 305 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
306 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; 306 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
@@ -337,7 +337,7 @@ static void populate_be3_stats(struct be_adapter *adapter)
337 port_stats->rx_dropped_header_too_small; 337 port_stats->rx_dropped_header_too_small;
338 drvs->rx_input_fifo_overflow_drop = 338 drvs->rx_input_fifo_overflow_drop =
339 port_stats->rx_input_fifo_overflow_drop; 339 port_stats->rx_input_fifo_overflow_drop;
340 drvs->rx_address_match_errors = port_stats->rx_address_match_errors; 340 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
341 drvs->rx_alignment_symbol_errors = 341 drvs->rx_alignment_symbol_errors =
342 port_stats->rx_alignment_symbol_errors; 342 port_stats->rx_alignment_symbol_errors;
343 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; 343 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
@@ -345,9 +345,7 @@ static void populate_be3_stats(struct be_adapter *adapter)
345 drvs->tx_controlframes = port_stats->tx_controlframes; 345 drvs->tx_controlframes = port_stats->tx_controlframes;
346 drvs->jabber_events = port_stats->jabber_events; 346 drvs->jabber_events = port_stats->jabber_events;
347 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; 347 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
348 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
349 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; 348 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
350 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
351 drvs->forwarded_packets = rxf_stats->forwarded_packets; 349 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; 350 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; 351 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
@@ -380,13 +378,14 @@ static void populate_lancer_stats(struct be_adapter *adapter)
380 drvs->rx_dropped_header_too_small = 378 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small; 379 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow; 380 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors; 381 drvs->rx_address_mismatch_drops =
382 pport_stats->rx_address_mismatch_drops +
383 pport_stats->rx_vlan_mismatch_drops;
384 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo; 384 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
385 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow; 385 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
386 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo; 386 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
387 drvs->tx_controlframes = pport_stats->tx_control_frames_lo; 387 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
388 drvs->jabber_events = pport_stats->rx_jabbers; 388 drvs->jabber_events = pport_stats->rx_jabbers;
389 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
390 drvs->forwarded_packets = pport_stats->num_forwards_lo; 389 drvs->forwarded_packets = pport_stats->num_forwards_lo;
391 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo; 390 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
392 drvs->rx_drops_too_many_frags = 391 drvs->rx_drops_too_many_frags =
@@ -1189,7 +1188,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1189 struct net_device *netdev = adapter->netdev; 1188 struct net_device *netdev = adapter->netdev;
1190 struct sk_buff *skb; 1189 struct sk_buff *skb;
1191 1190
1192 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN); 1191 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1193 if (unlikely(!skb)) { 1192 if (unlikely(!skb)) {
1194 rx_stats(rxo)->rx_drops_no_skbs++; 1193 rx_stats(rxo)->rx_drops_no_skbs++;
1195 be_rx_compl_discard(adapter, rxo, rxcp); 1194 be_rx_compl_discard(adapter, rxo, rxcp);
@@ -2609,19 +2608,28 @@ static void be_setup_init(struct be_adapter *adapter)
2609 adapter->eq_next_idx = 0; 2608 adapter->eq_next_idx = 0;
2610} 2609}
2611 2610
2612static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac) 2611static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2613{ 2612{
2614 u32 pmac_id; 2613 u32 pmac_id;
2615 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id); 2614 int status;
2616 if (status != 0) 2615 bool pmac_id_active;
2617 goto do_none; 2616
2618 status = be_cmd_mac_addr_query(adapter, mac, 2617 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2619 MAC_ADDRESS_TYPE_NETWORK, 2618 &pmac_id, mac);
2620 false, adapter->if_handle, pmac_id);
2621 if (status != 0) 2619 if (status != 0)
2622 goto do_none; 2620 goto do_none;
2623 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle, 2621
2624 &adapter->pmac_id, 0); 2622 if (pmac_id_active) {
2623 status = be_cmd_mac_addr_query(adapter, mac,
2624 MAC_ADDRESS_TYPE_NETWORK,
2625 false, adapter->if_handle, pmac_id);
2626
2627 if (!status)
2628 adapter->pmac_id = pmac_id;
2629 } else {
2630 status = be_cmd_pmac_add(adapter, mac,
2631 adapter->if_handle, &adapter->pmac_id, 0);
2632 }
2625do_none: 2633do_none:
2626 return status; 2634 return status;
2627} 2635}
@@ -2686,7 +2694,7 @@ static int be_setup(struct be_adapter *adapter)
2686 */ 2694 */
2687 if (!be_physfn(adapter)) { 2695 if (!be_physfn(adapter)) {
2688 if (lancer_chip(adapter)) 2696 if (lancer_chip(adapter))
2689 status = be_configure_mac_from_list(adapter, mac); 2697 status = be_add_mac_from_list(adapter, mac);
2690 else 2698 else
2691 status = be_cmd_mac_addr_query(adapter, mac, 2699 status = be_cmd_mac_addr_query(adapter, mac,
2692 MAC_ADDRESS_TYPE_NETWORK, false, 2700 MAC_ADDRESS_TYPE_NETWORK, false,
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 60f0e788cc25..0b723ff2294b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ethoc.c 2 * linux/drivers/net/ethernet/ethoc.c
3 * 3 *
4 * Copyright (C) 2007-2008 Avionic Design Development GmbH 4 * Copyright (C) 2007-2008 Avionic Design Development GmbH
5 * Copyright (C) 2008-2009 Avionic Design GmbH 5 * Copyright (C) 2008-2009 Avionic Design GmbH
@@ -913,7 +913,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
913 /* allocate networking device */ 913 /* allocate networking device */
914 netdev = alloc_etherdev(sizeof(struct ethoc)); 914 netdev = alloc_etherdev(sizeof(struct ethoc));
915 if (!netdev) { 915 if (!netdev) {
916 dev_err(&pdev->dev, "cannot allocate network device\n");
917 ret = -ENOMEM; 916 ret = -ENOMEM;
918 goto out; 917 goto out;
919 } 918 }
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1c7aad8fa19c..336edd7e0b78 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1739,21 +1739,6 @@ static struct platform_driver fec_driver = {
1739 .remove = __devexit_p(fec_drv_remove), 1739 .remove = __devexit_p(fec_drv_remove),
1740}; 1740};
1741 1741
1742static int __init 1742module_platform_driver(fec_driver);
1743fec_enet_module_init(void)
1744{
1745 printk(KERN_INFO "FEC Ethernet Driver\n");
1746
1747 return platform_driver_register(&fec_driver);
1748}
1749
1750static void __exit
1751fec_enet_cleanup(void)
1752{
1753 platform_driver_unregister(&fec_driver);
1754}
1755
1756module_exit(fec_enet_cleanup);
1757module_init(fec_enet_module_init);
1758 1743
1759MODULE_LICENSE("GPL"); 1744MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.h b/drivers/net/ethernet/freescale/fec_mpc52xx.h
index 41d2dffde55b..10afa54dd062 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.h
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/drivers/net/fec_mpc52xx/fec.h 2 * drivers/net/ethernet/freescale/fec_mpc52xx.h
3 * 3 *
4 * Driver for the MPC5200 Fast Ethernet Controller 4 * Driver for the MPC5200 Fast Ethernet Controller
5 * 5 *
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 39d160d353a4..adb0ae4e4195 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/gianfar.c 2 * drivers/net/ethernet/freescale/gianfar.c
3 * 3 *
4 * Gianfar Ethernet Driver 4 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers 5 * This driver is designed for the non-CPM ethernet controllers
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 40c33a7554c0..4fe0f342acec 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/gianfar.h 2 * drivers/net/ethernet/freescale/gianfar.h
3 * 3 *
4 * Gianfar Ethernet Driver 4 * Gianfar Ethernet Driver
5 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 5 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 5a3b2e5b2880..5a78d55f46e7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/gianfar_ethtool.c 2 * drivers/net/ethernet/freescale/gianfar_ethtool.c
3 * 3 *
4 * Gianfar Ethernet Driver 4 * Gianfar Ethernet Driver
5 * Ethtool support for Gianfar Enet 5 * Ethtool support for Gianfar Enet
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index 64f4094ac7f1..cd14a4d449c2 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/gianfar_sysfs.c 2 * drivers/net/ethernet/freescale/gianfar_sysfs.c
3 * 3 *
4 * Gianfar Ethernet Driver 4 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers 5 * This driver is designed for the non-CPM ethernet controllers
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 6a5ee0776b28..3598c5408e79 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2992,7 +2992,6 @@ static int __init hp100_isa_init(void)
2992 for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) { 2992 for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) {
2993 dev = alloc_etherdev(sizeof(struct hp100_private)); 2993 dev = alloc_etherdev(sizeof(struct hp100_private));
2994 if (!dev) { 2994 if (!dev) {
2995 printk(KERN_WARNING "hp100: no memory for network device\n");
2996 while (cards > 0) 2995 while (cards > 0)
2997 cleanup_dev(hp100_devlist[--cards]); 2996 cleanup_dev(hp100_devlist[--cards]);
2998 2997
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 414044b3cb11..02df5f5accb1 100644
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -454,8 +454,6 @@ init_rx_bufs(struct net_device *dev, int num) {
454 } 454 }
455 455
456 rfd->rbd = rbd; 456 rfd->rbd = rbd;
457 } else {
458 printk("Could not kmalloc rbd\n");
459 } 457 }
460 } 458 }
461 lp->rbd_tail->next = rfd->rbd; 459 lp->rbd_tail->next = rfd->rbd;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 6ef5e11d1c84..296cf8a0ee51 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -28,7 +28,6 @@ static int automatic_resume = 0; /* experimental .. better should be zero */
28static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ 28static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
29static int fifo=0x8; /* don't change */ 29static int fifo=0x8; /* don't change */
30 30
31#include <linux/module.h>
32#include <linux/kernel.h> 31#include <linux/kernel.h>
33#include <linux/string.h> 32#include <linux/string.h>
34#include <linux/errno.h> 33#include <linux/errno.h>
@@ -1151,28 +1150,6 @@ static void set_multicast_list(struct net_device *dev)
1151 netif_wake_queue(dev); 1150 netif_wake_queue(dev);
1152} 1151}
1153 1152
1154#ifdef MODULE
1155#error This code is not currently supported as a module
1156static struct net_device *dev_sun3_82586;
1157
1158int init_module(void)
1159{
1160 dev_sun3_82586 = sun3_82586_probe(-1);
1161 if (IS_ERR(dev_sun3_82586))
1162 return PTR_ERR(dev_sun3_82586);
1163 return 0;
1164}
1165
1166void cleanup_module(void)
1167{
1168 unsigned long ioaddr = dev_sun3_82586->base_addr;
1169 unregister_netdev(dev_sun3_82586);
1170 release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
1171 iounmap((void *)ioaddr);
1172 free_netdev(dev_sun3_82586);
1173}
1174#endif /* MODULE */
1175
1176#if 0 1153#if 0
1177/* 1154/*
1178 * DUMP .. we expect a not running CMD unit and enough space 1155 * DUMP .. we expect a not running CMD unit and enough space
@@ -1209,5 +1186,3 @@ void sun3_82586_dump(struct net_device *dev,void *ptr)
1209 printk("\n"); 1186 printk("\n");
1210} 1187}
1211#endif 1188#endif
1212
1213MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 6650068c996c..b8e46cc31e53 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ehea/ehea.h 2 * linux/drivers/net/ethernet/ibm/ehea/ehea.h
3 * 3 *
4 * eHEA ethernet device driver for IBM eServer System p 4 * eHEA ethernet device driver for IBM eServer System p
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 6bdd8e36e564..95837b99a464 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ehea/ehea_ethtool.c 2 * linux/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
3 * 3 *
4 * eHEA ethernet device driver for IBM eServer System p 4 * eHEA ethernet device driver for IBM eServer System p
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_hw.h b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
index 1a2fe4dc3eb3..180d4128a711 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_hw.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ehea/ehea_hw.h 2 * linux/drivers/net/ethernet/ibm/ehea/ehea_hw.h
3 * 3 *
4 * eHEA ethernet device driver for IBM eServer System p 4 * eHEA ethernet device driver for IBM eServer System p
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 5d5fb2627184..8b73dd472475 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ehea/ehea_main.c 2 * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
3 * 3 *
4 * eHEA ethernet device driver for IBM eServer System p 4 * eHEA ethernet device driver for IBM eServer System p
5 * 5 *
@@ -2980,7 +2980,6 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2980 dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES); 2980 dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2981 2981
2982 if (!dev) { 2982 if (!dev) {
2983 pr_err("no mem for net_device\n");
2984 ret = -ENOMEM; 2983 ret = -ENOMEM;
2985 goto out_err; 2984 goto out_err;
2986 } 2985 }
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
index 0506967b9044..30f903332e92 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ehea/ehea_phyp.c 2 * linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
3 * 3 *
4 * eHEA ethernet device driver for IBM eServer System p 4 * eHEA ethernet device driver for IBM eServer System p
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 2f8174c248bc..52c456ec4d6c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ehea/ehea_phyp.h 2 * linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
3 * 3 *
4 * eHEA ethernet device driver for IBM eServer System p 4 * eHEA ethernet device driver for IBM eServer System p
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index c25b05b94daa..4fb47f14dbfe 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ehea/ehea_qmr.c 2 * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
3 * 3 *
4 * eHEA ethernet device driver for IBM eServer System p 4 * eHEA ethernet device driver for IBM eServer System p
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
index 337a47ecf4aa..8e4a70c20ab7 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/net/ehea/ehea_qmr.h 2 * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
3 * 3 *
4 * eHEA ethernet device driver for IBM eServer System p 4 * eHEA ethernet device driver for IBM eServer System p
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 2abce965c7bd..dac7ffb4eaf1 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/core.c 2 * drivers/net/ethernet/ibm/emac/core.c
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller. 4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 * 5 *
@@ -2706,11 +2706,9 @@ static int __devinit emac_probe(struct platform_device *ofdev)
2706 /* Allocate our net_device structure */ 2706 /* Allocate our net_device structure */
2707 err = -ENOMEM; 2707 err = -ENOMEM;
2708 ndev = alloc_etherdev(sizeof(struct emac_instance)); 2708 ndev = alloc_etherdev(sizeof(struct emac_instance));
2709 if (!ndev) { 2709 if (!ndev)
2710 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2711 np->full_name);
2712 goto err_gone; 2710 goto err_gone;
2713 } 2711
2714 dev = netdev_priv(ndev); 2712 dev = netdev_priv(ndev);
2715 dev->ndev = ndev; 2713 dev->ndev = ndev;
2716 dev->ofdev = ofdev; 2714 dev->ofdev = ofdev;
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index fa3ec57935fa..bade29690c71 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/core.h 2 * drivers/net/ethernet/ibm/emac/core.h
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller. 4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
index 8c6c1e2a8750..b16b4828b64d 100644
--- a/drivers/net/ethernet/ibm/emac/debug.c
+++ b/drivers/net/ethernet/ibm/emac/debug.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/debug.c 2 * drivers/net/ethernet/ibm/emac/debug.c
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. 4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 90477fe69d0c..59a92d5870b5 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/debug.h 2 * drivers/net/ethernet/ibm/emac/debug.h
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. 4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 1568278d759a..b44bd243fb58 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/emac.h 2 * drivers/net/ethernet/ibm/emac/emac.h
3 * 3 *
4 * Register definitions for PowerPC 4xx on-chip ethernet contoller 4 * Register definitions for PowerPC 4xx on-chip ethernet contoller
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index f3c50b97ec61..479e43e2f1ef 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/mal.c 2 * drivers/net/ethernet/ibm/emac/mal.c
3 * 3 *
4 * Memory Access Layer (MAL) support 4 * Memory Access Layer (MAL) support
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index d06f985bda32..e431a32e3d69 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/mal.h 2 * drivers/net/ethernet/ibm/emac/mal.h
3 * 3 *
4 * Memory Access Layer (MAL) support 4 * Memory Access Layer (MAL) support
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index ab4e5969fe65..d3b9d103353e 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/phy.c 2 * drivers/net/ethernet/ibm/emac/phy.c
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support. 4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
5 * Borrowed from sungem_phy.c, though I only kept the generic MII 5 * Borrowed from sungem_phy.c, though I only kept the generic MII
diff --git a/drivers/net/ethernet/ibm/emac/phy.h b/drivers/net/ethernet/ibm/emac/phy.h
index 5d2bf4cbe50b..d7e41ec37467 100644
--- a/drivers/net/ethernet/ibm/emac/phy.h
+++ b/drivers/net/ethernet/ibm/emac/phy.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/phy.h 2 * drivers/net/ethernet/ibm/emac/phy.h
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support 4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 4fa53f3def64..d3123282e18e 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/rgmii.c 2 * drivers/net/ethernet/ibm/emac/rgmii.c
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. 4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
5 * 5 *
@@ -237,11 +237,8 @@ static int __devinit rgmii_probe(struct platform_device *ofdev)
237 237
238 rc = -ENOMEM; 238 rc = -ENOMEM;
239 dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL); 239 dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL);
240 if (dev == NULL) { 240 if (dev == NULL)
241 printk(KERN_ERR "%s: could not allocate RGMII device!\n",
242 np->full_name);
243 goto err_gone; 241 goto err_gone;
244 }
245 242
246 mutex_init(&dev->lock); 243 mutex_init(&dev->lock);
247 dev->ofdev = ofdev; 244 dev->ofdev = ofdev;
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
index 9296b6c5f920..668bceeff4a2 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.h
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/rgmii.h 2 * drivers/net/ethernet/ibm/emac/rgmii.h
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. 4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 5f51bf7c9dc5..872912ef518d 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/tah.c 2 * drivers/net/ethernet/ibm/emac/tah.c
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. 4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
5 * 5 *
@@ -96,11 +96,8 @@ static int __devinit tah_probe(struct platform_device *ofdev)
96 96
97 rc = -ENOMEM; 97 rc = -ENOMEM;
98 dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL); 98 dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
99 if (dev == NULL) { 99 if (dev == NULL)
100 printk(KERN_ERR "%s: could not allocate TAH device!\n",
101 np->full_name);
102 goto err_gone; 100 goto err_gone;
103 }
104 101
105 mutex_init(&dev->lock); 102 mutex_init(&dev->lock);
106 dev->ofdev = ofdev; 103 dev->ofdev = ofdev;
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
index 3437ab4964c7..350b7096a041 100644
--- a/drivers/net/ethernet/ibm/emac/tah.h
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/tah.h 2 * drivers/net/ethernet/ibm/emac/tah.h
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. 4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 97449e786d61..415e9b4d5408 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/zmii.c 2 * drivers/net/ethernet/ibm/emac/zmii.c
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. 4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 * 5 *
@@ -240,11 +240,8 @@ static int __devinit zmii_probe(struct platform_device *ofdev)
240 240
241 rc = -ENOMEM; 241 rc = -ENOMEM;
242 dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL); 242 dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL);
243 if (dev == NULL) { 243 if (dev == NULL)
244 printk(KERN_ERR "%s: could not allocate ZMII device!\n",
245 np->full_name);
246 goto err_gone; 244 goto err_gone;
247 }
248 245
249 mutex_init(&dev->lock); 246 mutex_init(&dev->lock);
250 dev->ofdev = ofdev; 247 dev->ofdev = ofdev;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index ceaed823a83c..455bfb085493 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/ibm_newemac/zmii.h 2 * drivers/net/ethernet/ibm/emac/zmii.h
3 * 3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. 4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 * 5 *
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index acc31af6594a..1cafa6562a06 100644
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1032,10 +1032,8 @@ static struct net_device *veth_probe_one(int vlan,
1032 } 1032 }
1033 1033
1034 dev = alloc_etherdev(sizeof (struct veth_port)); 1034 dev = alloc_etherdev(sizeof (struct veth_port));
1035 if (! dev) { 1035 if (!dev)
1036 veth_error("Unable to allocate net_device structure!\n");
1037 return NULL; 1036 return NULL;
1038 }
1039 1037
1040 port = netdev_priv(dev); 1038 port = netdev_priv(dev);
1041 1039
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 075451d0207d..3c636f16a3cb 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2233,7 +2233,6 @@ static int __devinit ipg_probe(struct pci_dev *pdev,
2233 */ 2233 */
2234 dev = alloc_etherdev(sizeof(struct ipg_nic_private)); 2234 dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2235 if (!dev) { 2235 if (!dev) {
2236 pr_err("%s: alloc_etherdev failed\n", pci_name(pdev));
2237 rc = -ENOMEM; 2236 rc = -ENOMEM;
2238 goto err_disable_0; 2237 goto err_disable_0;
2239 } 2238 }
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 9436397e5725..485ab8cdac48 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2751,11 +2751,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2751 struct nic *nic; 2751 struct nic *nic;
2752 int err; 2752 int err;
2753 2753
2754 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) { 2754 if (!(netdev = alloc_etherdev(sizeof(struct nic))))
2755 if (((1 << debug) - 1) & NETIF_MSG_PROBE)
2756 pr_err("Etherdev alloc failed, aborting\n");
2757 return -ENOMEM; 2755 return -ENOMEM;
2758 }
2759 2756
2760 netdev->netdev_ops = &e100_netdev_ops; 2757 netdev->netdev_ops = &e100_netdev_ops;
2761 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); 2758 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 669ca3800c01..363fd395c75b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -730,10 +730,8 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
730 eeprom.offset = 0; 730 eeprom.offset = 0;
731 731
732 data = kmalloc(eeprom.len, GFP_KERNEL); 732 data = kmalloc(eeprom.len, GFP_KERNEL);
733 if (!data) { 733 if (!data)
734 pr_err("Unable to allocate memory to dump EEPROM data\n");
735 return; 734 return;
736 }
737 735
738 ops->get_eeprom(netdev, &eeprom, data); 736 ops->get_eeprom(netdev, &eeprom, data);
739 737
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e1159e54334a..82a5d87c90c0 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -667,8 +667,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
667 udelay(1); 667 udelay(1);
668 668
669 if (hw->phy.autoneg_wait_to_complete) { 669 if (hw->phy.autoneg_wait_to_complete) {
670 e_dbg("Waiting for forced speed/duplex link " 670 e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
671 "on GG82563 phy.\n");
672 671
673 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 672 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
674 100000, &link); 673 100000, &link);
@@ -1502,8 +1501,7 @@ const struct e1000_info e1000_es2_info = {
1502 | FLAG_RX_NEEDS_RESTART /* errata */ 1501 | FLAG_RX_NEEDS_RESTART /* errata */
1503 | FLAG_TARC_SET_BIT_ZERO /* errata */ 1502 | FLAG_TARC_SET_BIT_ZERO /* errata */
1504 | FLAG_APME_CHECK_PORT_B 1503 | FLAG_APME_CHECK_PORT_B
1505 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ 1504 | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */
1506 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
1507 .flags2 = FLAG2_DMA_BURST, 1505 .flags2 = FLAG2_DMA_BURST,
1508 .pba = 38, 1506 .pba = 38,
1509 .max_hw_frame_size = DEFAULT_JUMBO, 1507 .max_hw_frame_size = DEFAULT_JUMBO,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index a3e65fd26e09..844907da8aaf 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -1227,6 +1227,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1227 case e1000_82572: 1227 case e1000_82572:
1228 reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); 1228 reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
1229 break; 1229 break;
1230 case e1000_82574:
1231 case e1000_82583:
1232 reg |= (1 << 26);
1233 break;
1230 default: 1234 default:
1231 break; 1235 break;
1232 } 1236 }
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 948c05db5d68..591b71324505 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel PRO/1000 Linux driver 3# Intel PRO/1000 Linux driver
4# Copyright(c) 1999 - 2011 Intel Corporation. 4# Copyright(c) 1999 - 2012 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
@@ -33,5 +33,6 @@
33obj-$(CONFIG_E1000E) += e1000e.o 33obj-$(CONFIG_E1000E) += e1000e.o
34 34
35e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ 35e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
36 lib.o phy.o param.o ethtool.o netdev.o 36 mac.o manage.o nvm.o phy.o \
37 param.o ethtool.o netdev.o
37 38
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index c516a7440bec..1af30b967a4f 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -126,6 +126,13 @@
126 E1000_RXDEXT_STATERR_CXE | \ 126 E1000_RXDEXT_STATERR_CXE | \
127 E1000_RXDEXT_STATERR_RXE) 127 E1000_RXDEXT_STATERR_RXE)
128 128
129#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
130#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
131#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
132#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
133#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
134#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
135
129#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 136#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
130 137
131/* Management Control */ 138/* Management Control */
@@ -326,6 +333,7 @@
326/* Receive Checksum Control */ 333/* Receive Checksum Control */
327#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 334#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
328#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ 335#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
336#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
329 337
330/* Header split receive */ 338/* Header split receive */
331#define E1000_RFCTL_NFSW_DIS 0x00000040 339#define E1000_RFCTL_NFSW_DIS 0x00000040
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index f478a22ed577..45e5ae8a9fb7 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -234,6 +234,7 @@ struct e1000_buffer {
234}; 234};
235 235
236struct e1000_ring { 236struct e1000_ring {
237 struct e1000_adapter *adapter; /* back pointer to adapter */
237 void *desc; /* pointer to ring memory */ 238 void *desc; /* pointer to ring memory */
238 dma_addr_t dma; /* phys address of ring */ 239 dma_addr_t dma; /* phys address of ring */
239 unsigned int size; /* length of ring in bytes */ 240 unsigned int size; /* length of ring in bytes */
@@ -242,8 +243,8 @@ struct e1000_ring {
242 u16 next_to_use; 243 u16 next_to_use;
243 u16 next_to_clean; 244 u16 next_to_clean;
244 245
245 u16 head; 246 void __iomem *head;
246 u16 tail; 247 void __iomem *tail;
247 248
248 /* array of buffer information structs */ 249 /* array of buffer information structs */
249 struct e1000_buffer *buffer_info; 250 struct e1000_buffer *buffer_info;
@@ -251,7 +252,7 @@ struct e1000_ring {
251 char name[IFNAMSIZ + 5]; 252 char name[IFNAMSIZ + 5];
252 u32 ims_val; 253 u32 ims_val;
253 u32 itr_val; 254 u32 itr_val;
254 u16 itr_register; 255 void __iomem *itr_register;
255 int set_itr; 256 int set_itr;
256 257
257 struct sk_buff *rx_skb_top; 258 struct sk_buff *rx_skb_top;
@@ -334,11 +335,10 @@ struct e1000_adapter {
334 /* 335 /*
335 * Rx 336 * Rx
336 */ 337 */
337 bool (*clean_rx) (struct e1000_adapter *adapter, 338 bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
338 int *work_done, int work_to_do) 339 int work_to_do) ____cacheline_aligned_in_smp;
339 ____cacheline_aligned_in_smp; 340 void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
340 void (*alloc_rx_buf) (struct e1000_adapter *adapter, 341 gfp_t gfp);
341 int cleaned_count, gfp_t gfp);
342 struct e1000_ring *rx_ring; 342 struct e1000_ring *rx_ring;
343 343
344 u32 rx_int_delay; 344 u32 rx_int_delay;
@@ -398,6 +398,9 @@ struct e1000_adapter {
398 398
399 bool idle_check; 399 bool idle_check;
400 int phy_hang_count; 400 int phy_hang_count;
401
402 u16 tx_ring_count;
403 u16 rx_ring_count;
401}; 404};
402 405
403struct e1000_info { 406struct e1000_info {
@@ -417,7 +420,7 @@ struct e1000_info {
417#define FLAG_HAS_FLASH (1 << 1) 420#define FLAG_HAS_FLASH (1 << 1)
418#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) 421#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
419#define FLAG_HAS_WOL (1 << 3) 422#define FLAG_HAS_WOL (1 << 3)
420#define FLAG_HAS_ERT (1 << 4) 423/* reserved bit4 */
421#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) 424#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
422#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 425#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
423#define FLAG_HAS_JUMBO_FRAMES (1 << 7) 426#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
@@ -427,7 +430,7 @@ struct e1000_info {
427#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 430#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
428#define FLAG_IS_QUAD_PORT_A (1 << 12) 431#define FLAG_IS_QUAD_PORT_A (1 << 12)
429#define FLAG_IS_QUAD_PORT (1 << 13) 432#define FLAG_IS_QUAD_PORT (1 << 13)
430#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) 433/* reserved bit14 */
431#define FLAG_APME_IN_WUC (1 << 15) 434#define FLAG_APME_IN_WUC (1 << 15)
432#define FLAG_APME_IN_CTRL3 (1 << 16) 435#define FLAG_APME_IN_CTRL3 (1 << 16)
433#define FLAG_APME_CHECK_PORT_B (1 << 17) 436#define FLAG_APME_CHECK_PORT_B (1 << 17)
@@ -492,10 +495,10 @@ extern void e1000e_down(struct e1000_adapter *adapter);
492extern void e1000e_reinit_locked(struct e1000_adapter *adapter); 495extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
493extern void e1000e_reset(struct e1000_adapter *adapter); 496extern void e1000e_reset(struct e1000_adapter *adapter);
494extern void e1000e_power_up_phy(struct e1000_adapter *adapter); 497extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
495extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); 498extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
496extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); 499extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
497extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 500extern void e1000e_free_rx_resources(struct e1000_ring *ring);
498extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 501extern void e1000e_free_tx_resources(struct e1000_ring *ring);
499extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 502extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
500 struct rtnl_link_stats64 503 struct rtnl_link_stats64
501 *stats); 504 *stats);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index fb2c28e799a2..92d5b6278955 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,7 @@
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/vmalloc.h>
37 38
38#include "e1000.h" 39#include "e1000.h"
39 40
@@ -605,94 +606,112 @@ static void e1000_get_ringparam(struct net_device *netdev,
605 struct ethtool_ringparam *ring) 606 struct ethtool_ringparam *ring)
606{ 607{
607 struct e1000_adapter *adapter = netdev_priv(netdev); 608 struct e1000_adapter *adapter = netdev_priv(netdev);
608 struct e1000_ring *tx_ring = adapter->tx_ring;
609 struct e1000_ring *rx_ring = adapter->rx_ring;
610 609
611 ring->rx_max_pending = E1000_MAX_RXD; 610 ring->rx_max_pending = E1000_MAX_RXD;
612 ring->tx_max_pending = E1000_MAX_TXD; 611 ring->tx_max_pending = E1000_MAX_TXD;
613 ring->rx_pending = rx_ring->count; 612 ring->rx_pending = adapter->rx_ring_count;
614 ring->tx_pending = tx_ring->count; 613 ring->tx_pending = adapter->tx_ring_count;
615} 614}
616 615
617static int e1000_set_ringparam(struct net_device *netdev, 616static int e1000_set_ringparam(struct net_device *netdev,
618 struct ethtool_ringparam *ring) 617 struct ethtool_ringparam *ring)
619{ 618{
620 struct e1000_adapter *adapter = netdev_priv(netdev); 619 struct e1000_adapter *adapter = netdev_priv(netdev);
621 struct e1000_ring *tx_ring, *tx_old; 620 struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
622 struct e1000_ring *rx_ring, *rx_old; 621 int err = 0, size = sizeof(struct e1000_ring);
623 int err; 622 bool set_tx = false, set_rx = false;
623 u16 new_rx_count, new_tx_count;
624 624
625 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 625 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
626 return -EINVAL; 626 return -EINVAL;
627 627
628 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 628 new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD,
629 usleep_range(1000, 2000); 629 E1000_MAX_RXD);
630 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
630 631
631 if (netif_running(adapter->netdev)) 632 new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD,
632 e1000e_down(adapter); 633 E1000_MAX_TXD);
634 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
633 635
634 tx_old = adapter->tx_ring; 636 if ((new_tx_count == adapter->tx_ring_count) &&
635 rx_old = adapter->rx_ring; 637 (new_rx_count == adapter->rx_ring_count))
638 /* nothing to do */
639 return 0;
636 640
637 err = -ENOMEM; 641 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
638 tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); 642 usleep_range(1000, 2000);
639 if (!tx_ring)
640 goto err_alloc_tx;
641 643
642 rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); 644 if (!netif_running(adapter->netdev)) {
643 if (!rx_ring) 645 /* Set counts now and allocate resources during open() */
644 goto err_alloc_rx; 646 adapter->tx_ring->count = new_tx_count;
647 adapter->rx_ring->count = new_rx_count;
648 adapter->tx_ring_count = new_tx_count;
649 adapter->rx_ring_count = new_rx_count;
650 goto clear_reset;
651 }
645 652
646 adapter->tx_ring = tx_ring; 653 set_tx = (new_tx_count != adapter->tx_ring_count);
647 adapter->rx_ring = rx_ring; 654 set_rx = (new_rx_count != adapter->rx_ring_count);
648 655
649 rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); 656 /* Allocate temporary storage for ring updates */
650 rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); 657 if (set_tx) {
651 rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); 658 temp_tx = vmalloc(size);
659 if (!temp_tx) {
660 err = -ENOMEM;
661 goto free_temp;
662 }
663 }
664 if (set_rx) {
665 temp_rx = vmalloc(size);
666 if (!temp_rx) {
667 err = -ENOMEM;
668 goto free_temp;
669 }
670 }
652 671
653 tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); 672 e1000e_down(adapter);
654 tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD));
655 tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
656 673
657 if (netif_running(adapter->netdev)) { 674 /*
658 /* Try to get new resources before deleting old */ 675 * We can't just free everything and then setup again, because the
659 err = e1000e_setup_rx_resources(adapter); 676 * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
677 * structs. First, attempt to allocate new resources...
678 */
679 if (set_tx) {
680 memcpy(temp_tx, adapter->tx_ring, size);
681 temp_tx->count = new_tx_count;
682 err = e1000e_setup_tx_resources(temp_tx);
660 if (err) 683 if (err)
661 goto err_setup_rx; 684 goto err_setup;
662 err = e1000e_setup_tx_resources(adapter); 685 }
686 if (set_rx) {
687 memcpy(temp_rx, adapter->rx_ring, size);
688 temp_rx->count = new_rx_count;
689 err = e1000e_setup_rx_resources(temp_rx);
663 if (err) 690 if (err)
664 goto err_setup_tx; 691 goto err_setup_rx;
692 }
665 693
666 /* 694 /* ...then free the old resources and copy back any new ring data */
667 * restore the old in order to free it, 695 if (set_tx) {
668 * then add in the new 696 e1000e_free_tx_resources(adapter->tx_ring);
669 */ 697 memcpy(adapter->tx_ring, temp_tx, size);
670 adapter->rx_ring = rx_old; 698 adapter->tx_ring_count = new_tx_count;
671 adapter->tx_ring = tx_old; 699 }
672 e1000e_free_rx_resources(adapter); 700 if (set_rx) {
673 e1000e_free_tx_resources(adapter); 701 e1000e_free_rx_resources(adapter->rx_ring);
674 kfree(tx_old); 702 memcpy(adapter->rx_ring, temp_rx, size);
675 kfree(rx_old); 703 adapter->rx_ring_count = new_rx_count;
676 adapter->rx_ring = rx_ring;
677 adapter->tx_ring = tx_ring;
678 err = e1000e_up(adapter);
679 if (err)
680 goto err_setup;
681 } 704 }
682 705
683 clear_bit(__E1000_RESETTING, &adapter->state);
684 return 0;
685err_setup_tx:
686 e1000e_free_rx_resources(adapter);
687err_setup_rx: 706err_setup_rx:
688 adapter->rx_ring = rx_old; 707 if (err && set_tx)
689 adapter->tx_ring = tx_old; 708 e1000e_free_tx_resources(temp_tx);
690 kfree(rx_ring);
691err_alloc_rx:
692 kfree(tx_ring);
693err_alloc_tx:
694 e1000e_up(adapter);
695err_setup: 709err_setup:
710 e1000e_up(adapter);
711free_temp:
712 vfree(temp_tx);
713 vfree(temp_rx);
714clear_reset:
696 clear_bit(__E1000_RESETTING, &adapter->state); 715 clear_bit(__E1000_RESETTING, &adapter->state);
697 return err; 716 return err;
698} 717}
@@ -1955,6 +1974,53 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
1955 } 1974 }
1956} 1975}
1957 1976
1977static int e1000_get_rxnfc(struct net_device *netdev,
1978 struct ethtool_rxnfc *info, u32 *rule_locs)
1979{
1980 info->data = 0;
1981
1982 switch (info->cmd) {
1983 case ETHTOOL_GRXFH: {
1984 struct e1000_adapter *adapter = netdev_priv(netdev);
1985 struct e1000_hw *hw = &adapter->hw;
1986 u32 mrqc = er32(MRQC);
1987
1988 if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
1989 return 0;
1990
1991 switch (info->flow_type) {
1992 case TCP_V4_FLOW:
1993 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1994 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1995 /* fall through */
1996 case UDP_V4_FLOW:
1997 case SCTP_V4_FLOW:
1998 case AH_ESP_V4_FLOW:
1999 case IPV4_FLOW:
2000 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2001 info->data |= RXH_IP_SRC | RXH_IP_DST;
2002 break;
2003 case TCP_V6_FLOW:
2004 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2005 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2006 /* fall through */
2007 case UDP_V6_FLOW:
2008 case SCTP_V6_FLOW:
2009 case AH_ESP_V6_FLOW:
2010 case IPV6_FLOW:
2011 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2012 info->data |= RXH_IP_SRC | RXH_IP_DST;
2013 break;
2014 default:
2015 break;
2016 }
2017 return 0;
2018 }
2019 default:
2020 return -EOPNOTSUPP;
2021 }
2022}
2023
1958static const struct ethtool_ops e1000_ethtool_ops = { 2024static const struct ethtool_ops e1000_ethtool_ops = {
1959 .get_settings = e1000_get_settings, 2025 .get_settings = e1000_get_settings,
1960 .set_settings = e1000_set_settings, 2026 .set_settings = e1000_set_settings,
@@ -1981,6 +2047,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1981 .get_sset_count = e1000e_get_sset_count, 2047 .get_sset_count = e1000e_get_sset_count,
1982 .get_coalesce = e1000_get_coalesce, 2048 .get_coalesce = e1000_get_coalesce,
1983 .set_coalesce = e1000_set_coalesce, 2049 .set_coalesce = e1000_set_coalesce,
2050 .get_rxnfc = e1000_get_rxnfc,
1984}; 2051};
1985 2052
1986void e1000e_set_ethtool_ops(struct net_device *netdev) 2053void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 29670397079b..197059bb9abf 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -204,6 +204,7 @@ enum e1e_registers {
204 E1000_WUC = 0x05800, /* Wakeup Control - RW */ 204 E1000_WUC = 0x05800, /* Wakeup Control - RW */
205 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ 205 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
206 E1000_WUS = 0x05810, /* Wakeup Status - RO */ 206 E1000_WUS = 0x05810, /* Wakeup Status - RO */
207 E1000_MRQC = 0x05818, /* Multiple Receive Control - RW */
207 E1000_MANC = 0x05820, /* Management Control - RW */ 208 E1000_MANC = 0x05820, /* Management Control - RW */
208 E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ 209 E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
209 E1000_HOST_IF = 0x08800, /* Host Interface */ 210 E1000_HOST_IF = 0x08800, /* Host Interface */
@@ -219,6 +220,10 @@ enum e1e_registers {
219 E1000_SWSM = 0x05B50, /* SW Semaphore */ 220 E1000_SWSM = 0x05B50, /* SW Semaphore */
220 E1000_FWSM = 0x05B54, /* FW Semaphore */ 221 E1000_FWSM = 0x05B54, /* FW Semaphore */
221 E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ 222 E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
223 E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */
224#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4))
225 E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */
226#define E1000_RSSRK(_n) (E1000_RSSRK_BASE + ((_n) * 4))
222 E1000_FFLT_DBG = 0x05F04, /* Debug Register */ 227 E1000_FFLT_DBG = 0x05F04, /* Debug Register */
223 E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */ 228 E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
224#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4)) 229#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
@@ -964,8 +969,8 @@ struct e1000_dev_spec_ich8lan {
964struct e1000_hw { 969struct e1000_hw {
965 struct e1000_adapter *adapter; 970 struct e1000_adapter *adapter;
966 971
967 u8 __iomem *hw_addr; 972 void __iomem *hw_addr;
968 u8 __iomem *flash_address; 973 void __iomem *flash_address;
969 974
970 struct e1000_mac_info mac; 975 struct e1000_mac_info mac;
971 struct e1000_fc_info fc; 976 struct e1000_fc_info fc;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e2a80a283fd3..907b17b2e66a 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -145,6 +145,8 @@
145#define I82579_EMI_ADDR 0x10 145#define I82579_EMI_ADDR 0x10
146#define I82579_EMI_DATA 0x11 146#define I82579_EMI_DATA 0x11
147#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ 147#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
148#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
149#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
148 150
149/* Strapping Option Register - RO */ 151/* Strapping Option Register - RO */
150#define E1000_STRAP 0x0000C 152#define E1000_STRAP 0x0000C
@@ -304,7 +306,6 @@ static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
304static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 306static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
305{ 307{
306 struct e1000_phy_info *phy = &hw->phy; 308 struct e1000_phy_info *phy = &hw->phy;
307 u32 fwsm;
308 s32 ret_val = 0; 309 s32 ret_val = 0;
309 310
310 phy->addr = 1; 311 phy->addr = 1;
@@ -323,14 +324,14 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
323 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 324 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
324 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 325 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
325 326
326 /* 327 if (!e1000_check_reset_block(hw)) {
327 * The MAC-PHY interconnect may still be in SMBus mode 328 u32 fwsm = er32(FWSM);
328 * after Sx->S0. If the manageability engine (ME) is 329
329 * disabled, then toggle the LANPHYPC Value bit to force 330 /*
330 * the interconnect to PCIe mode. 331 * The MAC-PHY interconnect may still be in SMBus mode after
331 */ 332 * Sx->S0. If resetting the PHY is not blocked, toggle the
332 fwsm = er32(FWSM); 333 * LANPHYPC Value bit to force the interconnect to PCIe mode.
333 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { 334 */
334 e1000_toggle_lanphypc_value_ich8lan(hw); 335 e1000_toggle_lanphypc_value_ich8lan(hw);
335 msleep(50); 336 msleep(50);
336 337
@@ -338,25 +339,26 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
338 * Gate automatic PHY configuration by hardware on 339 * Gate automatic PHY configuration by hardware on
339 * non-managed 82579 340 * non-managed 82579
340 */ 341 */
341 if (hw->mac.type == e1000_pch2lan) 342 if ((hw->mac.type == e1000_pch2lan) &&
343 !(fwsm & E1000_ICH_FWSM_FW_VALID))
342 e1000_gate_hw_phy_config_ich8lan(hw, true); 344 e1000_gate_hw_phy_config_ich8lan(hw, true);
343 }
344 345
345 /* 346 /*
346 * Reset the PHY before any access to it. Doing so, ensures that 347 * Reset the PHY before any access to it. Doing so, ensures
347 * the PHY is in a known good state before we read/write PHY registers. 348 * that the PHY is in a known good state before we read/write
348 * The generic reset is sufficient here, because we haven't determined 349 * PHY registers. The generic reset is sufficient here,
349 * the PHY type yet. 350 * because we haven't determined the PHY type yet.
350 */ 351 */
351 ret_val = e1000e_phy_hw_reset_generic(hw); 352 ret_val = e1000e_phy_hw_reset_generic(hw);
352 if (ret_val) 353 if (ret_val)
353 goto out; 354 goto out;
354 355
355 /* Ungate automatic PHY configuration on non-managed 82579 */ 356 /* Ungate automatic PHY configuration on non-managed 82579 */
356 if ((hw->mac.type == e1000_pch2lan) && 357 if ((hw->mac.type == e1000_pch2lan) &&
357 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 358 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
358 usleep_range(10000, 20000); 359 usleep_range(10000, 20000);
359 e1000_gate_hw_phy_config_ich8lan(hw, false); 360 e1000_gate_hw_phy_config_ich8lan(hw, false);
361 }
360 } 362 }
361 363
362 phy->id = e1000_phy_unknown; 364 phy->id = e1000_phy_unknown;
@@ -900,8 +902,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
900 } 902 }
901 903
902 if (!timeout) { 904 if (!timeout) {
903 e_dbg("Failed to acquire the semaphore, FW or HW has it: " 905 e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
904 "FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
905 er32(FWSM), extcnf_ctrl); 906 er32(FWSM), extcnf_ctrl);
906 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 907 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
907 ew32(EXTCNF_CTRL, extcnf_ctrl); 908 ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -1669,6 +1670,26 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1669 /* Set MDIO slow mode before any other MDIO access */ 1670 /* Set MDIO slow mode before any other MDIO access */
1670 ret_val = e1000_set_mdio_slow_mode_hv(hw); 1671 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1671 1672
1673 ret_val = hw->phy.ops.acquire(hw);
1674 if (ret_val)
1675 goto out;
1676 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1677 I82579_MSE_THRESHOLD);
1678 if (ret_val)
1679 goto release;
1680 /* set MSE higher to enable link to stay up when noise is high */
1681 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034);
1682 if (ret_val)
1683 goto release;
1684 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1685 I82579_MSE_LINK_DOWN);
1686 if (ret_val)
1687 goto release;
1688 /* drop link after 5 times MSE threshold was reached */
1689 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005);
1690release:
1691 hw->phy.ops.release(hw);
1692
1672out: 1693out:
1673 return ret_val; 1694 return ret_val;
1674} 1695}
@@ -1899,7 +1920,9 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1899 else 1920 else
1900 oem_reg &= ~HV_OEM_BITS_LPLU; 1921 oem_reg &= ~HV_OEM_BITS_LPLU;
1901 1922
1902 oem_reg |= HV_OEM_BITS_RESTART_AN; 1923 if (!e1000_check_reset_block(hw))
1924 oem_reg |= HV_OEM_BITS_RESTART_AN;
1925
1903 ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); 1926 ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
1904 1927
1905out: 1928out:
@@ -2108,8 +2131,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2108 2131
2109 return 0; 2132 return 0;
2110 } 2133 }
2111 e_dbg("Unable to determine valid NVM bank via EEC - " 2134 e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2112 "reading flash signature\n");
2113 /* fall-thru */ 2135 /* fall-thru */
2114 default: 2136 default:
2115 /* set bank to 0 in case flash read fails */ 2137 /* set bank to 0 in case flash read fails */
@@ -2221,8 +2243,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2221 2243
2222 /* Check if the flash descriptor is valid */ 2244 /* Check if the flash descriptor is valid */
2223 if (hsfsts.hsf_status.fldesvalid == 0) { 2245 if (hsfsts.hsf_status.fldesvalid == 0) {
2224 e_dbg("Flash descriptor invalid. " 2246 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
2225 "SW Sequencing must be used.\n");
2226 return -E1000_ERR_NVM; 2247 return -E1000_ERR_NVM;
2227 } 2248 }
2228 2249
@@ -2258,7 +2279,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2258 * cycle has a chance to end before giving up. 2279 * cycle has a chance to end before giving up.
2259 */ 2280 */
2260 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 2281 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2261 hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); 2282 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2262 if (hsfsts.hsf_status.flcinprog == 0) { 2283 if (hsfsts.hsf_status.flcinprog == 0) {
2263 ret_val = 0; 2284 ret_val = 0;
2264 break; 2285 break;
@@ -2422,8 +2443,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2422 /* Repeat for some time before giving up. */ 2443 /* Repeat for some time before giving up. */
2423 continue; 2444 continue;
2424 } else if (hsfsts.hsf_status.flcdone == 0) { 2445 } else if (hsfsts.hsf_status.flcdone == 0) {
2425 e_dbg("Timeout error - flash cycle " 2446 e_dbg("Timeout error - flash cycle did not complete.\n");
2426 "did not complete.\n");
2427 break; 2447 break;
2428 } 2448 }
2429 } 2449 }
@@ -2774,8 +2794,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2774 /* Repeat for some time before giving up. */ 2794 /* Repeat for some time before giving up. */
2775 continue; 2795 continue;
2776 if (hsfsts.hsf_status.flcdone == 0) { 2796 if (hsfsts.hsf_status.flcdone == 0) {
2777 e_dbg("Timeout error - flash cycle " 2797 e_dbg("Timeout error - flash cycle did not complete.\n");
2778 "did not complete.");
2779 break; 2798 break;
2780 } 2799 }
2781 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 2800 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
@@ -3676,9 +3695,10 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3676 * 3695 *
3677 * During S0 to Sx transition, it is possible the link remains at gig 3696 * During S0 to Sx transition, it is possible the link remains at gig
3678 * instead of negotiating to a lower speed. Before going to Sx, set 3697 * instead of negotiating to a lower speed. Before going to Sx, set
3679 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation 3698 * 'Gig Disable' to force link speed negotiation to a lower speed based on
3680 * to a lower speed. For PCH and newer parts, the OEM bits PHY register 3699 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
3681 * (LED, GbE disable and LPLU configurations) also needs to be written. 3700 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3701 * needs to be written.
3682 **/ 3702 **/
3683void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) 3703void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3684{ 3704{
@@ -3686,7 +3706,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3686 s32 ret_val; 3706 s32 ret_val;
3687 3707
3688 phy_ctrl = er32(PHY_CTRL); 3708 phy_ctrl = er32(PHY_CTRL);
3689 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; 3709 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
3690 ew32(PHY_CTRL, phy_ctrl); 3710 ew32(PHY_CTRL, phy_ctrl);
3691 3711
3692 if (hw->mac.type == e1000_ich8lan) 3712 if (hw->mac.type == e1000_ich8lan)
@@ -3714,42 +3734,37 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3714 **/ 3734 **/
3715void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) 3735void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3716{ 3736{
3717 u32 fwsm; 3737 u16 phy_id1, phy_id2;
3738 s32 ret_val;
3718 3739
3719 if (hw->mac.type != e1000_pch2lan) 3740 if ((hw->mac.type != e1000_pch2lan) || e1000_check_reset_block(hw))
3720 return; 3741 return;
3721 3742
3722 fwsm = er32(FWSM); 3743 ret_val = hw->phy.ops.acquire(hw);
3723 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) { 3744 if (ret_val) {
3724 u16 phy_id1, phy_id2; 3745 e_dbg("Failed to acquire PHY semaphore in resume\n");
3725 s32 ret_val; 3746 return;
3726 3747 }
3727 ret_val = hw->phy.ops.acquire(hw);
3728 if (ret_val) {
3729 e_dbg("Failed to acquire PHY semaphore in resume\n");
3730 return;
3731 }
3732 3748
3733 /* Test access to the PHY registers by reading the ID regs */ 3749 /* Test access to the PHY registers by reading the ID regs */
3734 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); 3750 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
3735 if (ret_val) 3751 if (ret_val)
3736 goto release; 3752 goto release;
3737 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); 3753 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
3738 if (ret_val) 3754 if (ret_val)
3739 goto release; 3755 goto release;
3740 3756
3741 if (hw->phy.id == ((u32)(phy_id1 << 16) | 3757 if (hw->phy.id == ((u32)(phy_id1 << 16) |
3742 (u32)(phy_id2 & PHY_REVISION_MASK))) 3758 (u32)(phy_id2 & PHY_REVISION_MASK)))
3743 goto release; 3759 goto release;
3744 3760
3745 e1000_toggle_lanphypc_value_ich8lan(hw); 3761 e1000_toggle_lanphypc_value_ich8lan(hw);
3746 3762
3747 hw->phy.ops.release(hw); 3763 hw->phy.ops.release(hw);
3748 msleep(50); 3764 msleep(50);
3749 e1000_phy_hw_reset(hw); 3765 e1000_phy_hw_reset(hw);
3750 msleep(50); 3766 msleep(50);
3751 return; 3767 return;
3752 }
3753 3768
3754release: 3769release:
3755 hw->phy.ops.release(hw); 3770 hw->phy.ops.release(hw);
@@ -4088,10 +4103,9 @@ const struct e1000_info e1000_ich9_info = {
4088 | FLAG_HAS_WOL 4103 | FLAG_HAS_WOL
4089 | FLAG_HAS_CTRLEXT_ON_LOAD 4104 | FLAG_HAS_CTRLEXT_ON_LOAD
4090 | FLAG_HAS_AMT 4105 | FLAG_HAS_AMT
4091 | FLAG_HAS_ERT
4092 | FLAG_HAS_FLASH 4106 | FLAG_HAS_FLASH
4093 | FLAG_APME_IN_WUC, 4107 | FLAG_APME_IN_WUC,
4094 .pba = 10, 4108 .pba = 18,
4095 .max_hw_frame_size = DEFAULT_JUMBO, 4109 .max_hw_frame_size = DEFAULT_JUMBO,
4096 .get_variants = e1000_get_variants_ich8lan, 4110 .get_variants = e1000_get_variants_ich8lan,
4097 .mac_ops = &ich8_mac_ops, 4111 .mac_ops = &ich8_mac_ops,
@@ -4106,10 +4120,9 @@ const struct e1000_info e1000_ich10_info = {
4106 | FLAG_HAS_WOL 4120 | FLAG_HAS_WOL
4107 | FLAG_HAS_CTRLEXT_ON_LOAD 4121 | FLAG_HAS_CTRLEXT_ON_LOAD
4108 | FLAG_HAS_AMT 4122 | FLAG_HAS_AMT
4109 | FLAG_HAS_ERT
4110 | FLAG_HAS_FLASH 4123 | FLAG_HAS_FLASH
4111 | FLAG_APME_IN_WUC, 4124 | FLAG_APME_IN_WUC,
4112 .pba = 10, 4125 .pba = 18,
4113 .max_hw_frame_size = DEFAULT_JUMBO, 4126 .max_hw_frame_size = DEFAULT_JUMBO,
4114 .get_variants = e1000_get_variants_ich8lan, 4127 .get_variants = e1000_get_variants_ich8lan,
4115 .mac_ops = &ich8_mac_ops, 4128 .mac_ops = &ich8_mac_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/lib.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 0893ab107adf..e1cf1072f318 100644
--- a/drivers/net/ethernet/intel/e1000e/lib.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -28,19 +28,6 @@
28 28
29#include "e1000.h" 29#include "e1000.h"
30 30
31enum e1000_mng_mode {
32 e1000_mng_mode_none = 0,
33 e1000_mng_mode_asf,
34 e1000_mng_mode_pt,
35 e1000_mng_mode_ipmi,
36 e1000_mng_mode_host_if_only
37};
38
39#define E1000_FACTPS_MNGCG 0x20000000
40
41/* Intel(R) Active Management Technology signature */
42#define E1000_IAMT_SIGNATURE 0x544D4149
43
44/** 31/**
45 * e1000e_get_bus_info_pcie - Get PCIe bus information 32 * e1000e_get_bus_info_pcie - Get PCIe bus information
46 * @hw: pointer to the HW structure 33 * @hw: pointer to the HW structure
@@ -151,7 +138,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
151void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) 138void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
152{ 139{
153 u32 i; 140 u32 i;
154 u8 mac_addr[ETH_ALEN] = {0}; 141 u8 mac_addr[ETH_ALEN] = { 0 };
155 142
156 /* Setup the receive address */ 143 /* Setup the receive address */
157 e_dbg("Programming MAC Address into RAR[0]\n"); 144 e_dbg("Programming MAC Address into RAR[0]\n");
@@ -159,7 +146,7 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
159 e1000e_rar_set(hw, hw->mac.addr, 0); 146 e1000e_rar_set(hw, hw->mac.addr, 0);
160 147
161 /* Zero out the other (rar_entry_count - 1) receive addresses */ 148 /* Zero out the other (rar_entry_count - 1) receive addresses */
162 e_dbg("Clearing RAR[1-%u]\n", rar_count-1); 149 e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
163 for (i = 1; i < rar_count; i++) 150 for (i = 1; i < rar_count; i++)
164 e1000e_rar_set(hw, mac_addr, i); 151 e1000e_rar_set(hw, mac_addr, i);
165} 152}
@@ -187,15 +174,12 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
187 if (ret_val) 174 if (ret_val)
188 goto out; 175 goto out;
189 176
190 /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ 177 /* not supported on older hardware or 82573 */
191 if (!((nvm_data & NVM_COMPAT_LOM) || 178 if ((hw->mac.type < e1000_82571) || (hw->mac.type == e1000_82573))
192 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
193 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
194 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
195 goto out; 179 goto out;
196 180
197 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 181 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
198 &nvm_alt_mac_addr_offset); 182 &nvm_alt_mac_addr_offset);
199 if (ret_val) { 183 if (ret_val) {
200 e_dbg("NVM Read Error\n"); 184 e_dbg("NVM Read Error\n");
201 goto out; 185 goto out;
@@ -254,11 +238,10 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
254 * HW expects these in little endian so we reverse the byte order 238 * HW expects these in little endian so we reverse the byte order
255 * from network order (big endian) to little endian 239 * from network order (big endian) to little endian
256 */ 240 */
257 rar_low = ((u32) addr[0] | 241 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
258 ((u32) addr[1] << 8) | 242 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
259 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
260 243
261 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 244 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
262 245
263 /* If MAC address zero, no need to set the AV bit */ 246 /* If MAC address zero, no need to set the AV bit */
264 if (rar_low || rar_high) 247 if (rar_low || rar_high)
@@ -318,7 +301,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
318 * values resulting from each mc_filter_type... 301 * values resulting from each mc_filter_type...
319 * [0] [1] [2] [3] [4] [5] 302 * [0] [1] [2] [3] [4] [5]
320 * 01 AA 00 12 34 56 303 * 01 AA 00 12 34 56
321 * LSB MSB 304 * LSB MSB
322 * 305 *
323 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 306 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
324 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 307 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
@@ -341,7 +324,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
341 } 324 }
342 325
343 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | 326 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
344 (((u16) mc_addr[5]) << bit_shift))); 327 (((u16)mc_addr[5]) << bit_shift)));
345 328
346 return hash_value; 329 return hash_value;
347} 330}
@@ -365,7 +348,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
365 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); 348 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
366 349
367 /* update mta_shadow from mc_addr_list */ 350 /* update mta_shadow from mc_addr_list */
368 for (i = 0; (u32) i < mc_addr_count; i++) { 351 for (i = 0; (u32)i < mc_addr_count; i++) {
369 hash_value = e1000_hash_mc_addr(hw, mc_addr_list); 352 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
370 353
371 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); 354 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
@@ -461,7 +444,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
461 return ret_val; 444 return ret_val;
462 445
463 if (!link) 446 if (!link)
464 return ret_val; /* No link detected */ 447 return ret_val; /* No link detected */
465 448
466 mac->get_link_status = false; 449 mac->get_link_status = false;
467 450
@@ -656,12 +639,10 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
656 if (rxcw & E1000_RXCW_SYNCH) { 639 if (rxcw & E1000_RXCW_SYNCH) {
657 if (!(rxcw & E1000_RXCW_IV)) { 640 if (!(rxcw & E1000_RXCW_IV)) {
658 mac->serdes_has_link = true; 641 mac->serdes_has_link = true;
659 e_dbg("SERDES: Link up - autoneg " 642 e_dbg("SERDES: Link up - autoneg completed successfully.\n");
660 "completed successfully.\n");
661 } else { 643 } else {
662 mac->serdes_has_link = false; 644 mac->serdes_has_link = false;
663 e_dbg("SERDES: Link down - invalid" 645 e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n");
664 "codewords detected in autoneg.\n");
665 } 646 }
666 } else { 647 } else {
667 mac->serdes_has_link = false; 648 mac->serdes_has_link = false;
@@ -706,8 +687,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
706 687
707 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) 688 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
708 hw->fc.requested_mode = e1000_fc_none; 689 hw->fc.requested_mode = e1000_fc_none;
709 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 690 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
710 NVM_WORD0F_ASM_DIR)
711 hw->fc.requested_mode = e1000_fc_tx_pause; 691 hw->fc.requested_mode = e1000_fc_tx_pause;
712 else 692 else
713 hw->fc.requested_mode = e1000_fc_full; 693 hw->fc.requested_mode = e1000_fc_full;
@@ -753,8 +733,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
753 */ 733 */
754 hw->fc.current_mode = hw->fc.requested_mode; 734 hw->fc.current_mode = hw->fc.requested_mode;
755 735
756 e_dbg("After fix-ups FlowControl is now = %x\n", 736 e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
757 hw->fc.current_mode);
758 737
759 /* Call the necessary media_type subroutine to configure the link. */ 738 /* Call the necessary media_type subroutine to configure the link. */
760 ret_val = mac->ops.setup_physical_interface(hw); 739 ret_val = mac->ops.setup_physical_interface(hw);
@@ -1121,8 +1100,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1121 return ret_val; 1100 return ret_val;
1122 1101
1123 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { 1102 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1124 e_dbg("Copper PHY and Auto Neg " 1103 e_dbg("Copper PHY and Auto Neg has not completed.\n");
1125 "has not completed.\n");
1126 return ret_val; 1104 return ret_val;
1127 } 1105 }
1128 1106
@@ -1186,11 +1164,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1186 */ 1164 */
1187 if (hw->fc.requested_mode == e1000_fc_full) { 1165 if (hw->fc.requested_mode == e1000_fc_full) {
1188 hw->fc.current_mode = e1000_fc_full; 1166 hw->fc.current_mode = e1000_fc_full;
1189 e_dbg("Flow Control = FULL.\r\n"); 1167 e_dbg("Flow Control = FULL.\n");
1190 } else { 1168 } else {
1191 hw->fc.current_mode = e1000_fc_rx_pause; 1169 hw->fc.current_mode = e1000_fc_rx_pause;
1192 e_dbg("Flow Control = " 1170 e_dbg("Flow Control = Rx PAUSE frames only.\n");
1193 "Rx PAUSE frames only.\r\n");
1194 } 1171 }
1195 } 1172 }
1196 /* 1173 /*
@@ -1202,11 +1179,11 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1202 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause 1179 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1203 */ 1180 */
1204 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && 1181 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1205 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 1182 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1206 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 1183 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1207 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1184 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1208 hw->fc.current_mode = e1000_fc_tx_pause; 1185 hw->fc.current_mode = e1000_fc_tx_pause;
1209 e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); 1186 e_dbg("Flow Control = Tx PAUSE frames only.\n");
1210 } 1187 }
1211 /* 1188 /*
1212 * For transmitting PAUSE frames ONLY. 1189 * For transmitting PAUSE frames ONLY.
@@ -1221,14 +1198,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1221 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 1198 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1222 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1199 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1223 hw->fc.current_mode = e1000_fc_rx_pause; 1200 hw->fc.current_mode = e1000_fc_rx_pause;
1224 e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); 1201 e_dbg("Flow Control = Rx PAUSE frames only.\n");
1225 } else { 1202 } else {
1226 /* 1203 /*
1227 * Per the IEEE spec, at this point flow control 1204 * Per the IEEE spec, at this point flow control
1228 * should be disabled. 1205 * should be disabled.
1229 */ 1206 */
1230 hw->fc.current_mode = e1000_fc_none; 1207 hw->fc.current_mode = e1000_fc_none;
1231 e_dbg("Flow Control = NONE.\r\n"); 1208 e_dbg("Flow Control = NONE.\n");
1232 } 1209 }
1233 1210
1234 /* 1211 /*
@@ -1268,7 +1245,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1268 * Read the status register for the current speed/duplex and store the current 1245 * Read the status register for the current speed/duplex and store the current
1269 * speed and duplex for copper connections. 1246 * speed and duplex for copper connections.
1270 **/ 1247 **/
1271s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) 1248s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1249 u16 *duplex)
1272{ 1250{
1273 u32 status; 1251 u32 status;
1274 1252
@@ -1301,7 +1279,8 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
1301 * Sets the speed and duplex to gigabit full duplex (the only possible option) 1279 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1302 * for fiber/serdes links. 1280 * for fiber/serdes links.
1303 **/ 1281 **/
1304s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) 1282s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed,
1283 u16 *duplex)
1305{ 1284{
1306 *speed = SPEED_1000; 1285 *speed = SPEED_1000;
1307 *duplex = FULL_DUPLEX; 1286 *duplex = FULL_DUPLEX;
@@ -1504,11 +1483,10 @@ s32 e1000e_setup_led_generic(struct e1000_hw *hw)
1504 ledctl = er32(LEDCTL); 1483 ledctl = er32(LEDCTL);
1505 hw->mac.ledctl_default = ledctl; 1484 hw->mac.ledctl_default = ledctl;
1506 /* Turn off LED0 */ 1485 /* Turn off LED0 */
1507 ledctl &= ~(E1000_LEDCTL_LED0_IVRT | 1486 ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
1508 E1000_LEDCTL_LED0_BLINK | 1487 E1000_LEDCTL_LED0_MODE_MASK);
1509 E1000_LEDCTL_LED0_MODE_MASK);
1510 ledctl |= (E1000_LEDCTL_MODE_LED_OFF << 1488 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
1511 E1000_LEDCTL_LED0_MODE_SHIFT); 1489 E1000_LEDCTL_LED0_MODE_SHIFT);
1512 ew32(LEDCTL, ledctl); 1490 ew32(LEDCTL, ledctl);
1513 } else if (hw->phy.media_type == e1000_media_type_copper) { 1491 } else if (hw->phy.media_type == e1000_media_type_copper) {
1514 ew32(LEDCTL, hw->mac.ledctl_mode1); 1492 ew32(LEDCTL, hw->mac.ledctl_mode1);
@@ -1544,7 +1522,7 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
1544 if (hw->phy.media_type == e1000_media_type_fiber) { 1522 if (hw->phy.media_type == e1000_media_type_fiber) {
1545 /* always blink LED0 for PCI-E fiber */ 1523 /* always blink LED0 for PCI-E fiber */
1546 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 1524 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1547 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 1525 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1548 } else { 1526 } else {
1549 /* 1527 /*
1550 * set the blink bit for each LED that's "on" (0x0E) 1528 * set the blink bit for each LED that's "on" (0x0E)
@@ -1657,8 +1635,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1657 ew32(CTRL, ctrl); 1635 ew32(CTRL, ctrl);
1658 1636
1659 while (timeout) { 1637 while (timeout) {
1660 if (!(er32(STATUS) & 1638 if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
1661 E1000_STATUS_GIO_MASTER_ENABLE))
1662 break; 1639 break;
1663 udelay(100); 1640 udelay(100);
1664 timeout--; 1641 timeout--;
@@ -1723,7 +1700,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
1723 mac->current_ifs_val = mac->ifs_min_val; 1700 mac->current_ifs_val = mac->ifs_min_val;
1724 else 1701 else
1725 mac->current_ifs_val += 1702 mac->current_ifs_val +=
1726 mac->ifs_step_size; 1703 mac->ifs_step_size;
1727 ew32(AIT, mac->current_ifs_val); 1704 ew32(AIT, mac->current_ifs_val);
1728 } 1705 }
1729 } 1706 }
@@ -1738,956 +1715,3 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
1738out: 1715out:
1739 return; 1716 return;
1740} 1717}
1741
1742/**
1743 * e1000_raise_eec_clk - Raise EEPROM clock
1744 * @hw: pointer to the HW structure
1745 * @eecd: pointer to the EEPROM
1746 *
1747 * Enable/Raise the EEPROM clock bit.
1748 **/
1749static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
1750{
1751 *eecd = *eecd | E1000_EECD_SK;
1752 ew32(EECD, *eecd);
1753 e1e_flush();
1754 udelay(hw->nvm.delay_usec);
1755}
1756
1757/**
1758 * e1000_lower_eec_clk - Lower EEPROM clock
1759 * @hw: pointer to the HW structure
1760 * @eecd: pointer to the EEPROM
1761 *
1762 * Clear/Lower the EEPROM clock bit.
1763 **/
1764static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
1765{
1766 *eecd = *eecd & ~E1000_EECD_SK;
1767 ew32(EECD, *eecd);
1768 e1e_flush();
1769 udelay(hw->nvm.delay_usec);
1770}
1771
1772/**
1773 * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM
1774 * @hw: pointer to the HW structure
1775 * @data: data to send to the EEPROM
1776 * @count: number of bits to shift out
1777 *
1778 * We need to shift 'count' bits out to the EEPROM. So, the value in the
1779 * "data" parameter will be shifted out to the EEPROM one bit at a time.
1780 * In order to do this, "data" must be broken down into bits.
1781 **/
1782static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
1783{
1784 struct e1000_nvm_info *nvm = &hw->nvm;
1785 u32 eecd = er32(EECD);
1786 u32 mask;
1787
1788 mask = 0x01 << (count - 1);
1789 if (nvm->type == e1000_nvm_eeprom_spi)
1790 eecd |= E1000_EECD_DO;
1791
1792 do {
1793 eecd &= ~E1000_EECD_DI;
1794
1795 if (data & mask)
1796 eecd |= E1000_EECD_DI;
1797
1798 ew32(EECD, eecd);
1799 e1e_flush();
1800
1801 udelay(nvm->delay_usec);
1802
1803 e1000_raise_eec_clk(hw, &eecd);
1804 e1000_lower_eec_clk(hw, &eecd);
1805
1806 mask >>= 1;
1807 } while (mask);
1808
1809 eecd &= ~E1000_EECD_DI;
1810 ew32(EECD, eecd);
1811}
1812
1813/**
1814 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
1815 * @hw: pointer to the HW structure
1816 * @count: number of bits to shift in
1817 *
1818 * In order to read a register from the EEPROM, we need to shift 'count' bits
1819 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
1820 * the EEPROM (setting the SK bit), and then reading the value of the data out
1821 * "DO" bit. During this "shifting in" process the data in "DI" bit should
1822 * always be clear.
1823 **/
1824static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
1825{
1826 u32 eecd;
1827 u32 i;
1828 u16 data;
1829
1830 eecd = er32(EECD);
1831
1832 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
1833 data = 0;
1834
1835 for (i = 0; i < count; i++) {
1836 data <<= 1;
1837 e1000_raise_eec_clk(hw, &eecd);
1838
1839 eecd = er32(EECD);
1840
1841 eecd &= ~E1000_EECD_DI;
1842 if (eecd & E1000_EECD_DO)
1843 data |= 1;
1844
1845 e1000_lower_eec_clk(hw, &eecd);
1846 }
1847
1848 return data;
1849}
1850
1851/**
1852 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
1853 * @hw: pointer to the HW structure
1854 * @ee_reg: EEPROM flag for polling
1855 *
1856 * Polls the EEPROM status bit for either read or write completion based
1857 * upon the value of 'ee_reg'.
1858 **/
1859s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
1860{
1861 u32 attempts = 100000;
1862 u32 i, reg = 0;
1863
1864 for (i = 0; i < attempts; i++) {
1865 if (ee_reg == E1000_NVM_POLL_READ)
1866 reg = er32(EERD);
1867 else
1868 reg = er32(EEWR);
1869
1870 if (reg & E1000_NVM_RW_REG_DONE)
1871 return 0;
1872
1873 udelay(5);
1874 }
1875
1876 return -E1000_ERR_NVM;
1877}
1878
1879/**
1880 * e1000e_acquire_nvm - Generic request for access to EEPROM
1881 * @hw: pointer to the HW structure
1882 *
1883 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1884 * Return successful if access grant bit set, else clear the request for
1885 * EEPROM access and return -E1000_ERR_NVM (-1).
1886 **/
1887s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1888{
1889 u32 eecd = er32(EECD);
1890 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1891
1892 ew32(EECD, eecd | E1000_EECD_REQ);
1893 eecd = er32(EECD);
1894
1895 while (timeout) {
1896 if (eecd & E1000_EECD_GNT)
1897 break;
1898 udelay(5);
1899 eecd = er32(EECD);
1900 timeout--;
1901 }
1902
1903 if (!timeout) {
1904 eecd &= ~E1000_EECD_REQ;
1905 ew32(EECD, eecd);
1906 e_dbg("Could not acquire NVM grant\n");
1907 return -E1000_ERR_NVM;
1908 }
1909
1910 return 0;
1911}
1912
1913/**
1914 * e1000_standby_nvm - Return EEPROM to standby state
1915 * @hw: pointer to the HW structure
1916 *
1917 * Return the EEPROM to a standby state.
1918 **/
1919static void e1000_standby_nvm(struct e1000_hw *hw)
1920{
1921 struct e1000_nvm_info *nvm = &hw->nvm;
1922 u32 eecd = er32(EECD);
1923
1924 if (nvm->type == e1000_nvm_eeprom_spi) {
1925 /* Toggle CS to flush commands */
1926 eecd |= E1000_EECD_CS;
1927 ew32(EECD, eecd);
1928 e1e_flush();
1929 udelay(nvm->delay_usec);
1930 eecd &= ~E1000_EECD_CS;
1931 ew32(EECD, eecd);
1932 e1e_flush();
1933 udelay(nvm->delay_usec);
1934 }
1935}
1936
1937/**
1938 * e1000_stop_nvm - Terminate EEPROM command
1939 * @hw: pointer to the HW structure
1940 *
1941 * Terminates the current command by inverting the EEPROM's chip select pin.
1942 **/
1943static void e1000_stop_nvm(struct e1000_hw *hw)
1944{
1945 u32 eecd;
1946
1947 eecd = er32(EECD);
1948 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1949 /* Pull CS high */
1950 eecd |= E1000_EECD_CS;
1951 e1000_lower_eec_clk(hw, &eecd);
1952 }
1953}
1954
1955/**
1956 * e1000e_release_nvm - Release exclusive access to EEPROM
1957 * @hw: pointer to the HW structure
1958 *
1959 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
1960 **/
1961void e1000e_release_nvm(struct e1000_hw *hw)
1962{
1963 u32 eecd;
1964
1965 e1000_stop_nvm(hw);
1966
1967 eecd = er32(EECD);
1968 eecd &= ~E1000_EECD_REQ;
1969 ew32(EECD, eecd);
1970}
1971
1972/**
1973 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
1974 * @hw: pointer to the HW structure
1975 *
1976 * Setups the EEPROM for reading and writing.
1977 **/
1978static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1979{
1980 struct e1000_nvm_info *nvm = &hw->nvm;
1981 u32 eecd = er32(EECD);
1982 u8 spi_stat_reg;
1983
1984 if (nvm->type == e1000_nvm_eeprom_spi) {
1985 u16 timeout = NVM_MAX_RETRY_SPI;
1986
1987 /* Clear SK and CS */
1988 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1989 ew32(EECD, eecd);
1990 e1e_flush();
1991 udelay(1);
1992
1993 /*
1994 * Read "Status Register" repeatedly until the LSB is cleared.
1995 * The EEPROM will signal that the command has been completed
1996 * by clearing bit 0 of the internal status register. If it's
1997 * not cleared within 'timeout', then error out.
1998 */
1999 while (timeout) {
2000 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
2001 hw->nvm.opcode_bits);
2002 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
2003 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
2004 break;
2005
2006 udelay(5);
2007 e1000_standby_nvm(hw);
2008 timeout--;
2009 }
2010
2011 if (!timeout) {
2012 e_dbg("SPI NVM Status error\n");
2013 return -E1000_ERR_NVM;
2014 }
2015 }
2016
2017 return 0;
2018}
2019
2020/**
2021 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
2022 * @hw: pointer to the HW structure
2023 * @offset: offset of word in the EEPROM to read
2024 * @words: number of words to read
2025 * @data: word read from the EEPROM
2026 *
2027 * Reads a 16 bit word from the EEPROM using the EERD register.
2028 **/
2029s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2030{
2031 struct e1000_nvm_info *nvm = &hw->nvm;
2032 u32 i, eerd = 0;
2033 s32 ret_val = 0;
2034
2035 /*
2036 * A check for invalid values: offset too large, too many words,
2037 * too many words for the offset, and not enough words.
2038 */
2039 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
2040 (words == 0)) {
2041 e_dbg("nvm parameter(s) out of bounds\n");
2042 return -E1000_ERR_NVM;
2043 }
2044
2045 for (i = 0; i < words; i++) {
2046 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
2047 E1000_NVM_RW_REG_START;
2048
2049 ew32(EERD, eerd);
2050 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
2051 if (ret_val)
2052 break;
2053
2054 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
2055 }
2056
2057 return ret_val;
2058}
2059
2060/**
2061 * e1000e_write_nvm_spi - Write to EEPROM using SPI
2062 * @hw: pointer to the HW structure
2063 * @offset: offset within the EEPROM to be written to
2064 * @words: number of words to write
2065 * @data: 16 bit word(s) to be written to the EEPROM
2066 *
2067 * Writes data to EEPROM at offset using SPI interface.
2068 *
2069 * If e1000e_update_nvm_checksum is not called after this function , the
2070 * EEPROM will most likely contain an invalid checksum.
2071 **/
2072s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2073{
2074 struct e1000_nvm_info *nvm = &hw->nvm;
2075 s32 ret_val;
2076 u16 widx = 0;
2077
2078 /*
2079 * A check for invalid values: offset too large, too many words,
2080 * and not enough words.
2081 */
2082 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
2083 (words == 0)) {
2084 e_dbg("nvm parameter(s) out of bounds\n");
2085 return -E1000_ERR_NVM;
2086 }
2087
2088 ret_val = nvm->ops.acquire(hw);
2089 if (ret_val)
2090 return ret_val;
2091
2092 while (widx < words) {
2093 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
2094
2095 ret_val = e1000_ready_nvm_eeprom(hw);
2096 if (ret_val) {
2097 nvm->ops.release(hw);
2098 return ret_val;
2099 }
2100
2101 e1000_standby_nvm(hw);
2102
2103 /* Send the WRITE ENABLE command (8 bit opcode) */
2104 e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
2105 nvm->opcode_bits);
2106
2107 e1000_standby_nvm(hw);
2108
2109 /*
2110 * Some SPI eeproms use the 8th address bit embedded in the
2111 * opcode
2112 */
2113 if ((nvm->address_bits == 8) && (offset >= 128))
2114 write_opcode |= NVM_A8_OPCODE_SPI;
2115
2116 /* Send the Write command (8-bit opcode + addr) */
2117 e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
2118 e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
2119 nvm->address_bits);
2120
2121 /* Loop to allow for up to whole page write of eeprom */
2122 while (widx < words) {
2123 u16 word_out = data[widx];
2124 word_out = (word_out >> 8) | (word_out << 8);
2125 e1000_shift_out_eec_bits(hw, word_out, 16);
2126 widx++;
2127
2128 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
2129 e1000_standby_nvm(hw);
2130 break;
2131 }
2132 }
2133 }
2134
2135 usleep_range(10000, 20000);
2136 nvm->ops.release(hw);
2137 return 0;
2138}
2139
2140/**
2141 * e1000_read_pba_string_generic - Read device part number
2142 * @hw: pointer to the HW structure
2143 * @pba_num: pointer to device part number
2144 * @pba_num_size: size of part number buffer
2145 *
2146 * Reads the product board assembly (PBA) number from the EEPROM and stores
2147 * the value in pba_num.
2148 **/
2149s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
2150 u32 pba_num_size)
2151{
2152 s32 ret_val;
2153 u16 nvm_data;
2154 u16 pba_ptr;
2155 u16 offset;
2156 u16 length;
2157
2158 if (pba_num == NULL) {
2159 e_dbg("PBA string buffer was null\n");
2160 ret_val = E1000_ERR_INVALID_ARGUMENT;
2161 goto out;
2162 }
2163
2164 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2165 if (ret_val) {
2166 e_dbg("NVM Read Error\n");
2167 goto out;
2168 }
2169
2170 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
2171 if (ret_val) {
2172 e_dbg("NVM Read Error\n");
2173 goto out;
2174 }
2175
2176 /*
2177 * if nvm_data is not ptr guard the PBA must be in legacy format which
2178 * means pba_ptr is actually our second data word for the PBA number
2179 * and we can decode it into an ascii string
2180 */
2181 if (nvm_data != NVM_PBA_PTR_GUARD) {
2182 e_dbg("NVM PBA number is not stored as string\n");
2183
2184 /* we will need 11 characters to store the PBA */
2185 if (pba_num_size < 11) {
2186 e_dbg("PBA string buffer too small\n");
2187 return E1000_ERR_NO_SPACE;
2188 }
2189
2190 /* extract hex string from data and pba_ptr */
2191 pba_num[0] = (nvm_data >> 12) & 0xF;
2192 pba_num[1] = (nvm_data >> 8) & 0xF;
2193 pba_num[2] = (nvm_data >> 4) & 0xF;
2194 pba_num[3] = nvm_data & 0xF;
2195 pba_num[4] = (pba_ptr >> 12) & 0xF;
2196 pba_num[5] = (pba_ptr >> 8) & 0xF;
2197 pba_num[6] = '-';
2198 pba_num[7] = 0;
2199 pba_num[8] = (pba_ptr >> 4) & 0xF;
2200 pba_num[9] = pba_ptr & 0xF;
2201
2202 /* put a null character on the end of our string */
2203 pba_num[10] = '\0';
2204
2205 /* switch all the data but the '-' to hex char */
2206 for (offset = 0; offset < 10; offset++) {
2207 if (pba_num[offset] < 0xA)
2208 pba_num[offset] += '0';
2209 else if (pba_num[offset] < 0x10)
2210 pba_num[offset] += 'A' - 0xA;
2211 }
2212
2213 goto out;
2214 }
2215
2216 ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
2217 if (ret_val) {
2218 e_dbg("NVM Read Error\n");
2219 goto out;
2220 }
2221
2222 if (length == 0xFFFF || length == 0) {
2223 e_dbg("NVM PBA number section invalid length\n");
2224 ret_val = E1000_ERR_NVM_PBA_SECTION;
2225 goto out;
2226 }
2227 /* check if pba_num buffer is big enough */
2228 if (pba_num_size < (((u32)length * 2) - 1)) {
2229 e_dbg("PBA string buffer too small\n");
2230 ret_val = E1000_ERR_NO_SPACE;
2231 goto out;
2232 }
2233
2234 /* trim pba length from start of string */
2235 pba_ptr++;
2236 length--;
2237
2238 for (offset = 0; offset < length; offset++) {
2239 ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
2240 if (ret_val) {
2241 e_dbg("NVM Read Error\n");
2242 goto out;
2243 }
2244 pba_num[offset * 2] = (u8)(nvm_data >> 8);
2245 pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
2246 }
2247 pba_num[offset * 2] = '\0';
2248
2249out:
2250 return ret_val;
2251}
2252
2253/**
2254 * e1000_read_mac_addr_generic - Read device MAC address
2255 * @hw: pointer to the HW structure
2256 *
2257 * Reads the device MAC address from the EEPROM and stores the value.
2258 * Since devices with two ports use the same EEPROM, we increment the
2259 * last bit in the MAC address for the second port.
2260 **/
2261s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
2262{
2263 u32 rar_high;
2264 u32 rar_low;
2265 u16 i;
2266
2267 rar_high = er32(RAH(0));
2268 rar_low = er32(RAL(0));
2269
2270 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
2271 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
2272
2273 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
2274 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
2275
2276 for (i = 0; i < ETH_ALEN; i++)
2277 hw->mac.addr[i] = hw->mac.perm_addr[i];
2278
2279 return 0;
2280}
2281
2282/**
2283 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
2284 * @hw: pointer to the HW structure
2285 *
2286 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2287 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2288 **/
2289s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2290{
2291 s32 ret_val;
2292 u16 checksum = 0;
2293 u16 i, nvm_data;
2294
2295 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2296 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2297 if (ret_val) {
2298 e_dbg("NVM Read Error\n");
2299 return ret_val;
2300 }
2301 checksum += nvm_data;
2302 }
2303
2304 if (checksum != (u16) NVM_SUM) {
2305 e_dbg("NVM Checksum Invalid\n");
2306 return -E1000_ERR_NVM;
2307 }
2308
2309 return 0;
2310}
2311
2312/**
2313 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
2314 * @hw: pointer to the HW structure
2315 *
2316 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2317 * up to the checksum. Then calculates the EEPROM checksum and writes the
2318 * value to the EEPROM.
2319 **/
2320s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2321{
2322 s32 ret_val;
2323 u16 checksum = 0;
2324 u16 i, nvm_data;
2325
2326 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2327 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2328 if (ret_val) {
2329 e_dbg("NVM Read Error while updating checksum.\n");
2330 return ret_val;
2331 }
2332 checksum += nvm_data;
2333 }
2334 checksum = (u16) NVM_SUM - checksum;
2335 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2336 if (ret_val)
2337 e_dbg("NVM Write Error while updating checksum.\n");
2338
2339 return ret_val;
2340}
2341
2342/**
2343 * e1000e_reload_nvm - Reloads EEPROM
2344 * @hw: pointer to the HW structure
2345 *
2346 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
2347 * extended control register.
2348 **/
2349void e1000e_reload_nvm(struct e1000_hw *hw)
2350{
2351 u32 ctrl_ext;
2352
2353 udelay(10);
2354 ctrl_ext = er32(CTRL_EXT);
2355 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
2356 ew32(CTRL_EXT, ctrl_ext);
2357 e1e_flush();
2358}
2359
2360/**
2361 * e1000_calculate_checksum - Calculate checksum for buffer
2362 * @buffer: pointer to EEPROM
2363 * @length: size of EEPROM to calculate a checksum for
2364 *
2365 * Calculates the checksum for some buffer on a specified length. The
2366 * checksum calculated is returned.
2367 **/
2368static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
2369{
2370 u32 i;
2371 u8 sum = 0;
2372
2373 if (!buffer)
2374 return 0;
2375
2376 for (i = 0; i < length; i++)
2377 sum += buffer[i];
2378
2379 return (u8) (0 - sum);
2380}
2381
2382/**
2383 * e1000_mng_enable_host_if - Checks host interface is enabled
2384 * @hw: pointer to the HW structure
2385 *
2386 * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
2387 *
2388 * This function checks whether the HOST IF is enabled for command operation
2389 * and also checks whether the previous command is completed. It busy waits
2390 * in case of previous command is not completed.
2391 **/
2392static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2393{
2394 u32 hicr;
2395 u8 i;
2396
2397 if (!(hw->mac.arc_subsystem_valid)) {
2398 e_dbg("ARC subsystem not valid.\n");
2399 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2400 }
2401
2402 /* Check that the host interface is enabled. */
2403 hicr = er32(HICR);
2404 if ((hicr & E1000_HICR_EN) == 0) {
2405 e_dbg("E1000_HOST_EN bit disabled.\n");
2406 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2407 }
2408 /* check the previous command is completed */
2409 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
2410 hicr = er32(HICR);
2411 if (!(hicr & E1000_HICR_C))
2412 break;
2413 mdelay(1);
2414 }
2415
2416 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
2417 e_dbg("Previous command timeout failed .\n");
2418 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2419 }
2420
2421 return 0;
2422}
2423
2424/**
2425 * e1000e_check_mng_mode_generic - check management mode
2426 * @hw: pointer to the HW structure
2427 *
2428 * Reads the firmware semaphore register and returns true (>0) if
2429 * manageability is enabled, else false (0).
2430 **/
2431bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
2432{
2433 u32 fwsm = er32(FWSM);
2434
2435 return (fwsm & E1000_FWSM_MODE_MASK) ==
2436 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
2437}
2438
2439/**
2440 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
2441 * @hw: pointer to the HW structure
2442 *
2443 * Enables packet filtering on transmit packets if manageability is enabled
2444 * and host interface is enabled.
2445 **/
2446bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2447{
2448 struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
2449 u32 *buffer = (u32 *)&hw->mng_cookie;
2450 u32 offset;
2451 s32 ret_val, hdr_csum, csum;
2452 u8 i, len;
2453
2454 hw->mac.tx_pkt_filtering = true;
2455
2456 /* No manageability, no filtering */
2457 if (!e1000e_check_mng_mode(hw)) {
2458 hw->mac.tx_pkt_filtering = false;
2459 goto out;
2460 }
2461
2462 /*
2463 * If we can't read from the host interface for whatever
2464 * reason, disable filtering.
2465 */
2466 ret_val = e1000_mng_enable_host_if(hw);
2467 if (ret_val) {
2468 hw->mac.tx_pkt_filtering = false;
2469 goto out;
2470 }
2471
2472 /* Read in the header. Length and offset are in dwords. */
2473 len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
2474 offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
2475 for (i = 0; i < len; i++)
2476 *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
2477 hdr_csum = hdr->checksum;
2478 hdr->checksum = 0;
2479 csum = e1000_calculate_checksum((u8 *)hdr,
2480 E1000_MNG_DHCP_COOKIE_LENGTH);
2481 /*
2482 * If either the checksums or signature don't match, then
2483 * the cookie area isn't considered valid, in which case we
2484 * take the safe route of assuming Tx filtering is enabled.
2485 */
2486 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
2487 hw->mac.tx_pkt_filtering = true;
2488 goto out;
2489 }
2490
2491 /* Cookie area is valid, make the final check for filtering. */
2492 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
2493 hw->mac.tx_pkt_filtering = false;
2494 goto out;
2495 }
2496
2497out:
2498 return hw->mac.tx_pkt_filtering;
2499}
2500
2501/**
2502 * e1000_mng_write_cmd_header - Writes manageability command header
2503 * @hw: pointer to the HW structure
2504 * @hdr: pointer to the host interface command header
2505 *
2506 * Writes the command header after does the checksum calculation.
2507 **/
2508static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2509 struct e1000_host_mng_command_header *hdr)
2510{
2511 u16 i, length = sizeof(struct e1000_host_mng_command_header);
2512
2513 /* Write the whole command header structure with new checksum. */
2514
2515 hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
2516
2517 length >>= 2;
2518 /* Write the relevant command block into the ram area. */
2519 for (i = 0; i < length; i++) {
2520 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
2521 *((u32 *) hdr + i));
2522 e1e_flush();
2523 }
2524
2525 return 0;
2526}
2527
2528/**
2529 * e1000_mng_host_if_write - Write to the manageability host interface
2530 * @hw: pointer to the HW structure
2531 * @buffer: pointer to the host interface buffer
2532 * @length: size of the buffer
2533 * @offset: location in the buffer to write to
2534 * @sum: sum of the data (not checksum)
2535 *
2536 * This function writes the buffer content at the offset given on the host if.
2537 * It also does alignment considerations to do the writes in most efficient
2538 * way. Also fills up the sum of the buffer in *buffer parameter.
2539 **/
2540static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
2541 u16 length, u16 offset, u8 *sum)
2542{
2543 u8 *tmp;
2544 u8 *bufptr = buffer;
2545 u32 data = 0;
2546 u16 remaining, i, j, prev_bytes;
2547
2548 /* sum = only sum of the data and it is not checksum */
2549
2550 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
2551 return -E1000_ERR_PARAM;
2552
2553 tmp = (u8 *)&data;
2554 prev_bytes = offset & 0x3;
2555 offset >>= 2;
2556
2557 if (prev_bytes) {
2558 data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
2559 for (j = prev_bytes; j < sizeof(u32); j++) {
2560 *(tmp + j) = *bufptr++;
2561 *sum += *(tmp + j);
2562 }
2563 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
2564 length -= j - prev_bytes;
2565 offset++;
2566 }
2567
2568 remaining = length & 0x3;
2569 length -= remaining;
2570
2571 /* Calculate length in DWORDs */
2572 length >>= 2;
2573
2574 /*
2575 * The device driver writes the relevant command block into the
2576 * ram area.
2577 */
2578 for (i = 0; i < length; i++) {
2579 for (j = 0; j < sizeof(u32); j++) {
2580 *(tmp + j) = *bufptr++;
2581 *sum += *(tmp + j);
2582 }
2583
2584 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2585 }
2586 if (remaining) {
2587 for (j = 0; j < sizeof(u32); j++) {
2588 if (j < remaining)
2589 *(tmp + j) = *bufptr++;
2590 else
2591 *(tmp + j) = 0;
2592
2593 *sum += *(tmp + j);
2594 }
2595 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2596 }
2597
2598 return 0;
2599}
2600
2601/**
2602 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
2603 * @hw: pointer to the HW structure
2604 * @buffer: pointer to the host interface
2605 * @length: size of the buffer
2606 *
2607 * Writes the DHCP information to the host interface.
2608 **/
2609s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2610{
2611 struct e1000_host_mng_command_header hdr;
2612 s32 ret_val;
2613 u32 hicr;
2614
2615 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
2616 hdr.command_length = length;
2617 hdr.reserved1 = 0;
2618 hdr.reserved2 = 0;
2619 hdr.checksum = 0;
2620
2621 /* Enable the host interface */
2622 ret_val = e1000_mng_enable_host_if(hw);
2623 if (ret_val)
2624 return ret_val;
2625
2626 /* Populate the host interface with the contents of "buffer". */
2627 ret_val = e1000_mng_host_if_write(hw, buffer, length,
2628 sizeof(hdr), &(hdr.checksum));
2629 if (ret_val)
2630 return ret_val;
2631
2632 /* Write the manageability command header */
2633 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
2634 if (ret_val)
2635 return ret_val;
2636
2637 /* Tell the ARC a new command is pending. */
2638 hicr = er32(HICR);
2639 ew32(HICR, hicr | E1000_HICR_C);
2640
2641 return 0;
2642}
2643
2644/**
2645 * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
2646 * @hw: pointer to the HW structure
2647 *
2648 * Verifies the hardware needs to leave interface enabled so that frames can
2649 * be directed to and from the management interface.
2650 **/
2651bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2652{
2653 u32 manc;
2654 u32 fwsm, factps;
2655 bool ret_val = false;
2656
2657 manc = er32(MANC);
2658
2659 if (!(manc & E1000_MANC_RCV_TCO_EN))
2660 goto out;
2661
2662 if (hw->mac.has_fwsm) {
2663 fwsm = er32(FWSM);
2664 factps = er32(FACTPS);
2665
2666 if (!(factps & E1000_FACTPS_MNGCG) &&
2667 ((fwsm & E1000_FWSM_MODE_MASK) ==
2668 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2669 ret_val = true;
2670 goto out;
2671 }
2672 } else if ((hw->mac.type == e1000_82574) ||
2673 (hw->mac.type == e1000_82583)) {
2674 u16 data;
2675
2676 factps = er32(FACTPS);
2677 e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
2678
2679 if (!(factps & E1000_FACTPS_MNGCG) &&
2680 ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
2681 (e1000_mng_mode_pt << 13))) {
2682 ret_val = true;
2683 goto out;
2684 }
2685 } else if ((manc & E1000_MANC_SMBUS_EN) &&
2686 !(manc & E1000_MANC_ASF_EN)) {
2687 ret_val = true;
2688 goto out;
2689 }
2690
2691out:
2692 return ret_val;
2693}
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
new file mode 100644
index 000000000000..6594dbf248b4
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -0,0 +1,377 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
30
/* Manageability operating modes reported by the firmware */
enum e1000_mng_mode {
	e1000_mng_mode_none = 0,
	e1000_mng_mode_asf,
	e1000_mng_mode_pt,
	e1000_mng_mode_ipmi,
	e1000_mng_mode_host_if_only
};

/* FACTPS bit: manageability clock gating control */
#define E1000_FACTPS_MNGCG		0x20000000

/* Intel(R) Active Management Technology signature */
#define E1000_IAMT_SIGNATURE		0x544D4149
43
44/**
45 * e1000_calculate_checksum - Calculate checksum for buffer
46 * @buffer: pointer to EEPROM
47 * @length: size of EEPROM to calculate a checksum for
48 *
49 * Calculates the checksum for some buffer on a specified length. The
50 * checksum calculated is returned.
51 **/
52static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
53{
54 u32 i;
55 u8 sum = 0;
56
57 if (!buffer)
58 return 0;
59
60 for (i = 0; i < length; i++)
61 sum += buffer[i];
62
63 return (u8)(0 - sum);
64}
65
66/**
67 * e1000_mng_enable_host_if - Checks host interface is enabled
68 * @hw: pointer to the HW structure
69 *
70 * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
71 *
72 * This function checks whether the HOST IF is enabled for command operation
73 * and also checks whether the previous command is completed. It busy waits
74 * in case of previous command is not completed.
75 **/
76static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
77{
78 u32 hicr;
79 u8 i;
80
81 if (!(hw->mac.arc_subsystem_valid)) {
82 e_dbg("ARC subsystem not valid.\n");
83 return -E1000_ERR_HOST_INTERFACE_COMMAND;
84 }
85
86 /* Check that the host interface is enabled. */
87 hicr = er32(HICR);
88 if ((hicr & E1000_HICR_EN) == 0) {
89 e_dbg("E1000_HOST_EN bit disabled.\n");
90 return -E1000_ERR_HOST_INTERFACE_COMMAND;
91 }
92 /* check the previous command is completed */
93 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
94 hicr = er32(HICR);
95 if (!(hicr & E1000_HICR_C))
96 break;
97 mdelay(1);
98 }
99
100 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
101 e_dbg("Previous command timeout failed .\n");
102 return -E1000_ERR_HOST_INTERFACE_COMMAND;
103 }
104
105 return 0;
106}
107
108/**
109 * e1000e_check_mng_mode_generic - check management mode
110 * @hw: pointer to the HW structure
111 *
112 * Reads the firmware semaphore register and returns true (>0) if
113 * manageability is enabled, else false (0).
114 **/
115bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
116{
117 u32 fwsm = er32(FWSM);
118
119 return (fwsm & E1000_FWSM_MODE_MASK) ==
120 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
121}
122
123/**
124 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
125 * @hw: pointer to the HW structure
126 *
127 * Enables packet filtering on transmit packets if manageability is enabled
128 * and host interface is enabled.
129 **/
130bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
131{
132 struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
133 u32 *buffer = (u32 *)&hw->mng_cookie;
134 u32 offset;
135 s32 ret_val, hdr_csum, csum;
136 u8 i, len;
137
138 hw->mac.tx_pkt_filtering = true;
139
140 /* No manageability, no filtering */
141 if (!e1000e_check_mng_mode(hw)) {
142 hw->mac.tx_pkt_filtering = false;
143 goto out;
144 }
145
146 /*
147 * If we can't read from the host interface for whatever
148 * reason, disable filtering.
149 */
150 ret_val = e1000_mng_enable_host_if(hw);
151 if (ret_val) {
152 hw->mac.tx_pkt_filtering = false;
153 goto out;
154 }
155
156 /* Read in the header. Length and offset are in dwords. */
157 len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
158 offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
159 for (i = 0; i < len; i++)
160 *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF,
161 offset + i);
162 hdr_csum = hdr->checksum;
163 hdr->checksum = 0;
164 csum = e1000_calculate_checksum((u8 *)hdr,
165 E1000_MNG_DHCP_COOKIE_LENGTH);
166 /*
167 * If either the checksums or signature don't match, then
168 * the cookie area isn't considered valid, in which case we
169 * take the safe route of assuming Tx filtering is enabled.
170 */
171 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
172 hw->mac.tx_pkt_filtering = true;
173 goto out;
174 }
175
176 /* Cookie area is valid, make the final check for filtering. */
177 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
178 hw->mac.tx_pkt_filtering = false;
179 goto out;
180 }
181
182out:
183 return hw->mac.tx_pkt_filtering;
184}
185
186/**
187 * e1000_mng_write_cmd_header - Writes manageability command header
188 * @hw: pointer to the HW structure
189 * @hdr: pointer to the host interface command header
190 *
191 * Writes the command header after does the checksum calculation.
192 **/
193static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
194 struct e1000_host_mng_command_header *hdr)
195{
196 u16 i, length = sizeof(struct e1000_host_mng_command_header);
197
198 /* Write the whole command header structure with new checksum. */
199
200 hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
201
202 length >>= 2;
203 /* Write the relevant command block into the ram area. */
204 for (i = 0; i < length; i++) {
205 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i));
206 e1e_flush();
207 }
208
209 return 0;
210}
211
212/**
213 * e1000_mng_host_if_write - Write to the manageability host interface
214 * @hw: pointer to the HW structure
215 * @buffer: pointer to the host interface buffer
216 * @length: size of the buffer
217 * @offset: location in the buffer to write to
218 * @sum: sum of the data (not checksum)
219 *
220 * This function writes the buffer content at the offset given on the host if.
221 * It also does alignment considerations to do the writes in most efficient
222 * way. Also fills up the sum of the buffer in *buffer parameter.
223 **/
224static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
225 u16 length, u16 offset, u8 *sum)
226{
227 u8 *tmp;
228 u8 *bufptr = buffer;
229 u32 data = 0;
230 u16 remaining, i, j, prev_bytes;
231
232 /* sum = only sum of the data and it is not checksum */
233
234 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
235 return -E1000_ERR_PARAM;
236
237 tmp = (u8 *)&data;
238 prev_bytes = offset & 0x3;
239 offset >>= 2;
240
241 if (prev_bytes) {
242 data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
243 for (j = prev_bytes; j < sizeof(u32); j++) {
244 *(tmp + j) = *bufptr++;
245 *sum += *(tmp + j);
246 }
247 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
248 length -= j - prev_bytes;
249 offset++;
250 }
251
252 remaining = length & 0x3;
253 length -= remaining;
254
255 /* Calculate length in DWORDs */
256 length >>= 2;
257
258 /*
259 * The device driver writes the relevant command block into the
260 * ram area.
261 */
262 for (i = 0; i < length; i++) {
263 for (j = 0; j < sizeof(u32); j++) {
264 *(tmp + j) = *bufptr++;
265 *sum += *(tmp + j);
266 }
267
268 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
269 }
270 if (remaining) {
271 for (j = 0; j < sizeof(u32); j++) {
272 if (j < remaining)
273 *(tmp + j) = *bufptr++;
274 else
275 *(tmp + j) = 0;
276
277 *sum += *(tmp + j);
278 }
279 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
280 }
281
282 return 0;
283}
284
285/**
286 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
287 * @hw: pointer to the HW structure
288 * @buffer: pointer to the host interface
289 * @length: size of the buffer
290 *
291 * Writes the DHCP information to the host interface.
292 **/
293s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
294{
295 struct e1000_host_mng_command_header hdr;
296 s32 ret_val;
297 u32 hicr;
298
299 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
300 hdr.command_length = length;
301 hdr.reserved1 = 0;
302 hdr.reserved2 = 0;
303 hdr.checksum = 0;
304
305 /* Enable the host interface */
306 ret_val = e1000_mng_enable_host_if(hw);
307 if (ret_val)
308 return ret_val;
309
310 /* Populate the host interface with the contents of "buffer". */
311 ret_val = e1000_mng_host_if_write(hw, buffer, length,
312 sizeof(hdr), &(hdr.checksum));
313 if (ret_val)
314 return ret_val;
315
316 /* Write the manageability command header */
317 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
318 if (ret_val)
319 return ret_val;
320
321 /* Tell the ARC a new command is pending. */
322 hicr = er32(HICR);
323 ew32(HICR, hicr | E1000_HICR_C);
324
325 return 0;
326}
327
328/**
329 * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
330 * @hw: pointer to the HW structure
331 *
332 * Verifies the hardware needs to leave interface enabled so that frames can
333 * be directed to and from the management interface.
334 **/
335bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
336{
337 u32 manc;
338 u32 fwsm, factps;
339 bool ret_val = false;
340
341 manc = er32(MANC);
342
343 if (!(manc & E1000_MANC_RCV_TCO_EN))
344 goto out;
345
346 if (hw->mac.has_fwsm) {
347 fwsm = er32(FWSM);
348 factps = er32(FACTPS);
349
350 if (!(factps & E1000_FACTPS_MNGCG) &&
351 ((fwsm & E1000_FWSM_MODE_MASK) ==
352 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
353 ret_val = true;
354 goto out;
355 }
356 } else if ((hw->mac.type == e1000_82574) ||
357 (hw->mac.type == e1000_82583)) {
358 u16 data;
359
360 factps = er32(FACTPS);
361 e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
362
363 if (!(factps & E1000_FACTPS_MNGCG) &&
364 ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
365 (e1000_mng_mode_pt << 13))) {
366 ret_val = true;
367 goto out;
368 }
369 } else if ((manc & E1000_MANC_SMBUS_EN) &&
370 !(manc & E1000_MANC_ASF_EN)) {
371 ret_val = true;
372 goto out;
373 }
374
375out:
376 return ret_val;
377}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3911401ed65d..1a8dd2f0e609 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION 59#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -487,22 +487,27 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
487 487
488/** 488/**
489 * e1000_rx_checksum - Receive Checksum Offload 489 * e1000_rx_checksum - Receive Checksum Offload
490 * @adapter: board private structure 490 * @adapter: board private structure
491 * @status_err: receive descriptor status and error fields 491 * @status_err: receive descriptor status and error fields
492 * @csum: receive descriptor csum field 492 * @csum: receive descriptor csum field
493 * @sk_buff: socket buffer with received data 493 * @sk_buff: socket buffer with received data
494 **/ 494 **/
495static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 495static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
496 u32 csum, struct sk_buff *skb) 496 __le16 csum, struct sk_buff *skb)
497{ 497{
498 u16 status = (u16)status_err; 498 u16 status = (u16)status_err;
499 u8 errors = (u8)(status_err >> 24); 499 u8 errors = (u8)(status_err >> 24);
500 500
501 skb_checksum_none_assert(skb); 501 skb_checksum_none_assert(skb);
502 502
503 /* Rx checksum disabled */
504 if (!(adapter->netdev->features & NETIF_F_RXCSUM))
505 return;
506
503 /* Ignore Checksum bit is set */ 507 /* Ignore Checksum bit is set */
504 if (status & E1000_RXD_STAT_IXSM) 508 if (status & E1000_RXD_STAT_IXSM)
505 return; 509 return;
510
506 /* TCP/UDP checksum error bit is set */ 511 /* TCP/UDP checksum error bit is set */
507 if (errors & E1000_RXD_ERR_TCPE) { 512 if (errors & E1000_RXD_ERR_TCPE) {
508 /* let the stack verify checksum errors */ 513 /* let the stack verify checksum errors */
@@ -524,7 +529,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
524 * Hardware complements the payload checksum, so we undo it 529 * Hardware complements the payload checksum, so we undo it
525 * and then put the value in host order for further stack use. 530 * and then put the value in host order for further stack use.
526 */ 531 */
527 __sum16 sum = (__force __sum16)htons(csum); 532 __sum16 sum = (__force __sum16)swab16((__force u16)csum);
528 skb->csum = csum_unfold(~sum); 533 skb->csum = csum_unfold(~sum);
529 skb->ip_summed = CHECKSUM_COMPLETE; 534 skb->ip_summed = CHECKSUM_COMPLETE;
530 } 535 }
@@ -545,7 +550,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
545 * which has bit 24 set while ME is accessing Host CSR registers, wait 550 * which has bit 24 set while ME is accessing Host CSR registers, wait
546 * if it is set and try again a number of times. 551 * if it is set and try again a number of times.
547 **/ 552 **/
548static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, 553static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
549 unsigned int i) 554 unsigned int i)
550{ 555{
551 unsigned int j = 0; 556 unsigned int j = 0;
@@ -562,12 +567,12 @@ static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
562 return 0; 567 return 0;
563} 568}
564 569
565static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) 570static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
566{ 571{
567 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); 572 struct e1000_adapter *adapter = rx_ring->adapter;
568 struct e1000_hw *hw = &adapter->hw; 573 struct e1000_hw *hw = &adapter->hw;
569 574
570 if (e1000e_update_tail_wa(hw, tail, i)) { 575 if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) {
571 u32 rctl = er32(RCTL); 576 u32 rctl = er32(RCTL);
572 ew32(RCTL, rctl & ~E1000_RCTL_EN); 577 ew32(RCTL, rctl & ~E1000_RCTL_EN);
573 e_err("ME firmware caused invalid RDT - resetting\n"); 578 e_err("ME firmware caused invalid RDT - resetting\n");
@@ -575,12 +580,12 @@ static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
575 } 580 }
576} 581}
577 582
578static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) 583static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
579{ 584{
580 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); 585 struct e1000_adapter *adapter = tx_ring->adapter;
581 struct e1000_hw *hw = &adapter->hw; 586 struct e1000_hw *hw = &adapter->hw;
582 587
583 if (e1000e_update_tail_wa(hw, tail, i)) { 588 if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) {
584 u32 tctl = er32(TCTL); 589 u32 tctl = er32(TCTL);
585 ew32(TCTL, tctl & ~E1000_TCTL_EN); 590 ew32(TCTL, tctl & ~E1000_TCTL_EN);
586 e_err("ME firmware caused invalid TDT - resetting\n"); 591 e_err("ME firmware caused invalid TDT - resetting\n");
@@ -590,14 +595,14 @@ static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
590 595
591/** 596/**
592 * e1000_alloc_rx_buffers - Replace used receive buffers 597 * e1000_alloc_rx_buffers - Replace used receive buffers
593 * @adapter: address of board private structure 598 * @rx_ring: Rx descriptor ring
594 **/ 599 **/
595static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 600static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
596 int cleaned_count, gfp_t gfp) 601 int cleaned_count, gfp_t gfp)
597{ 602{
603 struct e1000_adapter *adapter = rx_ring->adapter;
598 struct net_device *netdev = adapter->netdev; 604 struct net_device *netdev = adapter->netdev;
599 struct pci_dev *pdev = adapter->pdev; 605 struct pci_dev *pdev = adapter->pdev;
600 struct e1000_ring *rx_ring = adapter->rx_ring;
601 union e1000_rx_desc_extended *rx_desc; 606 union e1000_rx_desc_extended *rx_desc;
602 struct e1000_buffer *buffer_info; 607 struct e1000_buffer *buffer_info;
603 struct sk_buff *skb; 608 struct sk_buff *skb;
@@ -644,9 +649,9 @@ map_skb:
644 */ 649 */
645 wmb(); 650 wmb();
646 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 651 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
647 e1000e_update_rdt_wa(adapter, i); 652 e1000e_update_rdt_wa(rx_ring, i);
648 else 653 else
649 writel(i, adapter->hw.hw_addr + rx_ring->tail); 654 writel(i, rx_ring->tail);
650 } 655 }
651 i++; 656 i++;
652 if (i == rx_ring->count) 657 if (i == rx_ring->count)
@@ -659,15 +664,15 @@ map_skb:
659 664
660/** 665/**
661 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split 666 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
662 * @adapter: address of board private structure 667 * @rx_ring: Rx descriptor ring
663 **/ 668 **/
664static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 669static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
665 int cleaned_count, gfp_t gfp) 670 int cleaned_count, gfp_t gfp)
666{ 671{
672 struct e1000_adapter *adapter = rx_ring->adapter;
667 struct net_device *netdev = adapter->netdev; 673 struct net_device *netdev = adapter->netdev;
668 struct pci_dev *pdev = adapter->pdev; 674 struct pci_dev *pdev = adapter->pdev;
669 union e1000_rx_desc_packet_split *rx_desc; 675 union e1000_rx_desc_packet_split *rx_desc;
670 struct e1000_ring *rx_ring = adapter->rx_ring;
671 struct e1000_buffer *buffer_info; 676 struct e1000_buffer *buffer_info;
672 struct e1000_ps_page *ps_page; 677 struct e1000_ps_page *ps_page;
673 struct sk_buff *skb; 678 struct sk_buff *skb;
@@ -747,10 +752,9 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
747 */ 752 */
748 wmb(); 753 wmb();
749 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 754 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
750 e1000e_update_rdt_wa(adapter, i << 1); 755 e1000e_update_rdt_wa(rx_ring, i << 1);
751 else 756 else
752 writel(i << 1, 757 writel(i << 1, rx_ring->tail);
753 adapter->hw.hw_addr + rx_ring->tail);
754 } 758 }
755 759
756 i++; 760 i++;
@@ -765,17 +769,17 @@ no_buffers:
765 769
766/** 770/**
767 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 771 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
768 * @adapter: address of board private structure 772 * @rx_ring: Rx descriptor ring
769 * @cleaned_count: number of buffers to allocate this pass 773 * @cleaned_count: number of buffers to allocate this pass
770 **/ 774 **/
771 775
772static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 776static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
773 int cleaned_count, gfp_t gfp) 777 int cleaned_count, gfp_t gfp)
774{ 778{
779 struct e1000_adapter *adapter = rx_ring->adapter;
775 struct net_device *netdev = adapter->netdev; 780 struct net_device *netdev = adapter->netdev;
776 struct pci_dev *pdev = adapter->pdev; 781 struct pci_dev *pdev = adapter->pdev;
777 union e1000_rx_desc_extended *rx_desc; 782 union e1000_rx_desc_extended *rx_desc;
778 struct e1000_ring *rx_ring = adapter->rx_ring;
779 struct e1000_buffer *buffer_info; 783 struct e1000_buffer *buffer_info;
780 struct sk_buff *skb; 784 struct sk_buff *skb;
781 unsigned int i; 785 unsigned int i;
@@ -834,26 +838,33 @@ check_page:
834 * such as IA-64). */ 838 * such as IA-64). */
835 wmb(); 839 wmb();
836 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 840 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
837 e1000e_update_rdt_wa(adapter, i); 841 e1000e_update_rdt_wa(rx_ring, i);
838 else 842 else
839 writel(i, adapter->hw.hw_addr + rx_ring->tail); 843 writel(i, rx_ring->tail);
840 } 844 }
841} 845}
842 846
847static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
848 struct sk_buff *skb)
849{
850 if (netdev->features & NETIF_F_RXHASH)
851 skb->rxhash = le32_to_cpu(rss);
852}
853
843/** 854/**
844 * e1000_clean_rx_irq - Send received data up the network stack; legacy 855 * e1000_clean_rx_irq - Send received data up the network stack
845 * @adapter: board private structure 856 * @rx_ring: Rx descriptor ring
846 * 857 *
847 * the return value indicates whether actual cleaning was done, there 858 * the return value indicates whether actual cleaning was done, there
848 * is no guarantee that everything was cleaned 859 * is no guarantee that everything was cleaned
849 **/ 860 **/
850static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 861static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
851 int *work_done, int work_to_do) 862 int work_to_do)
852{ 863{
864 struct e1000_adapter *adapter = rx_ring->adapter;
853 struct net_device *netdev = adapter->netdev; 865 struct net_device *netdev = adapter->netdev;
854 struct pci_dev *pdev = adapter->pdev; 866 struct pci_dev *pdev = adapter->pdev;
855 struct e1000_hw *hw = &adapter->hw; 867 struct e1000_hw *hw = &adapter->hw;
856 struct e1000_ring *rx_ring = adapter->rx_ring;
857 union e1000_rx_desc_extended *rx_desc, *next_rxd; 868 union e1000_rx_desc_extended *rx_desc, *next_rxd;
858 struct e1000_buffer *buffer_info, *next_buffer; 869 struct e1000_buffer *buffer_info, *next_buffer;
859 u32 length, staterr; 870 u32 length, staterr;
@@ -957,8 +968,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
957 968
958 /* Receive Checksum Offload */ 969 /* Receive Checksum Offload */
959 e1000_rx_checksum(adapter, staterr, 970 e1000_rx_checksum(adapter, staterr,
960 le16_to_cpu(rx_desc->wb.lower.hi_dword. 971 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
961 csum_ip.csum), skb); 972
973 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
962 974
963 e1000_receive_skb(adapter, netdev, skb, staterr, 975 e1000_receive_skb(adapter, netdev, skb, staterr,
964 rx_desc->wb.upper.vlan); 976 rx_desc->wb.upper.vlan);
@@ -968,7 +980,7 @@ next_desc:
968 980
969 /* return some buffers to hardware, one at a time is too slow */ 981 /* return some buffers to hardware, one at a time is too slow */
970 if (cleaned_count >= E1000_RX_BUFFER_WRITE) { 982 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
971 adapter->alloc_rx_buf(adapter, cleaned_count, 983 adapter->alloc_rx_buf(rx_ring, cleaned_count,
972 GFP_ATOMIC); 984 GFP_ATOMIC);
973 cleaned_count = 0; 985 cleaned_count = 0;
974 } 986 }
@@ -983,16 +995,18 @@ next_desc:
983 995
984 cleaned_count = e1000_desc_unused(rx_ring); 996 cleaned_count = e1000_desc_unused(rx_ring);
985 if (cleaned_count) 997 if (cleaned_count)
986 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); 998 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
987 999
988 adapter->total_rx_bytes += total_rx_bytes; 1000 adapter->total_rx_bytes += total_rx_bytes;
989 adapter->total_rx_packets += total_rx_packets; 1001 adapter->total_rx_packets += total_rx_packets;
990 return cleaned; 1002 return cleaned;
991} 1003}
992 1004
993static void e1000_put_txbuf(struct e1000_adapter *adapter, 1005static void e1000_put_txbuf(struct e1000_ring *tx_ring,
994 struct e1000_buffer *buffer_info) 1006 struct e1000_buffer *buffer_info)
995{ 1007{
1008 struct e1000_adapter *adapter = tx_ring->adapter;
1009
996 if (buffer_info->dma) { 1010 if (buffer_info->dma) {
997 if (buffer_info->mapped_as_page) 1011 if (buffer_info->mapped_as_page)
998 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1012 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
@@ -1063,8 +1077,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
1063 "PHY 1000BASE-T Status <%x>\n" 1077 "PHY 1000BASE-T Status <%x>\n"
1064 "PHY Extended Status <%x>\n" 1078 "PHY Extended Status <%x>\n"
1065 "PCI Status <%x>\n", 1079 "PCI Status <%x>\n",
1066 readl(adapter->hw.hw_addr + tx_ring->head), 1080 readl(tx_ring->head),
1067 readl(adapter->hw.hw_addr + tx_ring->tail), 1081 readl(tx_ring->tail),
1068 tx_ring->next_to_use, 1082 tx_ring->next_to_use,
1069 tx_ring->next_to_clean, 1083 tx_ring->next_to_clean,
1070 tx_ring->buffer_info[eop].time_stamp, 1084 tx_ring->buffer_info[eop].time_stamp,
@@ -1080,16 +1094,16 @@ static void e1000_print_hw_hang(struct work_struct *work)
1080 1094
1081/** 1095/**
1082 * e1000_clean_tx_irq - Reclaim resources after transmit completes 1096 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1083 * @adapter: board private structure 1097 * @tx_ring: Tx descriptor ring
1084 * 1098 *
1085 * the return value indicates whether actual cleaning was done, there 1099 * the return value indicates whether actual cleaning was done, there
1086 * is no guarantee that everything was cleaned 1100 * is no guarantee that everything was cleaned
1087 **/ 1101 **/
1088static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) 1102static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1089{ 1103{
1104 struct e1000_adapter *adapter = tx_ring->adapter;
1090 struct net_device *netdev = adapter->netdev; 1105 struct net_device *netdev = adapter->netdev;
1091 struct e1000_hw *hw = &adapter->hw; 1106 struct e1000_hw *hw = &adapter->hw;
1092 struct e1000_ring *tx_ring = adapter->tx_ring;
1093 struct e1000_tx_desc *tx_desc, *eop_desc; 1107 struct e1000_tx_desc *tx_desc, *eop_desc;
1094 struct e1000_buffer *buffer_info; 1108 struct e1000_buffer *buffer_info;
1095 unsigned int i, eop; 1109 unsigned int i, eop;
@@ -1119,7 +1133,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1119 } 1133 }
1120 } 1134 }
1121 1135
1122 e1000_put_txbuf(adapter, buffer_info); 1136 e1000_put_txbuf(tx_ring, buffer_info);
1123 tx_desc->upper.data = 0; 1137 tx_desc->upper.data = 0;
1124 1138
1125 i++; 1139 i++;
@@ -1173,19 +1187,19 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1173 1187
1174/** 1188/**
1175 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split 1189 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1176 * @adapter: board private structure 1190 * @rx_ring: Rx descriptor ring
1177 * 1191 *
1178 * the return value indicates whether actual cleaning was done, there 1192 * the return value indicates whether actual cleaning was done, there
1179 * is no guarantee that everything was cleaned 1193 * is no guarantee that everything was cleaned
1180 **/ 1194 **/
1181static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 1195static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1182 int *work_done, int work_to_do) 1196 int work_to_do)
1183{ 1197{
1198 struct e1000_adapter *adapter = rx_ring->adapter;
1184 struct e1000_hw *hw = &adapter->hw; 1199 struct e1000_hw *hw = &adapter->hw;
1185 union e1000_rx_desc_packet_split *rx_desc, *next_rxd; 1200 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1186 struct net_device *netdev = adapter->netdev; 1201 struct net_device *netdev = adapter->netdev;
1187 struct pci_dev *pdev = adapter->pdev; 1202 struct pci_dev *pdev = adapter->pdev;
1188 struct e1000_ring *rx_ring = adapter->rx_ring;
1189 struct e1000_buffer *buffer_info, *next_buffer; 1203 struct e1000_buffer *buffer_info, *next_buffer;
1190 struct e1000_ps_page *ps_page; 1204 struct e1000_ps_page *ps_page;
1191 struct sk_buff *skb; 1205 struct sk_buff *skb;
@@ -1318,8 +1332,10 @@ copydone:
1318 total_rx_bytes += skb->len; 1332 total_rx_bytes += skb->len;
1319 total_rx_packets++; 1333 total_rx_packets++;
1320 1334
1321 e1000_rx_checksum(adapter, staterr, le16_to_cpu( 1335 e1000_rx_checksum(adapter, staterr,
1322 rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); 1336 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
1337
1338 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1323 1339
1324 if (rx_desc->wb.upper.header_status & 1340 if (rx_desc->wb.upper.header_status &
1325 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) 1341 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
@@ -1334,7 +1350,7 @@ next_desc:
1334 1350
1335 /* return some buffers to hardware, one at a time is too slow */ 1351 /* return some buffers to hardware, one at a time is too slow */
1336 if (cleaned_count >= E1000_RX_BUFFER_WRITE) { 1352 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1337 adapter->alloc_rx_buf(adapter, cleaned_count, 1353 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1338 GFP_ATOMIC); 1354 GFP_ATOMIC);
1339 cleaned_count = 0; 1355 cleaned_count = 0;
1340 } 1356 }
@@ -1349,7 +1365,7 @@ next_desc:
1349 1365
1350 cleaned_count = e1000_desc_unused(rx_ring); 1366 cleaned_count = e1000_desc_unused(rx_ring);
1351 if (cleaned_count) 1367 if (cleaned_count)
1352 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); 1368 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1353 1369
1354 adapter->total_rx_bytes += total_rx_bytes; 1370 adapter->total_rx_bytes += total_rx_bytes;
1355 adapter->total_rx_packets += total_rx_packets; 1371 adapter->total_rx_packets += total_rx_packets;
@@ -1375,13 +1391,12 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1375 * the return value indicates whether actual cleaning was done, there 1391 * the return value indicates whether actual cleaning was done, there
1376 * is no guarantee that everything was cleaned 1392 * is no guarantee that everything was cleaned
1377 **/ 1393 **/
1378 1394static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1379static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 1395 int work_to_do)
1380 int *work_done, int work_to_do)
1381{ 1396{
1397 struct e1000_adapter *adapter = rx_ring->adapter;
1382 struct net_device *netdev = adapter->netdev; 1398 struct net_device *netdev = adapter->netdev;
1383 struct pci_dev *pdev = adapter->pdev; 1399 struct pci_dev *pdev = adapter->pdev;
1384 struct e1000_ring *rx_ring = adapter->rx_ring;
1385 union e1000_rx_desc_extended *rx_desc, *next_rxd; 1400 union e1000_rx_desc_extended *rx_desc, *next_rxd;
1386 struct e1000_buffer *buffer_info, *next_buffer; 1401 struct e1000_buffer *buffer_info, *next_buffer;
1387 u32 length, staterr; 1402 u32 length, staterr;
@@ -1491,8 +1506,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1491 1506
1492 /* Receive Checksum Offload XXX recompute due to CRC strip? */ 1507 /* Receive Checksum Offload XXX recompute due to CRC strip? */
1493 e1000_rx_checksum(adapter, staterr, 1508 e1000_rx_checksum(adapter, staterr,
1494 le16_to_cpu(rx_desc->wb.lower.hi_dword. 1509 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
1495 csum_ip.csum), skb); 1510
1511 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1496 1512
1497 /* probably a little skewed due to removing CRC */ 1513 /* probably a little skewed due to removing CRC */
1498 total_rx_bytes += skb->len; 1514 total_rx_bytes += skb->len;
@@ -1513,7 +1529,7 @@ next_desc:
1513 1529
1514 /* return some buffers to hardware, one at a time is too slow */ 1530 /* return some buffers to hardware, one at a time is too slow */
1515 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 1531 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1516 adapter->alloc_rx_buf(adapter, cleaned_count, 1532 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1517 GFP_ATOMIC); 1533 GFP_ATOMIC);
1518 cleaned_count = 0; 1534 cleaned_count = 0;
1519 } 1535 }
@@ -1528,7 +1544,7 @@ next_desc:
1528 1544
1529 cleaned_count = e1000_desc_unused(rx_ring); 1545 cleaned_count = e1000_desc_unused(rx_ring);
1530 if (cleaned_count) 1546 if (cleaned_count)
1531 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); 1547 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1532 1548
1533 adapter->total_rx_bytes += total_rx_bytes; 1549 adapter->total_rx_bytes += total_rx_bytes;
1534 adapter->total_rx_packets += total_rx_packets; 1550 adapter->total_rx_packets += total_rx_packets;
@@ -1537,11 +1553,11 @@ next_desc:
1537 1553
1538/** 1554/**
1539 * e1000_clean_rx_ring - Free Rx Buffers per Queue 1555 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1540 * @adapter: board private structure 1556 * @rx_ring: Rx descriptor ring
1541 **/ 1557 **/
1542static void e1000_clean_rx_ring(struct e1000_adapter *adapter) 1558static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1543{ 1559{
1544 struct e1000_ring *rx_ring = adapter->rx_ring; 1560 struct e1000_adapter *adapter = rx_ring->adapter;
1545 struct e1000_buffer *buffer_info; 1561 struct e1000_buffer *buffer_info;
1546 struct e1000_ps_page *ps_page; 1562 struct e1000_ps_page *ps_page;
1547 struct pci_dev *pdev = adapter->pdev; 1563 struct pci_dev *pdev = adapter->pdev;
@@ -1601,8 +1617,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1601 rx_ring->next_to_use = 0; 1617 rx_ring->next_to_use = 0;
1602 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1618 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1603 1619
1604 writel(0, adapter->hw.hw_addr + rx_ring->head); 1620 writel(0, rx_ring->head);
1605 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1621 writel(0, rx_ring->tail);
1606} 1622}
1607 1623
1608static void e1000e_downshift_workaround(struct work_struct *work) 1624static void e1000e_downshift_workaround(struct work_struct *work)
@@ -1781,7 +1797,7 @@ static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1781 adapter->total_tx_bytes = 0; 1797 adapter->total_tx_bytes = 0;
1782 adapter->total_tx_packets = 0; 1798 adapter->total_tx_packets = 0;
1783 1799
1784 if (!e1000_clean_tx_irq(adapter)) 1800 if (!e1000_clean_tx_irq(tx_ring))
1785 /* Ring was not completely cleaned, so fire another interrupt */ 1801 /* Ring was not completely cleaned, so fire another interrupt */
1786 ew32(ICS, tx_ring->ims_val); 1802 ew32(ICS, tx_ring->ims_val);
1787 1803
@@ -1792,14 +1808,15 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1792{ 1808{
1793 struct net_device *netdev = data; 1809 struct net_device *netdev = data;
1794 struct e1000_adapter *adapter = netdev_priv(netdev); 1810 struct e1000_adapter *adapter = netdev_priv(netdev);
1811 struct e1000_ring *rx_ring = adapter->rx_ring;
1795 1812
1796 /* Write the ITR value calculated at the end of the 1813 /* Write the ITR value calculated at the end of the
1797 * previous interrupt. 1814 * previous interrupt.
1798 */ 1815 */
1799 if (adapter->rx_ring->set_itr) { 1816 if (rx_ring->set_itr) {
1800 writel(1000000000 / (adapter->rx_ring->itr_val * 256), 1817 writel(1000000000 / (rx_ring->itr_val * 256),
1801 adapter->hw.hw_addr + adapter->rx_ring->itr_register); 1818 rx_ring->itr_register);
1802 adapter->rx_ring->set_itr = 0; 1819 rx_ring->set_itr = 0;
1803 } 1820 }
1804 1821
1805 if (napi_schedule_prep(&adapter->napi)) { 1822 if (napi_schedule_prep(&adapter->napi)) {
@@ -1839,9 +1856,9 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
1839 adapter->eiac_mask |= rx_ring->ims_val; 1856 adapter->eiac_mask |= rx_ring->ims_val;
1840 if (rx_ring->itr_val) 1857 if (rx_ring->itr_val)
1841 writel(1000000000 / (rx_ring->itr_val * 256), 1858 writel(1000000000 / (rx_ring->itr_val * 256),
1842 hw->hw_addr + rx_ring->itr_register); 1859 rx_ring->itr_register);
1843 else 1860 else
1844 writel(1, hw->hw_addr + rx_ring->itr_register); 1861 writel(1, rx_ring->itr_register);
1845 ivar = E1000_IVAR_INT_ALLOC_VALID | vector; 1862 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1846 1863
1847 /* Configure Tx vector */ 1864 /* Configure Tx vector */
@@ -1849,9 +1866,9 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
1849 vector++; 1866 vector++;
1850 if (tx_ring->itr_val) 1867 if (tx_ring->itr_val)
1851 writel(1000000000 / (tx_ring->itr_val * 256), 1868 writel(1000000000 / (tx_ring->itr_val * 256),
1852 hw->hw_addr + tx_ring->itr_register); 1869 tx_ring->itr_register);
1853 else 1870 else
1854 writel(1, hw->hw_addr + tx_ring->itr_register); 1871 writel(1, tx_ring->itr_register);
1855 adapter->eiac_mask |= tx_ring->ims_val; 1872 adapter->eiac_mask |= tx_ring->ims_val;
1856 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); 1873 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1857 1874
@@ -1966,7 +1983,8 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1966 netdev); 1983 netdev);
1967 if (err) 1984 if (err)
1968 goto out; 1985 goto out;
1969 adapter->rx_ring->itr_register = E1000_EITR_82574(vector); 1986 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
1987 E1000_EITR_82574(vector);
1970 adapter->rx_ring->itr_val = adapter->itr; 1988 adapter->rx_ring->itr_val = adapter->itr;
1971 vector++; 1989 vector++;
1972 1990
@@ -1981,7 +1999,8 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1981 netdev); 1999 netdev);
1982 if (err) 2000 if (err)
1983 goto out; 2001 goto out;
1984 adapter->tx_ring->itr_register = E1000_EITR_82574(vector); 2002 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2003 E1000_EITR_82574(vector);
1985 adapter->tx_ring->itr_val = adapter->itr; 2004 adapter->tx_ring->itr_val = adapter->itr;
1986 vector++; 2005 vector++;
1987 2006
@@ -2162,13 +2181,13 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2162 2181
2163/** 2182/**
2164 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) 2183 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2165 * @adapter: board private structure 2184 * @tx_ring: Tx descriptor ring
2166 * 2185 *
2167 * Return 0 on success, negative on failure 2186 * Return 0 on success, negative on failure
2168 **/ 2187 **/
2169int e1000e_setup_tx_resources(struct e1000_adapter *adapter) 2188int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2170{ 2189{
2171 struct e1000_ring *tx_ring = adapter->tx_ring; 2190 struct e1000_adapter *adapter = tx_ring->adapter;
2172 int err = -ENOMEM, size; 2191 int err = -ENOMEM, size;
2173 2192
2174 size = sizeof(struct e1000_buffer) * tx_ring->count; 2193 size = sizeof(struct e1000_buffer) * tx_ring->count;
@@ -2196,13 +2215,13 @@ err:
2196 2215
2197/** 2216/**
2198 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) 2217 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2199 * @adapter: board private structure 2218 * @rx_ring: Rx descriptor ring
2200 * 2219 *
2201 * Returns 0 on success, negative on failure 2220 * Returns 0 on success, negative on failure
2202 **/ 2221 **/
2203int e1000e_setup_rx_resources(struct e1000_adapter *adapter) 2222int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2204{ 2223{
2205 struct e1000_ring *rx_ring = adapter->rx_ring; 2224 struct e1000_adapter *adapter = rx_ring->adapter;
2206 struct e1000_buffer *buffer_info; 2225 struct e1000_buffer *buffer_info;
2207 int i, size, desc_len, err = -ENOMEM; 2226 int i, size, desc_len, err = -ENOMEM;
2208 2227
@@ -2249,18 +2268,18 @@ err:
2249 2268
2250/** 2269/**
2251 * e1000_clean_tx_ring - Free Tx Buffers 2270 * e1000_clean_tx_ring - Free Tx Buffers
2252 * @adapter: board private structure 2271 * @tx_ring: Tx descriptor ring
2253 **/ 2272 **/
2254static void e1000_clean_tx_ring(struct e1000_adapter *adapter) 2273static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2255{ 2274{
2256 struct e1000_ring *tx_ring = adapter->tx_ring; 2275 struct e1000_adapter *adapter = tx_ring->adapter;
2257 struct e1000_buffer *buffer_info; 2276 struct e1000_buffer *buffer_info;
2258 unsigned long size; 2277 unsigned long size;
2259 unsigned int i; 2278 unsigned int i;
2260 2279
2261 for (i = 0; i < tx_ring->count; i++) { 2280 for (i = 0; i < tx_ring->count; i++) {
2262 buffer_info = &tx_ring->buffer_info[i]; 2281 buffer_info = &tx_ring->buffer_info[i];
2263 e1000_put_txbuf(adapter, buffer_info); 2282 e1000_put_txbuf(tx_ring, buffer_info);
2264 } 2283 }
2265 2284
2266 netdev_reset_queue(adapter->netdev); 2285 netdev_reset_queue(adapter->netdev);
@@ -2272,22 +2291,22 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
2272 tx_ring->next_to_use = 0; 2291 tx_ring->next_to_use = 0;
2273 tx_ring->next_to_clean = 0; 2292 tx_ring->next_to_clean = 0;
2274 2293
2275 writel(0, adapter->hw.hw_addr + tx_ring->head); 2294 writel(0, tx_ring->head);
2276 writel(0, adapter->hw.hw_addr + tx_ring->tail); 2295 writel(0, tx_ring->tail);
2277} 2296}
2278 2297
2279/** 2298/**
2280 * e1000e_free_tx_resources - Free Tx Resources per Queue 2299 * e1000e_free_tx_resources - Free Tx Resources per Queue
2281 * @adapter: board private structure 2300 * @tx_ring: Tx descriptor ring
2282 * 2301 *
2283 * Free all transmit software resources 2302 * Free all transmit software resources
2284 **/ 2303 **/
2285void e1000e_free_tx_resources(struct e1000_adapter *adapter) 2304void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2286{ 2305{
2306 struct e1000_adapter *adapter = tx_ring->adapter;
2287 struct pci_dev *pdev = adapter->pdev; 2307 struct pci_dev *pdev = adapter->pdev;
2288 struct e1000_ring *tx_ring = adapter->tx_ring;
2289 2308
2290 e1000_clean_tx_ring(adapter); 2309 e1000_clean_tx_ring(tx_ring);
2291 2310
2292 vfree(tx_ring->buffer_info); 2311 vfree(tx_ring->buffer_info);
2293 tx_ring->buffer_info = NULL; 2312 tx_ring->buffer_info = NULL;
@@ -2299,18 +2318,17 @@ void e1000e_free_tx_resources(struct e1000_adapter *adapter)
2299 2318
2300/** 2319/**
2301 * e1000e_free_rx_resources - Free Rx Resources 2320 * e1000e_free_rx_resources - Free Rx Resources
2302 * @adapter: board private structure 2321 * @rx_ring: Rx descriptor ring
2303 * 2322 *
2304 * Free all receive software resources 2323 * Free all receive software resources
2305 **/ 2324 **/
2306 2325void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2307void e1000e_free_rx_resources(struct e1000_adapter *adapter)
2308{ 2326{
2327 struct e1000_adapter *adapter = rx_ring->adapter;
2309 struct pci_dev *pdev = adapter->pdev; 2328 struct pci_dev *pdev = adapter->pdev;
2310 struct e1000_ring *rx_ring = adapter->rx_ring;
2311 int i; 2329 int i;
2312 2330
2313 e1000_clean_rx_ring(adapter); 2331 e1000_clean_rx_ring(rx_ring);
2314 2332
2315 for (i = 0; i < rx_ring->count; i++) 2333 for (i = 0; i < rx_ring->count; i++)
2316 kfree(rx_ring->buffer_info[i].ps_pages); 2334 kfree(rx_ring->buffer_info[i].ps_pages);
@@ -2464,13 +2482,19 @@ set_itr_now:
2464 **/ 2482 **/
2465static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) 2483static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2466{ 2484{
2467 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2485 int size = sizeof(struct e1000_ring);
2486
2487 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2468 if (!adapter->tx_ring) 2488 if (!adapter->tx_ring)
2469 goto err; 2489 goto err;
2490 adapter->tx_ring->count = adapter->tx_ring_count;
2491 adapter->tx_ring->adapter = adapter;
2470 2492
2471 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2493 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2472 if (!adapter->rx_ring) 2494 if (!adapter->rx_ring)
2473 goto err; 2495 goto err;
2496 adapter->rx_ring->count = adapter->rx_ring_count;
2497 adapter->rx_ring->adapter = adapter;
2474 2498
2475 return 0; 2499 return 0;
2476err: 2500err:
@@ -2498,10 +2522,10 @@ static int e1000_clean(struct napi_struct *napi, int budget)
2498 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2522 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2499 goto clean_rx; 2523 goto clean_rx;
2500 2524
2501 tx_cleaned = e1000_clean_tx_irq(adapter); 2525 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2502 2526
2503clean_rx: 2527clean_rx:
2504 adapter->clean_rx(adapter, &work_done, budget); 2528 adapter->clean_rx(adapter->rx_ring, &work_done, budget);
2505 2529
2506 if (!tx_cleaned) 2530 if (!tx_cleaned)
2507 work_done = budget; 2531 work_done = budget;
@@ -2746,8 +2770,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2746 struct e1000_hw *hw = &adapter->hw; 2770 struct e1000_hw *hw = &adapter->hw;
2747 struct e1000_ring *tx_ring = adapter->tx_ring; 2771 struct e1000_ring *tx_ring = adapter->tx_ring;
2748 u64 tdba; 2772 u64 tdba;
2749 u32 tdlen, tctl, tipg, tarc; 2773 u32 tdlen, tarc;
2750 u32 ipgr1, ipgr2;
2751 2774
2752 /* Setup the HW Tx Head and Tail descriptor pointers */ 2775 /* Setup the HW Tx Head and Tail descriptor pointers */
2753 tdba = tx_ring->dma; 2776 tdba = tx_ring->dma;
@@ -2757,20 +2780,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2757 ew32(TDLEN, tdlen); 2780 ew32(TDLEN, tdlen);
2758 ew32(TDH, 0); 2781 ew32(TDH, 0);
2759 ew32(TDT, 0); 2782 ew32(TDT, 0);
2760 tx_ring->head = E1000_TDH; 2783 tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
2761 tx_ring->tail = E1000_TDT; 2784 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;
2762
2763 /* Set the default values for the Tx Inter Packet Gap timer */
2764 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
2765 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
2766 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
2767
2768 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
2769 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
2770
2771 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
2772 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
2773 ew32(TIPG, tipg);
2774 2785
2775 /* Set the Tx Interrupt Delay register */ 2786 /* Set the Tx Interrupt Delay register */
2776 ew32(TIDV, adapter->tx_int_delay); 2787 ew32(TIDV, adapter->tx_int_delay);
@@ -2793,15 +2804,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2793 */ 2804 */
2794 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; 2805 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2795 ew32(TXDCTL(0), txdctl); 2806 ew32(TXDCTL(0), txdctl);
2796 /* erratum work around: set txdctl the same for both queues */
2797 ew32(TXDCTL(1), txdctl);
2798 } 2807 }
2799 2808 /* erratum work around: set txdctl the same for both queues */
2800 /* Program the Transmit Control Register */ 2809 ew32(TXDCTL(1), er32(TXDCTL(0)));
2801 tctl = er32(TCTL);
2802 tctl &= ~E1000_TCTL_CT;
2803 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2804 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2805 2810
2806 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2811 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2807 tarc = er32(TARC(0)); 2812 tarc = er32(TARC(0));
@@ -2834,8 +2839,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2834 /* enable Report Status bit */ 2839 /* enable Report Status bit */
2835 adapter->txd_cmd |= E1000_TXD_CMD_RS; 2840 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2836 2841
2837 ew32(TCTL, tctl);
2838
2839 e1000e_config_collision_dist(hw); 2842 e1000e_config_collision_dist(hw);
2840} 2843}
2841 2844
@@ -2944,8 +2947,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2944 * per packet. 2947 * per packet.
2945 */ 2948 */
2946 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 2949 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2947 if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && 2950 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2948 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2949 adapter->rx_ps_pages = pages; 2951 adapter->rx_ps_pages = pages;
2950 else 2952 else
2951 adapter->rx_ps_pages = 0; 2953 adapter->rx_ps_pages = 0;
@@ -3072,8 +3074,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3072 ew32(RDLEN, rdlen); 3074 ew32(RDLEN, rdlen);
3073 ew32(RDH, 0); 3075 ew32(RDH, 0);
3074 ew32(RDT, 0); 3076 ew32(RDT, 0);
3075 rx_ring->head = E1000_RDH; 3077 rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
3076 rx_ring->tail = E1000_RDT; 3078 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;
3077 3079
3078 /* Enable Receive Checksum Offload for TCP and UDP */ 3080 /* Enable Receive Checksum Offload for TCP and UDP */
3079 rxcsum = er32(RXCSUM); 3081 rxcsum = er32(RXCSUM);
@@ -3092,23 +3094,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3092 } 3094 }
3093 ew32(RXCSUM, rxcsum); 3095 ew32(RXCSUM, rxcsum);
3094 3096
3095 /* 3097 if (adapter->hw.mac.type == e1000_pch2lan) {
3096 * Enable early receives on supported devices, only takes effect when 3098 /*
3097 * packet size is equal or larger than the specified value (in 8 byte 3099 * With jumbo frames, excessive C-state transition
3098 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 3100 * latencies result in dropped transactions.
3099 */ 3101 */
3100 if ((adapter->flags & FLAG_HAS_ERT) ||
3101 (adapter->hw.mac.type == e1000_pch2lan)) {
3102 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3102 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3103 u32 rxdctl = er32(RXDCTL(0)); 3103 u32 rxdctl = er32(RXDCTL(0));
3104 ew32(RXDCTL(0), rxdctl | 0x3); 3104 ew32(RXDCTL(0), rxdctl | 0x3);
3105 if (adapter->flags & FLAG_HAS_ERT)
3106 ew32(ERT, E1000_ERT_2048 | (1 << 13));
3107 /*
3108 * With jumbo frames and early-receive enabled,
3109 * excessive C-state transition latencies result in
3110 * dropped transactions.
3111 */
3112 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); 3105 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
3113 } else { 3106 } else {
3114 pm_qos_update_request(&adapter->netdev->pm_qos_req, 3107 pm_qos_update_request(&adapter->netdev->pm_qos_req,
@@ -3268,22 +3261,62 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
3268 e1000e_vlan_strip_disable(adapter); 3261 e1000e_vlan_strip_disable(adapter);
3269} 3262}
3270 3263
3264static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3265{
3266 struct e1000_hw *hw = &adapter->hw;
3267 u32 mrqc, rxcsum;
3268 int i;
3269 static const u32 rsskey[10] = {
3270 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3271 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3272 };
3273
3274 /* Fill out hash function seed */
3275 for (i = 0; i < 10; i++)
3276 ew32(RSSRK(i), rsskey[i]);
3277
3278 /* Direct all traffic to queue 0 */
3279 for (i = 0; i < 32; i++)
3280 ew32(RETA(i), 0);
3281
3282 /*
3283 * Disable raw packet checksumming so that RSS hash is placed in
3284 * descriptor on writeback.
3285 */
3286 rxcsum = er32(RXCSUM);
3287 rxcsum |= E1000_RXCSUM_PCSD;
3288
3289 ew32(RXCSUM, rxcsum);
3290
3291 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3292 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3293 E1000_MRQC_RSS_FIELD_IPV6 |
3294 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3295 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3296
3297 ew32(MRQC, mrqc);
3298}
3299
3271/** 3300/**
3272 * e1000_configure - configure the hardware for Rx and Tx 3301 * e1000_configure - configure the hardware for Rx and Tx
3273 * @adapter: private board structure 3302 * @adapter: private board structure
3274 **/ 3303 **/
3275static void e1000_configure(struct e1000_adapter *adapter) 3304static void e1000_configure(struct e1000_adapter *adapter)
3276{ 3305{
3306 struct e1000_ring *rx_ring = adapter->rx_ring;
3307
3277 e1000e_set_rx_mode(adapter->netdev); 3308 e1000e_set_rx_mode(adapter->netdev);
3278 3309
3279 e1000_restore_vlan(adapter); 3310 e1000_restore_vlan(adapter);
3280 e1000_init_manageability_pt(adapter); 3311 e1000_init_manageability_pt(adapter);
3281 3312
3282 e1000_configure_tx(adapter); 3313 e1000_configure_tx(adapter);
3314
3315 if (adapter->netdev->features & NETIF_F_RXHASH)
3316 e1000e_setup_rss_hash(adapter);
3283 e1000_setup_rctl(adapter); 3317 e1000_setup_rctl(adapter);
3284 e1000_configure_rx(adapter); 3318 e1000_configure_rx(adapter);
3285 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), 3319 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3286 GFP_KERNEL);
3287} 3320}
3288 3321
3289/** 3322/**
@@ -3379,9 +3412,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3379 * if short on Rx space, Rx wins and must trump Tx 3412 * if short on Rx space, Rx wins and must trump Tx
3380 * adjustment or use Early Receive if available 3413 * adjustment or use Early Receive if available
3381 */ 3414 */
3382 if ((pba < min_rx_space) && 3415 if (pba < min_rx_space)
3383 (!(adapter->flags & FLAG_HAS_ERT)))
3384 /* ERT enabled in e1000_configure_rx */
3385 pba = min_rx_space; 3416 pba = min_rx_space;
3386 } 3417 }
3387 3418
@@ -3395,8 +3426,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
3395 * (or the size used for early receive) above it in the Rx FIFO. 3426 * (or the size used for early receive) above it in the Rx FIFO.
3396 * Set it to the lower of: 3427 * Set it to the lower of:
3397 * - 90% of the Rx FIFO size, and 3428 * - 90% of the Rx FIFO size, and
3398 * - the full Rx FIFO size minus the early receive size (for parts
3399 * with ERT support assuming ERT set to E1000_ERT_2048), or
3400 * - the full Rx FIFO size minus one full frame 3429 * - the full Rx FIFO size minus one full frame
3401 */ 3430 */
3402 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 3431 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
@@ -3407,14 +3436,19 @@ void e1000e_reset(struct e1000_adapter *adapter)
3407 fc->current_mode = fc->requested_mode; 3436 fc->current_mode = fc->requested_mode;
3408 3437
3409 switch (hw->mac.type) { 3438 switch (hw->mac.type) {
3439 case e1000_ich9lan:
3440 case e1000_ich10lan:
3441 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3442 pba = 14;
3443 ew32(PBA, pba);
3444 fc->high_water = 0x2800;
3445 fc->low_water = fc->high_water - 8;
3446 break;
3447 }
3448 /* fall-through */
3410 default: 3449 default:
3411 if ((adapter->flags & FLAG_HAS_ERT) && 3450 hwm = min(((pba << 10) * 9 / 10),
3412 (adapter->netdev->mtu > ETH_DATA_LEN)) 3451 ((pba << 10) - adapter->max_frame_size));
3413 hwm = min(((pba << 10) * 9 / 10),
3414 ((pba << 10) - (E1000_ERT_2048 << 3)));
3415 else
3416 hwm = min(((pba << 10) * 9 / 10),
3417 ((pba << 10) - adapter->max_frame_size));
3418 3452
3419 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 3453 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3420 fc->low_water = fc->high_water - 8; 3454 fc->low_water = fc->high_water - 8;
@@ -3447,11 +3481,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
3447 3481
3448 /* 3482 /*
3449 * Disable Adaptive Interrupt Moderation if 2 full packets cannot 3483 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3450 * fit in receive buffer and early-receive not supported. 3484 * fit in receive buffer.
3451 */ 3485 */
3452 if (adapter->itr_setting & 0x3) { 3486 if (adapter->itr_setting & 0x3) {
3453 if (((adapter->max_frame_size * 2) > (pba << 10)) && 3487 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3454 !(adapter->flags & FLAG_HAS_ERT)) {
3455 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { 3488 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3456 dev_info(&adapter->pdev->dev, 3489 dev_info(&adapter->pdev->dev,
3457 "Interrupt Throttle Rate turned off\n"); 3490 "Interrupt Throttle Rate turned off\n");
@@ -3593,8 +3626,8 @@ void e1000e_down(struct e1000_adapter *adapter)
3593 spin_unlock(&adapter->stats64_lock); 3626 spin_unlock(&adapter->stats64_lock);
3594 3627
3595 e1000e_flush_descriptors(adapter); 3628 e1000e_flush_descriptors(adapter);
3596 e1000_clean_tx_ring(adapter); 3629 e1000_clean_tx_ring(adapter->tx_ring);
3597 e1000_clean_rx_ring(adapter); 3630 e1000_clean_rx_ring(adapter->rx_ring);
3598 3631
3599 adapter->link_speed = 0; 3632 adapter->link_speed = 0;
3600 adapter->link_duplex = 0; 3633 adapter->link_duplex = 0;
@@ -3634,6 +3667,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3634 adapter->rx_ps_bsize0 = 128; 3667 adapter->rx_ps_bsize0 = 128;
3635 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3668 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3636 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3669 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3670 adapter->tx_ring_count = E1000_DEFAULT_TXD;
3671 adapter->rx_ring_count = E1000_DEFAULT_RXD;
3637 3672
3638 spin_lock_init(&adapter->stats64_lock); 3673 spin_lock_init(&adapter->stats64_lock);
3639 3674
@@ -3792,12 +3827,12 @@ static int e1000_open(struct net_device *netdev)
3792 netif_carrier_off(netdev); 3827 netif_carrier_off(netdev);
3793 3828
3794 /* allocate transmit descriptors */ 3829 /* allocate transmit descriptors */
3795 err = e1000e_setup_tx_resources(adapter); 3830 err = e1000e_setup_tx_resources(adapter->tx_ring);
3796 if (err) 3831 if (err)
3797 goto err_setup_tx; 3832 goto err_setup_tx;
3798 3833
3799 /* allocate receive descriptors */ 3834 /* allocate receive descriptors */
3800 err = e1000e_setup_rx_resources(adapter); 3835 err = e1000e_setup_rx_resources(adapter->rx_ring);
3801 if (err) 3836 if (err)
3802 goto err_setup_rx; 3837 goto err_setup_rx;
3803 3838
@@ -3817,9 +3852,8 @@ static int e1000_open(struct net_device *netdev)
3817 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 3852 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3818 e1000_update_mng_vlan(adapter); 3853 e1000_update_mng_vlan(adapter);
3819 3854
3820 /* DMA latency requirement to workaround early-receive/jumbo issue */ 3855 /* DMA latency requirement to workaround jumbo issue */
3821 if ((adapter->flags & FLAG_HAS_ERT) || 3856 if (adapter->hw.mac.type == e1000_pch2lan)
3822 (adapter->hw.mac.type == e1000_pch2lan))
3823 pm_qos_add_request(&adapter->netdev->pm_qos_req, 3857 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3824 PM_QOS_CPU_DMA_LATENCY, 3858 PM_QOS_CPU_DMA_LATENCY,
3825 PM_QOS_DEFAULT_VALUE); 3859 PM_QOS_DEFAULT_VALUE);
@@ -3873,9 +3907,9 @@ static int e1000_open(struct net_device *netdev)
3873err_req_irq: 3907err_req_irq:
3874 e1000e_release_hw_control(adapter); 3908 e1000e_release_hw_control(adapter);
3875 e1000_power_down_phy(adapter); 3909 e1000_power_down_phy(adapter);
3876 e1000e_free_rx_resources(adapter); 3910 e1000e_free_rx_resources(adapter->rx_ring);
3877err_setup_rx: 3911err_setup_rx:
3878 e1000e_free_tx_resources(adapter); 3912 e1000e_free_tx_resources(adapter->tx_ring);
3879err_setup_tx: 3913err_setup_tx:
3880 e1000e_reset(adapter); 3914 e1000e_reset(adapter);
3881 pm_runtime_put_sync(&pdev->dev); 3915 pm_runtime_put_sync(&pdev->dev);
@@ -3911,8 +3945,8 @@ static int e1000_close(struct net_device *netdev)
3911 } 3945 }
3912 e1000_power_down_phy(adapter); 3946 e1000_power_down_phy(adapter);
3913 3947
3914 e1000e_free_tx_resources(adapter); 3948 e1000e_free_tx_resources(adapter->tx_ring);
3915 e1000e_free_rx_resources(adapter); 3949 e1000e_free_rx_resources(adapter->rx_ring);
3916 3950
3917 /* 3951 /*
3918 * kill manageability vlan ID if supported, but not if a vlan with 3952 * kill manageability vlan ID if supported, but not if a vlan with
@@ -3930,8 +3964,7 @@ static int e1000_close(struct net_device *netdev)
3930 !test_bit(__E1000_TESTING, &adapter->state)) 3964 !test_bit(__E1000_TESTING, &adapter->state))
3931 e1000e_release_hw_control(adapter); 3965 e1000e_release_hw_control(adapter);
3932 3966
3933 if ((adapter->flags & FLAG_HAS_ERT) || 3967 if (adapter->hw.mac.type == e1000_pch2lan)
3934 (adapter->hw.mac.type == e1000_pch2lan))
3935 pm_qos_remove_request(&adapter->netdev->pm_qos_req); 3968 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
3936 3969
3937 pm_runtime_put_sync(&pdev->dev); 3970 pm_runtime_put_sync(&pdev->dev);
@@ -4569,10 +4602,8 @@ link_up:
4569#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 4602#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4570#define E1000_TX_FLAGS_VLAN_SHIFT 16 4603#define E1000_TX_FLAGS_VLAN_SHIFT 16
4571 4604
4572static int e1000_tso(struct e1000_adapter *adapter, 4605static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4573 struct sk_buff *skb)
4574{ 4606{
4575 struct e1000_ring *tx_ring = adapter->tx_ring;
4576 struct e1000_context_desc *context_desc; 4607 struct e1000_context_desc *context_desc;
4577 struct e1000_buffer *buffer_info; 4608 struct e1000_buffer *buffer_info;
4578 unsigned int i; 4609 unsigned int i;
@@ -4641,9 +4672,9 @@ static int e1000_tso(struct e1000_adapter *adapter,
4641 return 1; 4672 return 1;
4642} 4673}
4643 4674
4644static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) 4675static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
4645{ 4676{
4646 struct e1000_ring *tx_ring = adapter->tx_ring; 4677 struct e1000_adapter *adapter = tx_ring->adapter;
4647 struct e1000_context_desc *context_desc; 4678 struct e1000_context_desc *context_desc;
4648 struct e1000_buffer *buffer_info; 4679 struct e1000_buffer *buffer_info;
4649 unsigned int i; 4680 unsigned int i;
@@ -4704,12 +4735,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
4704#define E1000_MAX_PER_TXD 8192 4735#define E1000_MAX_PER_TXD 8192
4705#define E1000_MAX_TXD_PWR 12 4736#define E1000_MAX_TXD_PWR 12
4706 4737
4707static int e1000_tx_map(struct e1000_adapter *adapter, 4738static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
4708 struct sk_buff *skb, unsigned int first, 4739 unsigned int first, unsigned int max_per_txd,
4709 unsigned int max_per_txd, unsigned int nr_frags, 4740 unsigned int nr_frags, unsigned int mss)
4710 unsigned int mss)
4711{ 4741{
4712 struct e1000_ring *tx_ring = adapter->tx_ring; 4742 struct e1000_adapter *adapter = tx_ring->adapter;
4713 struct pci_dev *pdev = adapter->pdev; 4743 struct pci_dev *pdev = adapter->pdev;
4714 struct e1000_buffer *buffer_info; 4744 struct e1000_buffer *buffer_info;
4715 unsigned int len = skb_headlen(skb); 4745 unsigned int len = skb_headlen(skb);
@@ -4795,16 +4825,15 @@ dma_error:
4795 i += tx_ring->count; 4825 i += tx_ring->count;
4796 i--; 4826 i--;
4797 buffer_info = &tx_ring->buffer_info[i]; 4827 buffer_info = &tx_ring->buffer_info[i];
4798 e1000_put_txbuf(adapter, buffer_info); 4828 e1000_put_txbuf(tx_ring, buffer_info);
4799 } 4829 }
4800 4830
4801 return 0; 4831 return 0;
4802} 4832}
4803 4833
4804static void e1000_tx_queue(struct e1000_adapter *adapter, 4834static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
4805 int tx_flags, int count)
4806{ 4835{
4807 struct e1000_ring *tx_ring = adapter->tx_ring; 4836 struct e1000_adapter *adapter = tx_ring->adapter;
4808 struct e1000_tx_desc *tx_desc = NULL; 4837 struct e1000_tx_desc *tx_desc = NULL;
4809 struct e1000_buffer *buffer_info; 4838 struct e1000_buffer *buffer_info;
4810 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 4839 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -4857,9 +4886,9 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
4857 tx_ring->next_to_use = i; 4886 tx_ring->next_to_use = i;
4858 4887
4859 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 4888 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4860 e1000e_update_tdt_wa(adapter, i); 4889 e1000e_update_tdt_wa(tx_ring, i);
4861 else 4890 else
4862 writel(i, adapter->hw.hw_addr + tx_ring->tail); 4891 writel(i, tx_ring->tail);
4863 4892
4864 /* 4893 /*
4865 * we need this if more than one processor can write to our tail 4894 * we need this if more than one processor can write to our tail
@@ -4907,11 +4936,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4907 return 0; 4936 return 0;
4908} 4937}
4909 4938
4910static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 4939static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
4911{ 4940{
4912 struct e1000_adapter *adapter = netdev_priv(netdev); 4941 struct e1000_adapter *adapter = tx_ring->adapter;
4913 4942
4914 netif_stop_queue(netdev); 4943 netif_stop_queue(adapter->netdev);
4915 /* 4944 /*
4916 * Herbert's original patch had: 4945 * Herbert's original patch had:
4917 * smp_mb__after_netif_stop_queue(); 4946 * smp_mb__after_netif_stop_queue();
@@ -4923,22 +4952,20 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
4923 * We need to check again in a case another CPU has just 4952 * We need to check again in a case another CPU has just
4924 * made room available. 4953 * made room available.
4925 */ 4954 */
4926 if (e1000_desc_unused(adapter->tx_ring) < size) 4955 if (e1000_desc_unused(tx_ring) < size)
4927 return -EBUSY; 4956 return -EBUSY;
4928 4957
4929 /* A reprieve! */ 4958 /* A reprieve! */
4930 netif_start_queue(netdev); 4959 netif_start_queue(adapter->netdev);
4931 ++adapter->restart_queue; 4960 ++adapter->restart_queue;
4932 return 0; 4961 return 0;
4933} 4962}
4934 4963
4935static int e1000_maybe_stop_tx(struct net_device *netdev, int size) 4964static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
4936{ 4965{
4937 struct e1000_adapter *adapter = netdev_priv(netdev); 4966 if (e1000_desc_unused(tx_ring) >= size)
4938
4939 if (e1000_desc_unused(adapter->tx_ring) >= size)
4940 return 0; 4967 return 0;
4941 return __e1000_maybe_stop_tx(netdev, size); 4968 return __e1000_maybe_stop_tx(tx_ring, size);
4942} 4969}
4943 4970
4944#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 4971#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
@@ -5024,7 +5051,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5024 * need: count + 2 desc gap to keep tail from touching 5051 * need: count + 2 desc gap to keep tail from touching
5025 * head, otherwise try next time 5052 * head, otherwise try next time
5026 */ 5053 */
5027 if (e1000_maybe_stop_tx(netdev, count + 2)) 5054 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5028 return NETDEV_TX_BUSY; 5055 return NETDEV_TX_BUSY;
5029 5056
5030 if (vlan_tx_tag_present(skb)) { 5057 if (vlan_tx_tag_present(skb)) {
@@ -5034,7 +5061,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5034 5061
5035 first = tx_ring->next_to_use; 5062 first = tx_ring->next_to_use;
5036 5063
5037 tso = e1000_tso(adapter, skb); 5064 tso = e1000_tso(tx_ring, skb);
5038 if (tso < 0) { 5065 if (tso < 0) {
5039 dev_kfree_skb_any(skb); 5066 dev_kfree_skb_any(skb);
5040 return NETDEV_TX_OK; 5067 return NETDEV_TX_OK;
@@ -5042,7 +5069,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5042 5069
5043 if (tso) 5070 if (tso)
5044 tx_flags |= E1000_TX_FLAGS_TSO; 5071 tx_flags |= E1000_TX_FLAGS_TSO;
5045 else if (e1000_tx_csum(adapter, skb)) 5072 else if (e1000_tx_csum(tx_ring, skb))
5046 tx_flags |= E1000_TX_FLAGS_CSUM; 5073 tx_flags |= E1000_TX_FLAGS_CSUM;
5047 5074
5048 /* 5075 /*
@@ -5054,12 +5081,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5054 tx_flags |= E1000_TX_FLAGS_IPV4; 5081 tx_flags |= E1000_TX_FLAGS_IPV4;
5055 5082
5056 /* if count is 0 then mapping error has occurred */ 5083 /* if count is 0 then mapping error has occurred */
5057 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); 5084 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
5058 if (count) { 5085 if (count) {
5059 netdev_sent_queue(netdev, skb->len); 5086 netdev_sent_queue(netdev, skb->len);
5060 e1000_tx_queue(adapter, tx_flags, count); 5087 e1000_tx_queue(tx_ring, tx_flags, count);
5061 /* Make sure there is space in the ring for the next send. */ 5088 /* Make sure there is space in the ring for the next send. */
5062 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); 5089 e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
5063 5090
5064 } else { 5091 } else {
5065 dev_kfree_skb_any(skb); 5092 dev_kfree_skb_any(skb);
@@ -5165,10 +5192,22 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5165 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5192 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5166 5193
5167 /* Jumbo frame support */ 5194 /* Jumbo frame support */
5168 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && 5195 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
5169 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 5196 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5170 e_err("Jumbo Frames not supported.\n"); 5197 e_err("Jumbo Frames not supported.\n");
5171 return -EINVAL; 5198 return -EINVAL;
5199 }
5200
5201 /*
5202 * IP payload checksum (enabled with jumbos/packet-split when
5203 * Rx checksum is enabled) and generation of RSS hash is
5204 * mutually exclusive in the hardware.
5205 */
5206 if ((netdev->features & NETIF_F_RXCSUM) &&
5207 (netdev->features & NETIF_F_RXHASH)) {
5208 e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
5209 return -EINVAL;
5210 }
5172 } 5211 }
5173 5212
5174 /* Supported frame sizes */ 5213 /* Supported frame sizes */
@@ -5908,7 +5947,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
5908 ret_val = e1000_read_pba_string_generic(hw, pba_str, 5947 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5909 E1000_PBANUM_LENGTH); 5948 E1000_PBANUM_LENGTH);
5910 if (ret_val) 5949 if (ret_val)
5911 strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); 5950 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
5912 e_info("MAC: %d, PHY: %d, PBA No: %s\n", 5951 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5913 hw->mac.type, hw->phy.type, pba_str); 5952 hw->mac.type, hw->phy.type, pba_str);
5914} 5953}
@@ -5931,7 +5970,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5931} 5970}
5932 5971
5933static int e1000_set_features(struct net_device *netdev, 5972static int e1000_set_features(struct net_device *netdev,
5934 netdev_features_t features) 5973 netdev_features_t features)
5935{ 5974{
5936 struct e1000_adapter *adapter = netdev_priv(netdev); 5975 struct e1000_adapter *adapter = netdev_priv(netdev);
5937 netdev_features_t changed = features ^ netdev->features; 5976 netdev_features_t changed = features ^ netdev->features;
@@ -5940,9 +5979,22 @@ static int e1000_set_features(struct net_device *netdev,
5940 adapter->flags |= FLAG_TSO_FORCE; 5979 adapter->flags |= FLAG_TSO_FORCE;
5941 5980
5942 if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | 5981 if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
5943 NETIF_F_RXCSUM))) 5982 NETIF_F_RXCSUM | NETIF_F_RXHASH)))
5944 return 0; 5983 return 0;
5945 5984
5985 /*
5986 * IP payload checksum (enabled with jumbos/packet-split when Rx
5987 * checksum is enabled) and generation of RSS hash is mutually
5988 * exclusive in the hardware.
5989 */
5990 if (adapter->rx_ps_pages &&
5991 (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
5992 e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
5993 return -EINVAL;
5994 }
5995
5996 netdev->features = features;
5997
5946 if (netif_running(netdev)) 5998 if (netif_running(netdev))
5947 e1000e_reinit_locked(adapter); 5999 e1000e_reinit_locked(adapter);
5948 else 6000 else
@@ -6087,7 +6139,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6087 e1000e_set_ethtool_ops(netdev); 6139 e1000e_set_ethtool_ops(netdev);
6088 netdev->watchdog_timeo = 5 * HZ; 6140 netdev->watchdog_timeo = 5 * HZ;
6089 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 6141 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
6090 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 6142 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6091 6143
6092 netdev->mem_start = mmio_start; 6144 netdev->mem_start = mmio_start;
6093 netdev->mem_end = mmio_start + mmio_len; 6145 netdev->mem_end = mmio_start + mmio_len;
@@ -6133,6 +6185,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6133 NETIF_F_HW_VLAN_TX | 6185 NETIF_F_HW_VLAN_TX |
6134 NETIF_F_TSO | 6186 NETIF_F_TSO |
6135 NETIF_F_TSO6 | 6187 NETIF_F_TSO6 |
6188 NETIF_F_RXHASH |
6136 NETIF_F_RXCSUM | 6189 NETIF_F_RXCSUM |
6137 NETIF_F_HW_CSUM); 6190 NETIF_F_HW_CSUM);
6138 6191
@@ -6268,7 +6321,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6268 if (!(adapter->flags & FLAG_HAS_AMT)) 6321 if (!(adapter->flags & FLAG_HAS_AMT))
6269 e1000e_get_hw_control(adapter); 6322 e1000e_get_hw_control(adapter);
6270 6323
6271 strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); 6324 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
6272 err = register_netdev(netdev); 6325 err = register_netdev(netdev);
6273 if (err) 6326 if (err)
6274 goto err_register; 6327 goto err_register;
@@ -6485,7 +6538,7 @@ static int __init e1000_init_module(void)
6485 int ret; 6538 int ret;
6486 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 6539 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6487 e1000e_driver_version); 6540 e1000e_driver_version);
6488 pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); 6541 pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
6489 ret = pci_register_driver(&e1000_driver); 6542 ret = pci_register_driver(&e1000_driver);
6490 6543
6491 return ret; 6544 return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
new file mode 100644
index 000000000000..f6fb7a768ba5
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -0,0 +1,647 @@
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
30
31/**
32 * e1000_raise_eec_clk - Raise EEPROM clock
33 * @hw: pointer to the HW structure
34 * @eecd: pointer to the EEPROM
35 *
36 * Enable/Raise the EEPROM clock bit.
37 **/
38static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
39{
40 *eecd = *eecd | E1000_EECD_SK;
41 ew32(EECD, *eecd);
42 e1e_flush();
43 udelay(hw->nvm.delay_usec);
44}
45
46/**
47 * e1000_lower_eec_clk - Lower EEPROM clock
48 * @hw: pointer to the HW structure
49 * @eecd: pointer to the EEPROM
50 *
51 * Clear/Lower the EEPROM clock bit.
52 **/
53static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
54{
55 *eecd = *eecd & ~E1000_EECD_SK;
56 ew32(EECD, *eecd);
57 e1e_flush();
58 udelay(hw->nvm.delay_usec);
59}
60
61/**
62 * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM
63 * @hw: pointer to the HW structure
64 * @data: data to send to the EEPROM
65 * @count: number of bits to shift out
66 *
67 * We need to shift 'count' bits out to the EEPROM. So, the value in the
68 * "data" parameter will be shifted out to the EEPROM one bit at a time.
69 * In order to do this, "data" must be broken down into bits.
70 **/
71static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
72{
73 struct e1000_nvm_info *nvm = &hw->nvm;
74 u32 eecd = er32(EECD);
75 u32 mask;
76
77 mask = 0x01 << (count - 1);
78 if (nvm->type == e1000_nvm_eeprom_spi)
79 eecd |= E1000_EECD_DO;
80
81 do {
82 eecd &= ~E1000_EECD_DI;
83
84 if (data & mask)
85 eecd |= E1000_EECD_DI;
86
87 ew32(EECD, eecd);
88 e1e_flush();
89
90 udelay(nvm->delay_usec);
91
92 e1000_raise_eec_clk(hw, &eecd);
93 e1000_lower_eec_clk(hw, &eecd);
94
95 mask >>= 1;
96 } while (mask);
97
98 eecd &= ~E1000_EECD_DI;
99 ew32(EECD, eecd);
100}
101
102/**
103 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
104 * @hw: pointer to the HW structure
105 * @count: number of bits to shift in
106 *
107 * In order to read a register from the EEPROM, we need to shift 'count' bits
108 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
109 * the EEPROM (setting the SK bit), and then reading the value of the data out
110 * "DO" bit. During this "shifting in" process the data in "DI" bit should
111 * always be clear.
112 **/
113static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
114{
115 u32 eecd;
116 u32 i;
117 u16 data;
118
119 eecd = er32(EECD);
120
121 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
122 data = 0;
123
124 for (i = 0; i < count; i++) {
125 data <<= 1;
126 e1000_raise_eec_clk(hw, &eecd);
127
128 eecd = er32(EECD);
129
130 eecd &= ~E1000_EECD_DI;
131 if (eecd & E1000_EECD_DO)
132 data |= 1;
133
134 e1000_lower_eec_clk(hw, &eecd);
135 }
136
137 return data;
138}
139
140/**
141 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
142 * @hw: pointer to the HW structure
143 * @ee_reg: EEPROM flag for polling
144 *
145 * Polls the EEPROM status bit for either read or write completion based
146 * upon the value of 'ee_reg'.
147 **/
148s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
149{
150 u32 attempts = 100000;
151 u32 i, reg = 0;
152
153 for (i = 0; i < attempts; i++) {
154 if (ee_reg == E1000_NVM_POLL_READ)
155 reg = er32(EERD);
156 else
157 reg = er32(EEWR);
158
159 if (reg & E1000_NVM_RW_REG_DONE)
160 return 0;
161
162 udelay(5);
163 }
164
165 return -E1000_ERR_NVM;
166}
167
168/**
169 * e1000e_acquire_nvm - Generic request for access to EEPROM
170 * @hw: pointer to the HW structure
171 *
172 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
173 * Return successful if access grant bit set, else clear the request for
174 * EEPROM access and return -E1000_ERR_NVM (-1).
175 **/
176s32 e1000e_acquire_nvm(struct e1000_hw *hw)
177{
178 u32 eecd = er32(EECD);
179 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
180
181 ew32(EECD, eecd | E1000_EECD_REQ);
182 eecd = er32(EECD);
183
184 while (timeout) {
185 if (eecd & E1000_EECD_GNT)
186 break;
187 udelay(5);
188 eecd = er32(EECD);
189 timeout--;
190 }
191
192 if (!timeout) {
193 eecd &= ~E1000_EECD_REQ;
194 ew32(EECD, eecd);
195 e_dbg("Could not acquire NVM grant\n");
196 return -E1000_ERR_NVM;
197 }
198
199 return 0;
200}
201
202/**
203 * e1000_standby_nvm - Return EEPROM to standby state
204 * @hw: pointer to the HW structure
205 *
206 * Return the EEPROM to a standby state.
207 **/
208static void e1000_standby_nvm(struct e1000_hw *hw)
209{
210 struct e1000_nvm_info *nvm = &hw->nvm;
211 u32 eecd = er32(EECD);
212
213 if (nvm->type == e1000_nvm_eeprom_spi) {
214 /* Toggle CS to flush commands */
215 eecd |= E1000_EECD_CS;
216 ew32(EECD, eecd);
217 e1e_flush();
218 udelay(nvm->delay_usec);
219 eecd &= ~E1000_EECD_CS;
220 ew32(EECD, eecd);
221 e1e_flush();
222 udelay(nvm->delay_usec);
223 }
224}
225
226/**
227 * e1000_stop_nvm - Terminate EEPROM command
228 * @hw: pointer to the HW structure
229 *
230 * Terminates the current command by inverting the EEPROM's chip select pin.
231 **/
232static void e1000_stop_nvm(struct e1000_hw *hw)
233{
234 u32 eecd;
235
236 eecd = er32(EECD);
237 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
238 /* Pull CS high */
239 eecd |= E1000_EECD_CS;
240 e1000_lower_eec_clk(hw, &eecd);
241 }
242}
243
244/**
245 * e1000e_release_nvm - Release exclusive access to EEPROM
246 * @hw: pointer to the HW structure
247 *
248 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
249 **/
250void e1000e_release_nvm(struct e1000_hw *hw)
251{
252 u32 eecd;
253
254 e1000_stop_nvm(hw);
255
256 eecd = er32(EECD);
257 eecd &= ~E1000_EECD_REQ;
258 ew32(EECD, eecd);
259}
260
261/**
262 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
263 * @hw: pointer to the HW structure
264 *
265 * Setups the EEPROM for reading and writing.
266 **/
267static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
268{
269 struct e1000_nvm_info *nvm = &hw->nvm;
270 u32 eecd = er32(EECD);
271 u8 spi_stat_reg;
272
273 if (nvm->type == e1000_nvm_eeprom_spi) {
274 u16 timeout = NVM_MAX_RETRY_SPI;
275
276 /* Clear SK and CS */
277 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
278 ew32(EECD, eecd);
279 e1e_flush();
280 udelay(1);
281
282 /*
283 * Read "Status Register" repeatedly until the LSB is cleared.
284 * The EEPROM will signal that the command has been completed
285 * by clearing bit 0 of the internal status register. If it's
286 * not cleared within 'timeout', then error out.
287 */
288 while (timeout) {
289 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
290 hw->nvm.opcode_bits);
291 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
292 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
293 break;
294
295 udelay(5);
296 e1000_standby_nvm(hw);
297 timeout--;
298 }
299
300 if (!timeout) {
301 e_dbg("SPI NVM Status error\n");
302 return -E1000_ERR_NVM;
303 }
304 }
305
306 return 0;
307}
308
309/**
310 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
311 * @hw: pointer to the HW structure
312 * @offset: offset of word in the EEPROM to read
313 * @words: number of words to read
314 * @data: word read from the EEPROM
315 *
316 * Reads a 16 bit word from the EEPROM using the EERD register.
317 **/
318s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
319{
320 struct e1000_nvm_info *nvm = &hw->nvm;
321 u32 i, eerd = 0;
322 s32 ret_val = 0;
323
324 /*
325 * A check for invalid values: offset too large, too many words,
326 * too many words for the offset, and not enough words.
327 */
328 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
329 (words == 0)) {
330 e_dbg("nvm parameter(s) out of bounds\n");
331 return -E1000_ERR_NVM;
332 }
333
334 for (i = 0; i < words; i++) {
335 eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) +
336 E1000_NVM_RW_REG_START;
337
338 ew32(EERD, eerd);
339 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
340 if (ret_val)
341 break;
342
343 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
344 }
345
346 return ret_val;
347}
348
349/**
350 * e1000e_write_nvm_spi - Write to EEPROM using SPI
351 * @hw: pointer to the HW structure
352 * @offset: offset within the EEPROM to be written to
353 * @words: number of words to write
354 * @data: 16 bit word(s) to be written to the EEPROM
355 *
356 * Writes data to EEPROM at offset using SPI interface.
357 *
358 * If e1000e_update_nvm_checksum is not called after this function , the
359 * EEPROM will most likely contain an invalid checksum.
360 **/
361s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
362{
363 struct e1000_nvm_info *nvm = &hw->nvm;
364 s32 ret_val;
365 u16 widx = 0;
366
367 /*
368 * A check for invalid values: offset too large, too many words,
369 * and not enough words.
370 */
371 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
372 (words == 0)) {
373 e_dbg("nvm parameter(s) out of bounds\n");
374 return -E1000_ERR_NVM;
375 }
376
377 ret_val = nvm->ops.acquire(hw);
378 if (ret_val)
379 return ret_val;
380
381 while (widx < words) {
382 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
383
384 ret_val = e1000_ready_nvm_eeprom(hw);
385 if (ret_val) {
386 nvm->ops.release(hw);
387 return ret_val;
388 }
389
390 e1000_standby_nvm(hw);
391
392 /* Send the WRITE ENABLE command (8 bit opcode) */
393 e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
394 nvm->opcode_bits);
395
396 e1000_standby_nvm(hw);
397
398 /*
399 * Some SPI eeproms use the 8th address bit embedded in the
400 * opcode
401 */
402 if ((nvm->address_bits == 8) && (offset >= 128))
403 write_opcode |= NVM_A8_OPCODE_SPI;
404
405 /* Send the Write command (8-bit opcode + addr) */
406 e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
407 e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
408 nvm->address_bits);
409
410 /* Loop to allow for up to whole page write of eeprom */
411 while (widx < words) {
412 u16 word_out = data[widx];
413 word_out = (word_out >> 8) | (word_out << 8);
414 e1000_shift_out_eec_bits(hw, word_out, 16);
415 widx++;
416
417 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
418 e1000_standby_nvm(hw);
419 break;
420 }
421 }
422 }
423
424 usleep_range(10000, 20000);
425 nvm->ops.release(hw);
426 return 0;
427}
428
429/**
430 * e1000_read_pba_string_generic - Read device part number
431 * @hw: pointer to the HW structure
432 * @pba_num: pointer to device part number
433 * @pba_num_size: size of part number buffer
434 *
435 * Reads the product board assembly (PBA) number from the EEPROM and stores
436 * the value in pba_num.
437 **/
438s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
439 u32 pba_num_size)
440{
441 s32 ret_val;
442 u16 nvm_data;
443 u16 pba_ptr;
444 u16 offset;
445 u16 length;
446
447 if (pba_num == NULL) {
448 e_dbg("PBA string buffer was null\n");
449 ret_val = E1000_ERR_INVALID_ARGUMENT;
450 goto out;
451 }
452
453 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
454 if (ret_val) {
455 e_dbg("NVM Read Error\n");
456 goto out;
457 }
458
459 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
460 if (ret_val) {
461 e_dbg("NVM Read Error\n");
462 goto out;
463 }
464
465 /*
466 * if nvm_data is not ptr guard the PBA must be in legacy format which
467 * means pba_ptr is actually our second data word for the PBA number
468 * and we can decode it into an ascii string
469 */
470 if (nvm_data != NVM_PBA_PTR_GUARD) {
471 e_dbg("NVM PBA number is not stored as string\n");
472
473 /* we will need 11 characters to store the PBA */
474 if (pba_num_size < 11) {
475 e_dbg("PBA string buffer too small\n");
476 return E1000_ERR_NO_SPACE;
477 }
478
479 /* extract hex string from data and pba_ptr */
480 pba_num[0] = (nvm_data >> 12) & 0xF;
481 pba_num[1] = (nvm_data >> 8) & 0xF;
482 pba_num[2] = (nvm_data >> 4) & 0xF;
483 pba_num[3] = nvm_data & 0xF;
484 pba_num[4] = (pba_ptr >> 12) & 0xF;
485 pba_num[5] = (pba_ptr >> 8) & 0xF;
486 pba_num[6] = '-';
487 pba_num[7] = 0;
488 pba_num[8] = (pba_ptr >> 4) & 0xF;
489 pba_num[9] = pba_ptr & 0xF;
490
491 /* put a null character on the end of our string */
492 pba_num[10] = '\0';
493
494 /* switch all the data but the '-' to hex char */
495 for (offset = 0; offset < 10; offset++) {
496 if (pba_num[offset] < 0xA)
497 pba_num[offset] += '0';
498 else if (pba_num[offset] < 0x10)
499 pba_num[offset] += 'A' - 0xA;
500 }
501
502 goto out;
503 }
504
505 ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
506 if (ret_val) {
507 e_dbg("NVM Read Error\n");
508 goto out;
509 }
510
511 if (length == 0xFFFF || length == 0) {
512 e_dbg("NVM PBA number section invalid length\n");
513 ret_val = E1000_ERR_NVM_PBA_SECTION;
514 goto out;
515 }
516 /* check if pba_num buffer is big enough */
517 if (pba_num_size < (((u32)length * 2) - 1)) {
518 e_dbg("PBA string buffer too small\n");
519 ret_val = E1000_ERR_NO_SPACE;
520 goto out;
521 }
522
523 /* trim pba length from start of string */
524 pba_ptr++;
525 length--;
526
527 for (offset = 0; offset < length; offset++) {
528 ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
529 if (ret_val) {
530 e_dbg("NVM Read Error\n");
531 goto out;
532 }
533 pba_num[offset * 2] = (u8)(nvm_data >> 8);
534 pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
535 }
536 pba_num[offset * 2] = '\0';
537
538out:
539 return ret_val;
540}
541
542/**
543 * e1000_read_mac_addr_generic - Read device MAC address
544 * @hw: pointer to the HW structure
545 *
546 * Reads the device MAC address from the EEPROM and stores the value.
547 * Since devices with two ports use the same EEPROM, we increment the
548 * last bit in the MAC address for the second port.
549 **/
550s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
551{
552 u32 rar_high;
553 u32 rar_low;
554 u16 i;
555
556 rar_high = er32(RAH(0));
557 rar_low = er32(RAL(0));
558
559 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
560 hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
561
562 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
563 hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
564
565 for (i = 0; i < ETH_ALEN; i++)
566 hw->mac.addr[i] = hw->mac.perm_addr[i];
567
568 return 0;
569}
570
571/**
572 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
573 * @hw: pointer to the HW structure
574 *
575 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
576 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
577 **/
578s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
579{
580 s32 ret_val;
581 u16 checksum = 0;
582 u16 i, nvm_data;
583
584 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
585 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
586 if (ret_val) {
587 e_dbg("NVM Read Error\n");
588 return ret_val;
589 }
590 checksum += nvm_data;
591 }
592
593 if (checksum != (u16)NVM_SUM) {
594 e_dbg("NVM Checksum Invalid\n");
595 return -E1000_ERR_NVM;
596 }
597
598 return 0;
599}
600
601/**
602 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
603 * @hw: pointer to the HW structure
604 *
605 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
606 * up to the checksum. Then calculates the EEPROM checksum and writes the
607 * value to the EEPROM.
608 **/
609s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
610{
611 s32 ret_val;
612 u16 checksum = 0;
613 u16 i, nvm_data;
614
615 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
616 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
617 if (ret_val) {
618 e_dbg("NVM Read Error while updating checksum.\n");
619 return ret_val;
620 }
621 checksum += nvm_data;
622 }
623 checksum = (u16)NVM_SUM - checksum;
624 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
625 if (ret_val)
626 e_dbg("NVM Write Error while updating checksum.\n");
627
628 return ret_val;
629}
630
631/**
632 * e1000e_reload_nvm - Reloads EEPROM
633 * @hw: pointer to the HW structure
634 *
635 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
636 * extended control register.
637 **/
638void e1000e_reload_nvm(struct e1000_hw *hw)
639{
640 u32 ctrl_ext;
641
642 udelay(10);
643 ctrl_ext = er32(CTRL_EXT);
644 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
645 ew32(CTRL_EXT, ctrl_ext);
646 e1e_flush();
647}
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 20e93b08e7f3..9c6a56d804a4 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -113,11 +113,20 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
113#define MAX_ITR 100000 113#define MAX_ITR 100000
114#define MIN_ITR 100 114#define MIN_ITR 100
115 115
116/* IntMode (Interrupt Mode) 116/*
117 * IntMode (Interrupt Mode)
118 *
119 * Valid Range: varies depending on kernel configuration & hardware support
120 *
121 * legacy=0, MSI=1, MSI-X=2
117 * 122 *
118 * Valid Range: 0 - 2 123 * When MSI/MSI-X support is enabled in kernel-
124 * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise
125 * When MSI/MSI-X support is not enabled in kernel-
126 * Default Value: 0 (legacy)
119 * 127 *
120 * Default Value: 2 (MSI-X) 128 * When a mode is specified that is not allowed/supported, it will be
129 * demoted to the most advanced interrupt mode available.
121 */ 130 */
122E1000_PARAM(IntMode, "Interrupt Mode"); 131E1000_PARAM(IntMode, "Interrupt Mode");
123#define MAX_INTMODE 2 132#define MAX_INTMODE 2
@@ -388,12 +397,33 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
388 static struct e1000_option opt = { 397 static struct e1000_option opt = {
389 .type = range_option, 398 .type = range_option,
390 .name = "Interrupt Mode", 399 .name = "Interrupt Mode",
391 .err = "defaulting to 2 (MSI-X)", 400#ifndef CONFIG_PCI_MSI
392 .def = E1000E_INT_MODE_MSIX, 401 .err = "defaulting to 0 (legacy)",
393 .arg = { .r = { .min = MIN_INTMODE, 402 .def = E1000E_INT_MODE_LEGACY,
394 .max = MAX_INTMODE } } 403 .arg = { .r = { .min = 0,
404 .max = 0 } }
405#endif
395 }; 406 };
396 407
408#ifdef CONFIG_PCI_MSI
409 if (adapter->flags & FLAG_HAS_MSIX) {
410 opt.err = kstrdup("defaulting to 2 (MSI-X)",
411 GFP_KERNEL);
412 opt.def = E1000E_INT_MODE_MSIX;
413 opt.arg.r.max = E1000E_INT_MODE_MSIX;
414 } else {
415 opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL);
416 opt.def = E1000E_INT_MODE_MSI;
417 opt.arg.r.max = E1000E_INT_MODE_MSI;
418 }
419
420 if (!opt.err) {
421 dev_err(&adapter->pdev->dev,
422 "Failed to allocate memory\n");
423 return;
424 }
425#endif
426
397 if (num_IntMode > bd) { 427 if (num_IntMode > bd) {
398 unsigned int int_mode = IntMode[bd]; 428 unsigned int int_mode = IntMode[bd];
399 e1000_validate_option(&int_mode, &opt, adapter); 429 e1000_validate_option(&int_mode, &opt, adapter);
@@ -401,6 +431,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
401 } else { 431 } else {
402 adapter->int_mode = opt.def; 432 adapter->int_mode = opt.def;
403 } 433 }
434
435#ifdef CONFIG_PCI_MSI
436 kfree(opt.err);
437#endif
404 } 438 }
405 { /* Smart Power Down */ 439 { /* Smart Power Down */
406 static const struct e1000_option opt = { 440 static const struct e1000_option opt = {
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 8666476cb9be..8dd2ff03f1f7 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -1136,8 +1136,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1136 if (phy->autoneg_wait_to_complete) { 1136 if (phy->autoneg_wait_to_complete) {
1137 ret_val = e1000_wait_autoneg(hw); 1137 ret_val = e1000_wait_autoneg(hw);
1138 if (ret_val) { 1138 if (ret_val) {
1139 e_dbg("Error while waiting for " 1139 e_dbg("Error while waiting for autoneg to complete\n");
1140 "autoneg to complete\n");
1141 return ret_val; 1140 return ret_val;
1142 } 1141 }
1143 } 1142 }
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 9bd5faf64a85..002478801a1b 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1136,10 +1136,8 @@ ixgb_set_multi(struct net_device *netdev)
1136 u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES * 1136 u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
1137 ETH_ALEN, GFP_ATOMIC); 1137 ETH_ALEN, GFP_ATOMIC);
1138 u8 *addr; 1138 u8 *addr;
1139 if (!mta) { 1139 if (!mta)
1140 pr_err("allocation of multicast memory failed\n");
1141 goto alloc_failed; 1140 goto alloc_failed;
1142 }
1143 1141
1144 IXGB_WRITE_REG(hw, RCTL, rctl); 1142 IXGB_WRITE_REG(hw, RCTL, rctl);
1145 1143
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 92192c6c4f50..ecc46ce8b2c3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -131,6 +131,11 @@ MODULE_PARM_DESC(max_vfs,
131 "Maximum number of virtual functions to allocate per physical function"); 131 "Maximum number of virtual functions to allocate per physical function");
132#endif /* CONFIG_PCI_IOV */ 132#endif /* CONFIG_PCI_IOV */
133 133
134static unsigned int allow_unsupported_sfp;
135module_param(allow_unsupported_sfp, uint, 0);
136MODULE_PARM_DESC(allow_unsupported_sfp,
137 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
138
134MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 139MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
135MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); 140MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
136MODULE_LICENSE("GPL"); 141MODULE_LICENSE("GPL");
@@ -7489,6 +7494,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7489 e_crit(probe, "Fan has stopped, replace the adapter\n"); 7494 e_crit(probe, "Fan has stopped, replace the adapter\n");
7490 } 7495 }
7491 7496
7497 if (allow_unsupported_sfp)
7498 hw->allow_unsupported_sfp = allow_unsupported_sfp;
7499
7492 /* reset_hw fills in the perm_addr as well */ 7500 /* reset_hw fills in the perm_addr as well */
7493 hw->phy.reset_if_overtemp = true; 7501 hw->phy.reset_if_overtemp = true;
7494 err = hw->mac.ops.reset_hw(hw); 7502 err = hw->mac.ops.reset_hw(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index b91773551a38..bf9f82f4b1ae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -834,6 +834,7 @@ out:
834 **/ 834 **/
835s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 835s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
836{ 836{
837 struct ixgbe_adapter *adapter = hw->back;
837 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 838 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
838 u32 vendor_oui = 0; 839 u32 vendor_oui = 0;
839 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; 840 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
@@ -1068,9 +1069,16 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1068 if (hw->phy.type == ixgbe_phy_sfp_intel) { 1069 if (hw->phy.type == ixgbe_phy_sfp_intel) {
1069 status = 0; 1070 status = 0;
1070 } else { 1071 } else {
1071 hw_dbg(hw, "SFP+ module not supported\n"); 1072 if (hw->allow_unsupported_sfp) {
1072 hw->phy.type = ixgbe_phy_sfp_unsupported; 1073 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.");
1073 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1074 status = 0;
1075 } else {
1076 hw_dbg(hw,
1077 "SFP+ module not supported\n");
1078 hw->phy.type =
1079 ixgbe_phy_sfp_unsupported;
1080 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1081 }
1074 } 1082 }
1075 } else { 1083 } else {
1076 status = 0; 1084 status = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9b95bef60970..4c060292395f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2892,6 +2892,7 @@ struct ixgbe_hw {
2892 u8 revision_id; 2892 u8 revision_id;
2893 bool adapter_stopped; 2893 bool adapter_stopped;
2894 bool force_full_reset; 2894 bool force_full_reset;
2895 bool allow_unsupported_sfp;
2895}; 2896};
2896 2897
2897struct ixgbe_info { 2898struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e51d552410ae..58c04b69ce70 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2521,12 +2521,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2521 2521
2522 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2522 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2523 rx_ring->rx_buffer_info = vzalloc(size); 2523 rx_ring->rx_buffer_info = vzalloc(size);
2524 if (!rx_ring->rx_buffer_info) { 2524 if (!rx_ring->rx_buffer_info)
2525 hw_dbg(&adapter->hw,
2526 "Unable to vmalloc buffer memory for "
2527 "the receive descriptor ring\n");
2528 goto alloc_failed; 2525 goto alloc_failed;
2529 }
2530 2526
2531 /* Round up to nearest 4K */ 2527 /* Round up to nearest 4K */
2532 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2528 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 27d651a80f3f..1b86d0b45f3c 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2999,7 +2999,6 @@ jme_init_one(struct pci_dev *pdev,
2999 */ 2999 */
3000 netdev = alloc_etherdev(sizeof(*jme)); 3000 netdev = alloc_etherdev(sizeof(*jme));
3001 if (!netdev) { 3001 if (!netdev) {
3002 pr_err("Cannot allocate netdev structure\n");
3003 rc = -ENOMEM; 3002 rc = -ENOMEM;
3004 goto err_out_release_regions; 3003 goto err_out_release_regions;
3005 } 3004 }
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 6ad094f176f8..f30db1c46600 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1108,10 +1108,9 @@ static int korina_probe(struct platform_device *pdev)
1108 int rc; 1108 int rc;
1109 1109
1110 dev = alloc_etherdev(sizeof(struct korina_private)); 1110 dev = alloc_etherdev(sizeof(struct korina_private));
1111 if (!dev) { 1111 if (!dev)
1112 printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
1113 return -ENOMEM; 1112 return -ENOMEM;
1114 } 1113
1115 SET_NETDEV_DEV(dev, &pdev->dev); 1114 SET_NETDEV_DEV(dev, &pdev->dev);
1116 lp = netdev_priv(dev); 1115 lp = netdev_priv(dev);
1117 1116
@@ -1150,7 +1149,6 @@ static int korina_probe(struct platform_device *pdev)
1150 1149
1151 lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL); 1150 lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
1152 if (!lp->td_ring) { 1151 if (!lp->td_ring) {
1153 printk(KERN_ERR DRV_NAME ": cannot allocate descriptors\n");
1154 rc = -ENXIO; 1152 rc = -ENXIO;
1155 goto probe_err_td_ring; 1153 goto probe_err_td_ring;
1156 } 1154 }
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 85e2c6cd9708..86d2fe6e053d 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -731,6 +731,10 @@ ltq_etop_probe(struct platform_device *pdev)
731 } 731 }
732 732
733 dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4); 733 dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
734 if (!dev) {
735 err = -ENOMEM;
736 goto err_out;
737 }
734 strcpy(dev->name, "eth%d"); 738 strcpy(dev->name, "eth%d");
735 dev->netdev_ops = &ltq_eth_netdev_ops; 739 dev->netdev_ops = &ltq_eth_netdev_ops;
736 dev->ethtool_ops = &ltq_etop_ethtool_ops; 740 dev->ethtool_ops = &ltq_etop_ethtool_ops;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 953ba5851f7b..92b4b4e68e3b 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1017,10 +1017,9 @@ static int rxq_init(struct net_device *dev)
1017 /* Allocate RX skb rings */ 1017 /* Allocate RX skb rings */
1018 pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, 1018 pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
1019 GFP_KERNEL); 1019 GFP_KERNEL);
1020 if (!pep->rx_skb) { 1020 if (!pep->rx_skb)
1021 printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
1022 return -ENOMEM; 1021 return -ENOMEM;
1023 } 1022
1024 /* Allocate RX ring */ 1023 /* Allocate RX ring */
1025 pep->rx_desc_count = 0; 1024 pep->rx_desc_count = 0;
1026 size = pep->rx_ring_size * sizeof(struct rx_desc); 1025 size = pep->rx_ring_size * sizeof(struct rx_desc);
@@ -1081,10 +1080,9 @@ static int txq_init(struct net_device *dev)
1081 1080
1082 pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, 1081 pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
1083 GFP_KERNEL); 1082 GFP_KERNEL);
1084 if (!pep->tx_skb) { 1083 if (!pep->tx_skb)
1085 printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
1086 return -ENOMEM; 1084 return -ENOMEM;
1087 } 1085
1088 /* Allocate TX ring */ 1086 /* Allocate TX ring */
1089 pep->tx_desc_count = 0; 1087 pep->tx_desc_count = 0;
1090 size = pep->tx_ring_size * sizeof(struct tx_desc); 1088 size = pep->tx_ring_size * sizeof(struct tx_desc);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index edb9bda55d55..1d61eaac8587 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3852,10 +3852,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3852 struct skge_port *skge; 3852 struct skge_port *skge;
3853 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3853 struct net_device *dev = alloc_etherdev(sizeof(*skge));
3854 3854
3855 if (!dev) { 3855 if (!dev)
3856 dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
3857 return NULL; 3856 return NULL;
3858 }
3859 3857
3860 SET_NETDEV_DEV(dev, &hw->pdev->dev); 3858 SET_NETDEV_DEV(dev, &hw->pdev->dev);
3861 dev->netdev_ops = &skge_netdev_ops; 3859 dev->netdev_ops = &skge_netdev_ops;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 760c2b17dfd3..82c2c86a1951 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4700,10 +4700,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4700 struct sky2_port *sky2; 4700 struct sky2_port *sky2;
4701 struct net_device *dev = alloc_etherdev(sizeof(*sky2)); 4701 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
4702 4702
4703 if (!dev) { 4703 if (!dev)
4704 dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
4705 return NULL; 4704 return NULL;
4706 }
4707 4705
4708 SET_NETDEV_DEV(dev, &hw->pdev->dev); 4706 SET_NETDEV_DEV(dev, &hw->pdev->dev);
4709 dev->irq = hw->pdev->irq; 4707 dev->irq = hw->pdev->irq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 405e6ac3faf6..1831ddeebc42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1314,7 +1314,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1314 down(&priv->cmd.slave_sem); 1314 down(&priv->cmd.slave_sem);
1315 if (mlx4_master_process_vhcr(dev, slave, NULL)) { 1315 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1316 mlx4_err(dev, "Failed processing vhcr for slave:%d," 1316 mlx4_err(dev, "Failed processing vhcr for slave:%d,"
1317 " reseting slave.\n", slave); 1317 " resetting slave.\n", slave);
1318 up(&priv->cmd.slave_sem); 1318 up(&priv->cmd.slave_sem);
1319 goto reset_slave; 1319 goto reset_slave;
1320 } 1320 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 467ae5824875..25e6480479df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1047,10 +1047,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1047 1047
1048 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 1048 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
1049 prof->tx_ring_num, prof->rx_ring_num); 1049 prof->tx_ring_num, prof->rx_ring_num);
1050 if (dev == NULL) { 1050 if (dev == NULL)
1051 mlx4_err(mdev, "Net device allocation failed\n");
1052 return -ENOMEM; 1051 return -ENOMEM;
1053 }
1054 1052
1055 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); 1053 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
1056 dev->dev_id = port - 1; 1054 dev->dev_id = port - 1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 971d4b6b8dfe..f61d0e08f52b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -281,10 +281,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
281 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * 281 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
282 sizeof(struct skb_frag_struct)); 282 sizeof(struct skb_frag_struct));
283 ring->rx_info = vmalloc(tmp); 283 ring->rx_info = vmalloc(tmp);
284 if (!ring->rx_info) { 284 if (!ring->rx_info)
285 en_err(priv, "Failed allocating rx_info ring\n");
286 return -ENOMEM; 285 return -ENOMEM;
287 } 286
288 en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", 287 en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
289 ring->rx_info, tmp); 288 ring->rx_info, tmp);
290 289
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9ef9038d0629..ff3250586584 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -71,16 +71,14 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
71 71
72 tmp = size * sizeof(struct mlx4_en_tx_info); 72 tmp = size * sizeof(struct mlx4_en_tx_info);
73 ring->tx_info = vmalloc(tmp); 73 ring->tx_info = vmalloc(tmp);
74 if (!ring->tx_info) { 74 if (!ring->tx_info)
75 en_err(priv, "Failed allocating tx_info ring\n");
76 return -ENOMEM; 75 return -ENOMEM;
77 } 76
78 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", 77 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
79 ring->tx_info, tmp); 78 ring->tx_info, tmp);
80 79
81 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); 80 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
82 if (!ring->bounce_buf) { 81 if (!ring->bounce_buf) {
83 en_err(priv, "Failed allocating bounce buffer\n");
84 err = -ENOMEM; 82 err = -ENOMEM;
85 goto err_tx; 83 goto err_tx;
86 } 84 }
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index ab81c0dc96e2..7c717276502f 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1362,10 +1362,8 @@ ks8695_probe(struct platform_device *pdev)
1362 1362
1363 /* Initialise a net_device */ 1363 /* Initialise a net_device */
1364 ndev = alloc_etherdev(sizeof(struct ks8695_priv)); 1364 ndev = alloc_etherdev(sizeof(struct ks8695_priv));
1365 if (!ndev) { 1365 if (!ndev)
1366 dev_err(&pdev->dev, "could not allocate device.\n");
1367 return -ENOMEM; 1366 return -ENOMEM;
1368 }
1369 1367
1370 SET_NETDEV_DEV(ndev, &pdev->dev); 1368 SET_NETDEV_DEV(ndev, &pdev->dev);
1371 1369
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 6b35e7da9a9c..4b551feae4c5 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1,4 +1,4 @@
1/* drivers/net/ks8851.c 1/* drivers/net/ethernet/micrel/ks8851.c
2 * 2 *
3 * Copyright 2009 Simtec Electronics 3 * Copyright 2009 Simtec Electronics
4 * http://www.simtec.co.uk/ 4 * http://www.simtec.co.uk/
@@ -1419,10 +1419,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1419 int ret; 1419 int ret;
1420 1420
1421 ndev = alloc_etherdev(sizeof(struct ks8851_net)); 1421 ndev = alloc_etherdev(sizeof(struct ks8851_net));
1422 if (!ndev) { 1422 if (!ndev)
1423 dev_err(&spi->dev, "failed to alloc ethernet device\n");
1424 return -ENOMEM; 1423 return -ENOMEM;
1425 }
1426 1424
1427 spi->bits_per_word = 8; 1425 spi->bits_per_word = 8;
1428 1426
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index b0fae86aacad..852256ef1f22 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -1,4 +1,4 @@
1/* drivers/net/ks8851.h 1/* drivers/net/ethernet/micrel/ks8851.h
2 * 2 *
3 * Copyright 2009 Simtec Electronics 3 * Copyright 2009 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk> 4 * Ben Dooks <ben@simtec.co.uk>
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index e58e78e5c930..4a9d57fb9fb4 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * drivers/net/ks8851_mll.c 2 * drivers/net/ethernet/micrel/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc. 3 * Copyright (c) 2009 Micrel Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -1501,10 +1501,8 @@ static int ks_hw_init(struct ks_net *ks)
1501 ks->mcast_lst_size = 0; 1501 ks->mcast_lst_size = 0;
1502 1502
1503 ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL); 1503 ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
1504 if (!ks->frame_head_info) { 1504 if (!ks->frame_head_info)
1505 pr_err("Error: Fail to allocate frame memory\n");
1506 return false; 1505 return false;
1507 }
1508 1506
1509 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS); 1507 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1510 return true; 1508 return true;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index e52cd310ae76..2725d693c3c4 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * drivers/net/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver 2 * drivers/net/ethernet/micrel/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver
3 * 3 *
4 * Copyright (c) 2009-2010 Micrel, Inc. 4 * Copyright (c) 2009-2010 Micrel, Inc.
5 * Tristram Ha <Tristram.Ha@micrel.com> 5 * Tristram Ha <Tristram.Ha@micrel.com>
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 50055e0282ed..c813e5d8db9d 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1553,9 +1553,6 @@ static int __devinit enc28j60_probe(struct spi_device *spi)
1553 1553
1554 dev = alloc_etherdev(sizeof(struct enc28j60_net)); 1554 dev = alloc_etherdev(sizeof(struct enc28j60_net));
1555 if (!dev) { 1555 if (!dev) {
1556 if (netif_msg_drv(&debug))
1557 dev_err(&spi->dev, DRV_NAME
1558 ": unable to alloc new ethernet\n");
1559 ret = -ENOMEM; 1556 ret = -ENOMEM;
1560 goto error_alloc; 1557 goto error_alloc;
1561 } 1558 }
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 20b72ecb020a..27273ae1a6e6 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3910,10 +3910,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3910 static int board_number; 3910 static int board_number;
3911 3911
3912 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES); 3912 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
3913 if (netdev == NULL) { 3913 if (netdev == NULL)
3914 dev_err(dev, "Could not allocate ethernet device\n");
3915 return -ENOMEM; 3914 return -ENOMEM;
3916 }
3917 3915
3918 SET_NETDEV_DEV(netdev, &pdev->dev); 3916 SET_NETDEV_DEV(netdev, &pdev->dev);
3919 3917
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 97f63e12d86e..138584608809 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7760,7 +7760,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7760 else 7760 else
7761 dev = alloc_etherdev(sizeof(struct s2io_nic)); 7761 dev = alloc_etherdev(sizeof(struct s2io_nic));
7762 if (dev == NULL) { 7762 if (dev == NULL) {
7763 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7764 pci_disable_device(pdev); 7763 pci_disable_device(pdev);
7765 pci_release_regions(pdev); 7764 pci_release_regions(pdev);
7766 return -ENODEV; 7765 return -ENODEV;
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 8d288af16fc9..5a12276f810d 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/netx-eth.c 2 * drivers/net/ethernet/netx-eth.c
3 * 3 *
4 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix 4 * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
5 * 5 *
@@ -383,7 +383,6 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
383 383
384 ndev = alloc_etherdev(sizeof (struct netx_eth_priv)); 384 ndev = alloc_etherdev(sizeof (struct netx_eth_priv));
385 if (!ndev) { 385 if (!ndev) {
386 printk("%s: could not allocate device.\n", CARDNAME);
387 ret = -ENOMEM; 386 ret = -ENOMEM;
388 goto exit; 387 goto exit;
389 } 388 }
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3ead111111e1..bdbec7e04a4c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1587,10 +1587,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1587 1587
1588 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count; 1588 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1589 tx_ring->buffer_info = vzalloc(size); 1589 tx_ring->buffer_info = vzalloc(size);
1590 if (!tx_ring->buffer_info) { 1590 if (!tx_ring->buffer_info)
1591 pr_err("Unable to allocate memory for the buffer information\n");
1592 return -ENOMEM; 1591 return -ENOMEM;
1593 }
1594 1592
1595 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1593 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1596 1594
@@ -1636,10 +1634,9 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1636 1634
1637 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count; 1635 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1638 rx_ring->buffer_info = vzalloc(size); 1636 rx_ring->buffer_info = vzalloc(size);
1639 if (!rx_ring->buffer_info) { 1637 if (!rx_ring->buffer_info)
1640 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1641 return -ENOMEM; 1638 return -ENOMEM;
1642 } 1639
1643 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1640 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1644 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1641 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1645 &rx_ring->dma, GFP_KERNEL); 1642 &rx_ring->dma, GFP_KERNEL);
@@ -2422,8 +2419,6 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2422 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter)); 2419 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2423 if (!netdev) { 2420 if (!netdev) {
2424 ret = -ENOMEM; 2421 ret = -ENOMEM;
2425 dev_err(&pdev->dev,
2426 "ERR: Can't allocate and set up an Ethernet device\n");
2427 goto err_release_pci; 2422 goto err_release_pci;
2428 } 2423 }
2429 SET_NETDEV_DEV(netdev, &pdev->dev); 2424 SET_NETDEV_DEV(netdev, &pdev->dev);
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index db44e9af03c3..4a5774271bd4 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -397,10 +397,9 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
397 if (i) return i; 397 if (i) return i;
398 398
399 dev = alloc_etherdev(sizeof(*np)); 399 dev = alloc_etherdev(sizeof(*np));
400 if (!dev) { 400 if (!dev)
401 pr_err("cannot allocate ethernet device\n");
402 return -ENOMEM; 401 return -ENOMEM;
403 } 402
404 SET_NETDEV_DEV(dev, &pdev->dev); 403 SET_NETDEV_DEV(dev, &pdev->dev);
405 404
406 np = netdev_priv(dev); 405 np = netdev_priv(dev);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 49b549ff2c78..3bb725ed2703 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1740,8 +1740,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1740 1740
1741 dev = alloc_etherdev(sizeof(struct pasemi_mac)); 1741 dev = alloc_etherdev(sizeof(struct pasemi_mac));
1742 if (dev == NULL) { 1742 if (dev == NULL) {
1743 dev_err(&pdev->dev,
1744 "pasemi_mac: Could not allocate ethernet device.\n");
1745 err = -ENOMEM; 1743 err = -ENOMEM;
1746 goto out_disable_device; 1744 goto out_disable_device;
1747 } 1745 }
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index a876dffd7101..2eeac32f7fdd 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 77 56#define _NETXEN_NIC_LINUX_SUBVERSION 78
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.77" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.78"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
@@ -686,6 +686,18 @@ struct netxen_recv_context {
686 dma_addr_t phys_addr; 686 dma_addr_t phys_addr;
687}; 687};
688 688
689struct _cdrp_cmd {
690 u32 cmd;
691 u32 arg1;
692 u32 arg2;
693 u32 arg3;
694};
695
696struct netxen_cmd_args {
697 struct _cdrp_cmd req;
698 struct _cdrp_cmd rsp;
699};
700
689/* New HW context creation */ 701/* New HW context creation */
690 702
691#define NX_OS_CRB_RETRY_COUNT 4000 703#define NX_OS_CRB_RETRY_COUNT 4000
@@ -1142,6 +1154,7 @@ typedef struct {
1142#define NETXEN_NIC_LRO_DISABLED 0x00 1154#define NETXEN_NIC_LRO_DISABLED 0x00
1143#define NETXEN_NIC_BRIDGE_ENABLED 0X10 1155#define NETXEN_NIC_BRIDGE_ENABLED 0X10
1144#define NETXEN_NIC_DIAG_ENABLED 0x20 1156#define NETXEN_NIC_DIAG_ENABLED 0x20
1157#define NETXEN_FW_RESET_OWNER 0x40
1145#define NETXEN_IS_MSI_FAMILY(adapter) \ 1158#define NETXEN_IS_MSI_FAMILY(adapter) \
1146 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) 1159 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
1147 1160
@@ -1159,6 +1172,419 @@ typedef struct {
1159#define __NX_DEV_UP 1 1172#define __NX_DEV_UP 1
1160#define __NX_RESETTING 2 1173#define __NX_RESETTING 2
1161 1174
1175/* Mini Coredump FW supported version */
1176#define NX_MD_SUPPORT_MAJOR 4
1177#define NX_MD_SUPPORT_MINOR 0
1178#define NX_MD_SUPPORT_SUBVERSION 579
1179
1180#define LSW(x) ((uint16_t)(x))
1181#define LSD(x) ((uint32_t)((uint64_t)(x)))
1182#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
1183
1184/* Mini Coredump mask level */
1185#define NX_DUMP_MASK_MIN 0x03
1186#define NX_DUMP_MASK_DEF 0x1f
1187#define NX_DUMP_MASK_MAX 0xff
1188
1189/* Mini Coredump CDRP commands */
1190#define NX_CDRP_CMD_TEMP_SIZE 0x0000002f
1191#define NX_CDRP_CMD_GET_TEMP_HDR 0x00000030
1192
1193
1194#define NX_DUMP_STATE_ARRAY_LEN 16
1195#define NX_DUMP_CAP_SIZE_ARRAY_LEN 8
1196
1197/* Mini Coredump sysfs entries flags*/
1198#define NX_FORCE_FW_DUMP_KEY 0xdeadfeed
1199#define NX_ENABLE_FW_DUMP 0xaddfeed
1200#define NX_DISABLE_FW_DUMP 0xbadfeed
1201#define NX_FORCE_FW_RESET 0xdeaddead
1202
1203
1204/* Flash read/write address */
1205#define NX_FW_DUMP_REG1 0x00130060
1206#define NX_FW_DUMP_REG2 0x001e0000
1207#define NX_FLASH_SEM2_LK 0x0013C010
1208#define NX_FLASH_SEM2_ULK 0x0013C014
1209#define NX_FLASH_LOCK_ID 0x001B2100
1210#define FLASH_ROM_WINDOW 0x42110030
1211#define FLASH_ROM_DATA 0x42150000
1212
1213/* Mini Coredump register read/write routine */
1214#define NX_RD_DUMP_REG(addr, bar0, data) do { \
1215 writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \
1216 NX_FW_DUMP_REG1)); \
1217 readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \
1218 *data = readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + \
1219 LSW(addr))); \
1220} while (0)
1221
1222#define NX_WR_DUMP_REG(addr, bar0, data) do { \
1223 writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \
1224 NX_FW_DUMP_REG1)); \
1225 readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \
1226 writel(data, (void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr)));\
1227 readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr))); \
1228} while (0)
1229
1230
1231/*
1232Entry Type Defines
1233*/
1234
1235#define RDNOP 0
1236#define RDCRB 1
1237#define RDMUX 2
1238#define QUEUE 3
1239#define BOARD 4
1240#define RDSRE 5
1241#define RDOCM 6
1242#define PREGS 7
1243#define L1DTG 8
1244#define L1ITG 9
1245#define CACHE 10
1246
1247#define L1DAT 11
1248#define L1INS 12
1249#define RDSTK 13
1250#define RDCON 14
1251
1252#define L2DTG 21
1253#define L2ITG 22
1254#define L2DAT 23
1255#define L2INS 24
1256#define RDOC3 25
1257
1258#define MEMBK 32
1259
1260#define RDROM 71
1261#define RDMEM 72
1262#define RDMN 73
1263
1264#define INFOR 81
1265#define CNTRL 98
1266
1267#define TLHDR 99
1268#define RDEND 255
1269
1270#define PRIMQ 103
1271#define SQG2Q 104
1272#define SQG3Q 105
1273
1274/*
1275* Opcodes for Control Entries.
1276* These Flags are bit fields.
1277*/
1278#define NX_DUMP_WCRB 0x01
1279#define NX_DUMP_RWCRB 0x02
1280#define NX_DUMP_ANDCRB 0x04
1281#define NX_DUMP_ORCRB 0x08
1282#define NX_DUMP_POLLCRB 0x10
1283#define NX_DUMP_RD_SAVE 0x20
1284#define NX_DUMP_WRT_SAVED 0x40
1285#define NX_DUMP_MOD_SAVE_ST 0x80
1286
1287/* Driver Flags */
1288#define NX_DUMP_SKIP 0x80 /* driver skipped this entry */
1289#define NX_DUMP_SIZE_ERR 0x40 /*entry size vs capture size mismatch*/
1290
1291#define NX_PCI_READ_32(ADDR) readl((ADDR))
1292#define NX_PCI_WRITE_32(DATA, ADDR) writel(DATA, (ADDR))
1293
1294
1295
1296struct netxen_minidump {
1297 u32 pos; /* position in the dump buffer */
1298 u8 fw_supports_md; /* FW supports Mini cordump */
1299 u8 has_valid_dump; /* indicates valid dump */
1300 u8 md_capture_mask; /* driver capture mask */
1301 u8 md_enabled; /* Turn Mini Coredump on/off */
1302 u32 md_dump_size; /* Total FW Mini Coredump size */
1303 u32 md_capture_size; /* FW dump capture size */
1304 u32 md_template_size; /* FW template size */
1305 u32 md_template_ver; /* FW template version */
1306 u64 md_timestamp; /* FW Mini dump timestamp */
1307 void *md_template; /* FW template will be stored */
1308 void *md_capture_buff; /* FW dump will be stored */
1309};
1310
1311
1312
1313struct netxen_minidump_template_hdr {
1314 u32 entry_type;
1315 u32 first_entry_offset;
1316 u32 size_of_template;
1317 u32 capture_mask;
1318 u32 num_of_entries;
1319 u32 version;
1320 u32 driver_timestamp;
1321 u32 checksum;
1322 u32 driver_capture_mask;
1323 u32 driver_info_word2;
1324 u32 driver_info_word3;
1325 u32 driver_info_word4;
1326 u32 saved_state_array[NX_DUMP_STATE_ARRAY_LEN];
1327 u32 capture_size_array[NX_DUMP_CAP_SIZE_ARRAY_LEN];
1328 u32 rsvd[0];
1329};
1330
1331/* Common Entry Header: Common to All Entry Types */
1332/*
1333 * Driver Code is for driver to write some info about the entry.
1334 * Currently not used.
1335 */
1336
1337struct netxen_common_entry_hdr {
1338 u32 entry_type;
1339 u32 entry_size;
1340 u32 entry_capture_size;
1341 union {
1342 struct {
1343 u8 entry_capture_mask;
1344 u8 entry_code;
1345 u8 driver_code;
1346 u8 driver_flags;
1347 };
1348 u32 entry_ctrl_word;
1349 };
1350};
1351
1352
1353/* Generic Entry Including Header */
1354struct netxen_minidump_entry {
1355 struct netxen_common_entry_hdr hdr;
1356 u32 entry_data00;
1357 u32 entry_data01;
1358 u32 entry_data02;
1359 u32 entry_data03;
1360 u32 entry_data04;
1361 u32 entry_data05;
1362 u32 entry_data06;
1363 u32 entry_data07;
1364};
1365
1366/* Read ROM Header */
1367struct netxen_minidump_entry_rdrom {
1368 struct netxen_common_entry_hdr h;
1369 union {
1370 struct {
1371 u32 select_addr_reg;
1372 };
1373 u32 rsvd_0;
1374 };
1375 union {
1376 struct {
1377 u8 addr_stride;
1378 u8 addr_cnt;
1379 u16 data_size;
1380 };
1381 u32 rsvd_1;
1382 };
1383 union {
1384 struct {
1385 u32 op_count;
1386 };
1387 u32 rsvd_2;
1388 };
1389 union {
1390 struct {
1391 u32 read_addr_reg;
1392 };
1393 u32 rsvd_3;
1394 };
1395 union {
1396 struct {
1397 u32 write_mask;
1398 };
1399 u32 rsvd_4;
1400 };
1401 union {
1402 struct {
1403 u32 read_mask;
1404 };
1405 u32 rsvd_5;
1406 };
1407 u32 read_addr;
1408 u32 read_data_size;
1409};
1410
1411
1412/* Read CRB and Control Entry Header */
1413struct netxen_minidump_entry_crb {
1414 struct netxen_common_entry_hdr h;
1415 u32 addr;
1416 union {
1417 struct {
1418 u8 addr_stride;
1419 u8 state_index_a;
1420 u16 poll_timeout;
1421 };
1422 u32 addr_cntrl;
1423 };
1424 u32 data_size;
1425 u32 op_count;
1426 union {
1427 struct {
1428 u8 opcode;
1429 u8 state_index_v;
1430 u8 shl;
1431 u8 shr;
1432 };
1433 u32 control_value;
1434 };
1435 u32 value_1;
1436 u32 value_2;
1437 u32 value_3;
1438};
1439
1440/* Read Memory and MN Header */
1441struct netxen_minidump_entry_rdmem {
1442 struct netxen_common_entry_hdr h;
1443 union {
1444 struct {
1445 u32 select_addr_reg;
1446 };
1447 u32 rsvd_0;
1448 };
1449 union {
1450 struct {
1451 u8 addr_stride;
1452 u8 addr_cnt;
1453 u16 data_size;
1454 };
1455 u32 rsvd_1;
1456 };
1457 union {
1458 struct {
1459 u32 op_count;
1460 };
1461 u32 rsvd_2;
1462 };
1463 union {
1464 struct {
1465 u32 read_addr_reg;
1466 };
1467 u32 rsvd_3;
1468 };
1469 union {
1470 struct {
1471 u32 cntrl_addr_reg;
1472 };
1473 u32 rsvd_4;
1474 };
1475 union {
1476 struct {
1477 u8 wr_byte0;
1478 u8 wr_byte1;
1479 u8 poll_mask;
1480 u8 poll_cnt;
1481 };
1482 u32 rsvd_5;
1483 };
1484 u32 read_addr;
1485 u32 read_data_size;
1486};
1487
1488/* Read Cache L1 and L2 Header */
1489struct netxen_minidump_entry_cache {
1490 struct netxen_common_entry_hdr h;
1491 u32 tag_reg_addr;
1492 union {
1493 struct {
1494 u16 tag_value_stride;
1495 u16 init_tag_value;
1496 };
1497 u32 select_addr_cntrl;
1498 };
1499 u32 data_size;
1500 u32 op_count;
1501 u32 control_addr;
1502 union {
1503 struct {
1504 u16 write_value;
1505 u8 poll_mask;
1506 u8 poll_wait;
1507 };
1508 u32 control_value;
1509 };
1510 u32 read_addr;
1511 union {
1512 struct {
1513 u8 read_addr_stride;
1514 u8 read_addr_cnt;
1515 u16 rsvd_1;
1516 };
1517 u32 read_addr_cntrl;
1518 };
1519};
1520
1521/* Read OCM Header */
1522struct netxen_minidump_entry_rdocm {
1523 struct netxen_common_entry_hdr h;
1524 u32 rsvd_0;
1525 union {
1526 struct {
1527 u32 rsvd_1;
1528 };
1529 u32 select_addr_cntrl;
1530 };
1531 u32 data_size;
1532 u32 op_count;
1533 u32 rsvd_2;
1534 u32 rsvd_3;
1535 u32 read_addr;
1536 union {
1537 struct {
1538 u32 read_addr_stride;
1539 };
1540 u32 read_addr_cntrl;
1541 };
1542};
1543
1544/* Read MUX Header */
1545struct netxen_minidump_entry_mux {
1546 struct netxen_common_entry_hdr h;
1547 u32 select_addr;
1548 union {
1549 struct {
1550 u32 rsvd_0;
1551 };
1552 u32 select_addr_cntrl;
1553 };
1554 u32 data_size;
1555 u32 op_count;
1556 u32 select_value;
1557 u32 select_value_stride;
1558 u32 read_addr;
1559 u32 rsvd_1;
1560};
1561
1562/* Read Queue Header */
1563struct netxen_minidump_entry_queue {
1564 struct netxen_common_entry_hdr h;
1565 u32 select_addr;
1566 union {
1567 struct {
1568 u16 queue_id_stride;
1569 u16 rsvd_0;
1570 };
1571 u32 select_addr_cntrl;
1572 };
1573 u32 data_size;
1574 u32 op_count;
1575 u32 rsvd_1;
1576 u32 rsvd_2;
1577 u32 read_addr;
1578 union {
1579 struct {
1580 u8 read_addr_stride;
1581 u8 read_addr_cnt;
1582 u16 rsvd_3;
1583 };
1584 u32 read_addr_cntrl;
1585 };
1586};
1587
1162struct netxen_dummy_dma { 1588struct netxen_dummy_dma {
1163 void *addr; 1589 void *addr;
1164 dma_addr_t phys_addr; 1590 dma_addr_t phys_addr;
@@ -1263,6 +1689,8 @@ struct netxen_adapter {
1263 __le32 file_prd_off; /*File fw product offset*/ 1689 __le32 file_prd_off; /*File fw product offset*/
1264 u32 fw_version; 1690 u32 fw_version;
1265 const struct firmware *fw; 1691 const struct firmware *fw;
1692 struct netxen_minidump mdump; /* mdump ptr */
1693 int fw_mdump_rdy; /* for mdump ready */
1266}; 1694};
1267 1695
1268int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val); 1696int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val);
@@ -1365,13 +1793,16 @@ int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
1365int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable); 1793int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
1366int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable); 1794int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable);
1367int netxen_send_lro_cleanup(struct netxen_adapter *adapter); 1795int netxen_send_lro_cleanup(struct netxen_adapter *adapter);
1368 1796int netxen_setup_minidump(struct netxen_adapter *adapter);
1797void netxen_dump_fw(struct netxen_adapter *adapter);
1369void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, 1798void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
1370 struct nx_host_tx_ring *tx_ring); 1799 struct nx_host_tx_ring *tx_ring);
1371 1800
1372/* Functions from netxen_nic_main.c */ 1801/* Functions from netxen_nic_main.c */
1373int netxen_nic_reset_context(struct netxen_adapter *); 1802int netxen_nic_reset_context(struct netxen_adapter *);
1374 1803
1804int nx_dev_request_reset(struct netxen_adapter *adapter);
1805
1375/* 1806/*
1376 * NetXen Board information 1807 * NetXen Board information
1377 */ 1808 */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index a925392abd6f..f3c0057a802b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -48,28 +48,27 @@ netxen_poll_rsp(struct netxen_adapter *adapter)
48} 48}
49 49
50static u32 50static u32
51netxen_issue_cmd(struct netxen_adapter *adapter, 51netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
52 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
53{ 52{
54 u32 rsp; 53 u32 rsp;
55 u32 signature = 0; 54 u32 signature = 0;
56 u32 rcode = NX_RCODE_SUCCESS; 55 u32 rcode = NX_RCODE_SUCCESS;
57 56
58 signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version); 57 signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
59 58 NXHAL_VERSION);
60 /* Acquire semaphore before accessing CRB */ 59 /* Acquire semaphore before accessing CRB */
61 if (netxen_api_lock(adapter)) 60 if (netxen_api_lock(adapter))
62 return NX_RCODE_TIMEOUT; 61 return NX_RCODE_TIMEOUT;
63 62
64 NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature); 63 NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
65 64
66 NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1); 65 NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
67 66
68 NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2); 67 NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
69 68
70 NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3); 69 NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
71 70
72 NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd)); 71 NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));
73 72
74 rsp = netxen_poll_rsp(adapter); 73 rsp = netxen_poll_rsp(adapter);
75 74
@@ -83,28 +82,179 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
83 82
84 printk(KERN_ERR "%s: failed card response code:0x%x\n", 83 printk(KERN_ERR "%s: failed card response code:0x%x\n",
85 netxen_nic_driver_name, rcode); 84 netxen_nic_driver_name, rcode);
85 } else if (rsp == NX_CDRP_RSP_OK) {
86 cmd->rsp.cmd = NX_RCODE_SUCCESS;
87 if (cmd->rsp.arg2)
88 cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
89 if (cmd->rsp.arg3)
90 cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
86 } 91 }
87 92
93 if (cmd->rsp.arg1)
94 cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
88 /* Release semaphore */ 95 /* Release semaphore */
89 netxen_api_unlock(adapter); 96 netxen_api_unlock(adapter);
90 97
91 return rcode; 98 return rcode;
92} 99}
93 100
101static int
102netxen_get_minidump_template_size(struct netxen_adapter *adapter)
103{
104 struct netxen_cmd_args cmd;
105 memset(&cmd, 0, sizeof(cmd));
106 cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
107 memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
108 netxen_issue_cmd(adapter, &cmd);
109 if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
110 dev_info(&adapter->pdev->dev,
111 "Can't get template size %d\n", cmd.rsp.cmd);
112 return -EIO;
113 }
114 adapter->mdump.md_template_size = cmd.rsp.arg2;
115 adapter->mdump.md_template_ver = cmd.rsp.arg3;
116 return 0;
117}
118
119static int
120netxen_get_minidump_template(struct netxen_adapter *adapter)
121{
122 dma_addr_t md_template_addr;
123 void *addr;
124 u32 size;
125 struct netxen_cmd_args cmd;
126 size = adapter->mdump.md_template_size;
127
128 if (size == 0) {
129 dev_err(&adapter->pdev->dev, "Can not capture Minidump "
130 "template. Invalid template size.\n");
131 return NX_RCODE_INVALID_ARGS;
132 }
133
134 addr = pci_alloc_consistent(adapter->pdev, size, &md_template_addr);
135
136 if (!addr) {
137 dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
138 return -ENOMEM;
139 }
140
141 memset(addr, 0, size);
142 memset(&cmd, 0, sizeof(cmd));
143 memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
144 cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
145 cmd.req.arg1 = LSD(md_template_addr);
146 cmd.req.arg2 = MSD(md_template_addr);
147 cmd.req.arg3 |= size;
148 netxen_issue_cmd(adapter, &cmd);
149
150 if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
151 memcpy(adapter->mdump.md_template, addr, size);
152 } else {
153 dev_err(&adapter->pdev->dev, "Failed to get minidump template, "
154 "err_code : %d, requested_size : %d, actual_size : %d\n ",
155 cmd.rsp.cmd, size, cmd.rsp.arg2);
156 }
157 pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
158 return 0;
159}
160
161static u32
162netxen_check_template_checksum(struct netxen_adapter *adapter)
163{
164 u64 sum = 0 ;
165 u32 *buff = adapter->mdump.md_template;
166 int count = adapter->mdump.md_template_size/sizeof(uint32_t) ;
167
168 while (count-- > 0)
169 sum += *buff++ ;
170 while (sum >> 32)
171 sum = (sum & 0xFFFFFFFF) + (sum >> 32) ;
172
173 return ~sum;
174}
175
176int
177netxen_setup_minidump(struct netxen_adapter *adapter)
178{
179 int err = 0, i;
180 u32 *template, *tmp_buf;
181 struct netxen_minidump_template_hdr *hdr;
182 err = netxen_get_minidump_template_size(adapter);
183 if (err) {
184 adapter->mdump.fw_supports_md = 0;
185 if ((err == NX_RCODE_CMD_INVALID) ||
186 (err == NX_RCODE_CMD_NOT_IMPL)) {
187 dev_info(&adapter->pdev->dev,
188 "Flashed firmware version does not support minidump, "
189 "minimum version required is [ %u.%u.%u ].\n ",
190 NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
191 NX_MD_SUPPORT_SUBVERSION);
192 }
193 return err;
194 }
195
196 if (!adapter->mdump.md_template_size) {
197 dev_err(&adapter->pdev->dev, "Error : Invalid template size "
198 ",should be non-zero.\n");
199 return -EIO;
200 }
201 adapter->mdump.md_template =
202 kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
203
204 if (!adapter->mdump.md_template) {
205 dev_err(&adapter->pdev->dev, "Unable to allocate memory "
206 "for minidump template.\n");
207 return -ENOMEM;
208 }
209
210 err = netxen_get_minidump_template(adapter);
211 if (err) {
212 if (err == NX_RCODE_CMD_NOT_IMPL)
213 adapter->mdump.fw_supports_md = 0;
214 goto free_template;
215 }
216
217 if (netxen_check_template_checksum(adapter)) {
218 dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
219 err = -EIO;
220 goto free_template;
221 }
222
223 adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
224 tmp_buf = (u32 *) adapter->mdump.md_template;
225 template = (u32 *) adapter->mdump.md_template;
226 for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
227 *template++ = __le32_to_cpu(*tmp_buf++);
228 hdr = (struct netxen_minidump_template_hdr *)
229 adapter->mdump.md_template;
230 adapter->mdump.md_capture_buff = NULL;
231 adapter->mdump.fw_supports_md = 1;
232 adapter->mdump.md_enabled = 1;
233
234 return err;
235
236free_template:
237 kfree(adapter->mdump.md_template);
238 adapter->mdump.md_template = NULL;
239 return err;
240}
241
242
94int 243int
95nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) 244nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
96{ 245{
97 u32 rcode = NX_RCODE_SUCCESS; 246 u32 rcode = NX_RCODE_SUCCESS;
98 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 247 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
248 struct netxen_cmd_args cmd;
249
250 memset(&cmd, 0, sizeof(cmd));
251 cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
252 cmd.req.arg1 = recv_ctx->context_id;
253 cmd.req.arg2 = mtu;
254 cmd.req.arg3 = 0;
99 255
100 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) 256 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
101 rcode = netxen_issue_cmd(adapter, 257 netxen_issue_cmd(adapter, &cmd);
102 adapter->ahw.pci_func,
103 NXHAL_VERSION,
104 recv_ctx->context_id,
105 mtu,
106 0,
107 NX_CDRP_CMD_SET_MTU);
108 258
109 if (rcode != NX_RCODE_SUCCESS) 259 if (rcode != NX_RCODE_SUCCESS)
110 return -EIO; 260 return -EIO;
@@ -116,15 +266,14 @@ int
116nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, 266nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
117 u32 speed, u32 duplex, u32 autoneg) 267 u32 speed, u32 duplex, u32 autoneg)
118{ 268{
119 269 struct netxen_cmd_args cmd;
120 return netxen_issue_cmd(adapter, 270
121 adapter->ahw.pci_func, 271 memset(&cmd, 0, sizeof(cmd));
122 NXHAL_VERSION, 272 cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
123 speed, 273 cmd.req.arg1 = speed;
124 duplex, 274 cmd.req.arg2 = duplex;
125 autoneg, 275 cmd.req.arg3 = autoneg;
126 NX_CDRP_CMD_CONFIG_GBE_PORT); 276 return netxen_issue_cmd(adapter, &cmd);
127
128} 277}
129 278
130static int 279static int
@@ -139,6 +288,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
139 nx_cardrsp_sds_ring_t *prsp_sds; 288 nx_cardrsp_sds_ring_t *prsp_sds;
140 struct nx_host_rds_ring *rds_ring; 289 struct nx_host_rds_ring *rds_ring;
141 struct nx_host_sds_ring *sds_ring; 290 struct nx_host_sds_ring *sds_ring;
291 struct netxen_cmd_args cmd;
142 292
143 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; 293 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
144 u64 phys_addr; 294 u64 phys_addr;
@@ -218,13 +368,12 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
218 } 368 }
219 369
220 phys_addr = hostrq_phys_addr; 370 phys_addr = hostrq_phys_addr;
221 err = netxen_issue_cmd(adapter, 371 memset(&cmd, 0, sizeof(cmd));
222 adapter->ahw.pci_func, 372 cmd.req.arg1 = (u32)(phys_addr >> 32);
223 NXHAL_VERSION, 373 cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
224 (u32)(phys_addr >> 32), 374 cmd.req.arg3 = rq_size;
225 (u32)(phys_addr & 0xffffffff), 375 cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
226 rq_size, 376 err = netxen_issue_cmd(adapter, &cmd);
227 NX_CDRP_CMD_CREATE_RX_CTX);
228 if (err) { 377 if (err) {
229 printk(KERN_WARNING 378 printk(KERN_WARNING
230 "Failed to create rx ctx in firmware%d\n", err); 379 "Failed to create rx ctx in firmware%d\n", err);
@@ -273,15 +422,15 @@ static void
273nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) 422nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
274{ 423{
275 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 424 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
425 struct netxen_cmd_args cmd;
276 426
277 if (netxen_issue_cmd(adapter, 427 memset(&cmd, 0, sizeof(cmd));
278 adapter->ahw.pci_func, 428 cmd.req.arg1 = recv_ctx->context_id;
279 NXHAL_VERSION, 429 cmd.req.arg2 = NX_DESTROY_CTX_RESET;
280 recv_ctx->context_id, 430 cmd.req.arg3 = 0;
281 NX_DESTROY_CTX_RESET, 431 cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;
282 0,
283 NX_CDRP_CMD_DESTROY_RX_CTX)) {
284 432
433 if (netxen_issue_cmd(adapter, &cmd)) {
285 printk(KERN_WARNING 434 printk(KERN_WARNING
286 "%s: Failed to destroy rx ctx in firmware\n", 435 "%s: Failed to destroy rx ctx in firmware\n",
287 netxen_nic_driver_name); 436 netxen_nic_driver_name);
@@ -302,6 +451,7 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
302 dma_addr_t rq_phys_addr, rsp_phys_addr; 451 dma_addr_t rq_phys_addr, rsp_phys_addr;
303 struct nx_host_tx_ring *tx_ring = adapter->tx_ring; 452 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
304 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 453 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
454 struct netxen_cmd_args cmd;
305 455
306 rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); 456 rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
307 rq_addr = pci_alloc_consistent(adapter->pdev, 457 rq_addr = pci_alloc_consistent(adapter->pdev,
@@ -345,13 +495,12 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
345 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); 495 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
346 496
347 phys_addr = rq_phys_addr; 497 phys_addr = rq_phys_addr;
348 err = netxen_issue_cmd(adapter, 498 memset(&cmd, 0, sizeof(cmd));
349 adapter->ahw.pci_func, 499 cmd.req.arg1 = (u32)(phys_addr >> 32);
350 NXHAL_VERSION, 500 cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
351 (u32)(phys_addr >> 32), 501 cmd.req.arg3 = rq_size;
352 ((u32)phys_addr & 0xffffffff), 502 cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
353 rq_size, 503 err = netxen_issue_cmd(adapter, &cmd);
354 NX_CDRP_CMD_CREATE_TX_CTX);
355 504
356 if (err == NX_RCODE_SUCCESS) { 505 if (err == NX_RCODE_SUCCESS) {
357 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 506 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
@@ -380,14 +529,14 @@ out_free_rq:
380static void 529static void
381nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter) 530nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
382{ 531{
383 if (netxen_issue_cmd(adapter, 532 struct netxen_cmd_args cmd;
384 adapter->ahw.pci_func, 533
385 NXHAL_VERSION, 534 memset(&cmd, 0, sizeof(cmd));
386 adapter->tx_context_id, 535 cmd.req.arg1 = adapter->tx_context_id;
387 NX_DESTROY_CTX_RESET, 536 cmd.req.arg2 = NX_DESTROY_CTX_RESET;
388 0, 537 cmd.req.arg3 = 0;
389 NX_CDRP_CMD_DESTROY_TX_CTX)) { 538 cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
390 539 if (netxen_issue_cmd(adapter, &cmd)) {
391 printk(KERN_WARNING 540 printk(KERN_WARNING
392 "%s: Failed to destroy tx ctx in firmware\n", 541 "%s: Failed to destroy tx ctx in firmware\n",
393 netxen_nic_driver_name); 542 netxen_nic_driver_name);
@@ -398,34 +547,37 @@ int
398nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val) 547nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
399{ 548{
400 u32 rcode; 549 u32 rcode;
401 550 struct netxen_cmd_args cmd;
402 rcode = netxen_issue_cmd(adapter, 551
403 adapter->ahw.pci_func, 552 memset(&cmd, 0, sizeof(cmd));
404 NXHAL_VERSION, 553 cmd.req.arg1 = reg;
405 reg, 554 cmd.req.arg2 = 0;
406 0, 555 cmd.req.arg3 = 0;
407 0, 556 cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
408 NX_CDRP_CMD_READ_PHY); 557 cmd.rsp.arg1 = 1;
409 558 rcode = netxen_issue_cmd(adapter, &cmd);
410 if (rcode != NX_RCODE_SUCCESS) 559 if (rcode != NX_RCODE_SUCCESS)
411 return -EIO; 560 return -EIO;
412 561
413 return NXRD32(adapter, NX_ARG1_CRB_OFFSET); 562 if (val == NULL)
563 return -EIO;
564
565 *val = cmd.rsp.arg1;
566 return 0;
414} 567}
415 568
416int 569int
417nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val) 570nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
418{ 571{
419 u32 rcode; 572 u32 rcode;
420 573 struct netxen_cmd_args cmd;
421 rcode = netxen_issue_cmd(adapter, 574
422 adapter->ahw.pci_func, 575 memset(&cmd, 0, sizeof(cmd));
423 NXHAL_VERSION, 576 cmd.req.arg1 = reg;
424 reg, 577 cmd.req.arg2 = val;
425 val, 578 cmd.req.arg3 = 0;
426 0, 579 cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
427 NX_CDRP_CMD_WRITE_PHY); 580 rcode = netxen_issue_cmd(adapter, &cmd);
428
429 if (rcode != NX_RCODE_SUCCESS) 581 if (rcode != NX_RCODE_SUCCESS)
430 return -EIO; 582 return -EIO;
431 583
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 8a371985319f..8c39299331a2 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -248,6 +248,11 @@ skip:
248 } 248 }
249 } 249 }
250 250
251 if (!netif_running(dev) || !adapter->ahw.linkup) {
252 ecmd->duplex = DUPLEX_UNKNOWN;
253 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
254 }
255
251 return 0; 256 return 0;
252} 257}
253 258
@@ -812,6 +817,107 @@ static int netxen_get_intr_coalesce(struct net_device *netdev,
812 return 0; 817 return 0;
813} 818}
814 819
820static int
821netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
822{
823 struct netxen_adapter *adapter = netdev_priv(netdev);
824 struct netxen_minidump *mdump = &adapter->mdump;
825 if (adapter->fw_mdump_rdy)
826 dump->len = mdump->md_dump_size;
827 else
828 dump->len = 0;
829 dump->flag = mdump->md_capture_mask;
830 dump->version = adapter->fw_version;
831 return 0;
832}
833
/*
 * ethtool ->set_dump handler: val->flag is either one of the control
 * keys (force dump, enable/disable capture, force reset) or, in the
 * default case, a new minidump capture-mask level.
 * Returns 0 on success, -EINVAL for an out-of-range flag value.
 */
static int
netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
	int ret = 0;
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct netxen_minidump *mdump = &adapter->mdump;

	switch (val->flag) {
	case NX_FORCE_FW_DUMP_KEY:
		/* Capture must be enabled before a dump can be forced. */
		if (!mdump->md_enabled)
			mdump->md_enabled = 1;
		if (adapter->fw_mdump_rdy) {
			/* An uncollected dump would be overwritten; refuse. */
			netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
			return ret;
		}
		netdev_info(netdev, "Forcing a fw dump\n");
		/* The device-reset path performs the actual capture. */
		nx_dev_request_reset(adapter);
		break;
	case NX_DISABLE_FW_DUMP:
		if (mdump->md_enabled) {
			netdev_info(netdev, "Disabling FW Dump\n");
			mdump->md_enabled = 0;
		}
		break;
	case NX_ENABLE_FW_DUMP:
		if (!mdump->md_enabled) {
			netdev_info(netdev, "Enabling FW dump\n");
			mdump->md_enabled = 1;
		}
		break;
	case NX_FORCE_FW_RESET:
		netdev_info(netdev, "Forcing FW reset\n");
		nx_dev_request_reset(adapter);
		/* NOTE(review): presumably this relinquishes reset ownership
		 * so no dump is collected on this reset — confirm against
		 * the NETXEN_FW_RESET_OWNER users. */
		adapter->flags &= ~NETXEN_FW_RESET_OWNER;
		break;
	default:
		/* Any value within [NX_DUMP_MASK_MIN, NX_DUMP_MASK_MAX] is
		 * interpreted as a new capture mask (low byte only). */
		if (val->flag <= NX_DUMP_MASK_MAX &&
			val->flag >= NX_DUMP_MASK_MIN) {
			mdump->md_capture_mask = val->flag & 0xff;
			netdev_info(netdev, "Driver mask changed to: 0x%x\n",
				mdump->md_capture_mask);
			break;
		}
		netdev_info(netdev,
			"Invalid dump level: 0x%x\n", val->flag);
		return -EINVAL;
	}

	return ret;
}
884
/*
 * ethtool ->get_dump_data handler: copy the minidump (template header
 * followed by the captured data) into the user-supplied buffer, then
 * release the capture buffer so a new dump can be taken.
 * Returns 0 on success, -EINVAL when no dump is pending.
 */
static int
netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
			void *buffer)
{
	int i, copy_sz;
	u32 *hdr_ptr, *data;
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct netxen_minidump *mdump = &adapter->mdump;


	if (!adapter->fw_mdump_rdy) {
		netdev_info(netdev, "Dump not available\n");
		return -EINVAL;
	}
	/* Copy template header first */
	copy_sz = mdump->md_template_size;
	hdr_ptr = (u32 *) mdump->md_template;
	data = buffer;
	/* Header words are emitted in little-endian order. */
	for (i = 0; i < copy_sz/sizeof(u32); i++)
		*data++ = cpu_to_le32(*hdr_ptr++);

	/* Copy captured dump data */
	/* The capture buffer begins with its own copy of the template;
	 * skip past it so only the captured payload is appended. */
	memcpy(buffer + copy_sz,
		mdump->md_capture_buff + mdump->md_template_size,
		mdump->md_capture_size);
	dump->len = copy_sz + mdump->md_capture_size;
	dump->flag = mdump->md_capture_mask;

	/* Free dump area once data has been captured */
	vfree(mdump->md_capture_buff);
	mdump->md_capture_buff = NULL;
	adapter->fw_mdump_rdy = 0;
	netdev_info(netdev, "extracted the fw dump Successfully\n");
	return 0;
}
920
815const struct ethtool_ops netxen_nic_ethtool_ops = { 921const struct ethtool_ops netxen_nic_ethtool_ops = {
816 .get_settings = netxen_nic_get_settings, 922 .get_settings = netxen_nic_get_settings,
817 .set_settings = netxen_nic_set_settings, 923 .set_settings = netxen_nic_set_settings,
@@ -833,4 +939,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
833 .get_sset_count = netxen_get_sset_count, 939 .get_sset_count = netxen_get_sset_count,
834 .get_coalesce = netxen_get_intr_coalesce, 940 .get_coalesce = netxen_get_intr_coalesce,
835 .set_coalesce = netxen_set_intr_coalesce, 941 .set_coalesce = netxen_set_intr_coalesce,
942 .get_dump_flag = netxen_get_dump_flag,
943 .get_dump_data = netxen_get_dump_data,
944 .set_dump = netxen_set_dump,
836}; 945};
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index dc1967c1f312..b1a897cd9a8d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -969,6 +969,7 @@ enum {
969#define NX_RCODE_FATAL_ERROR 0x80000000 969#define NX_RCODE_FATAL_ERROR 0x80000000
970#define NX_FWERROR_PEGNUM(code) ((code) & 0xff) 970#define NX_FWERROR_PEGNUM(code) ((code) & 0xff)
971#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff) 971#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
972#define NX_FWERROR_PEGSTAT1(code) ((code >> 8) & 0x1fffff)
972 973
973#define FW_POLL_DELAY (2 * HZ) 974#define FW_POLL_DELAY (2 * HZ)
974#define FW_FAIL_THRESH 3 975#define FW_FAIL_THRESH 3
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 3f89e57cae50..0a8122851025 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -46,7 +46,6 @@ static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
46 void __iomem *addr, u32 data); 46 void __iomem *addr, u32 data);
47static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, 47static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
48 void __iomem *addr); 48 void __iomem *addr);
49
50#ifndef readq 49#ifndef readq
51static inline u64 readq(void __iomem *addr) 50static inline u64 readq(void __iomem *addr)
52{ 51{
@@ -1974,3 +1973,631 @@ netxen_nic_wol_supported(struct netxen_adapter *adapter)
1974 1973
1975 return 0; 1974 return 0;
1976} 1975}
1976
/*
 * Execute a minidump "control" entry: for each of op_count addresses
 * (stepping by addr_stride), run every operation whose bit is set in
 * crtEntry->opcode.  Operations can write/modify CRB registers, poll
 * for a value, and read/write the template's saved-state scratch array.
 * Returns 0 on success, 1 for an unknown opcode, -1 on poll timeout.
 * NOTE(review): the return type is u32 but -1 is returned on timeout;
 * callers appear to store the result in a signed int — confirm.
 */
static u32 netxen_md_cntrl(struct netxen_adapter *adapter,
		struct netxen_minidump_template_hdr *template_hdr,
		struct netxen_minidump_entry_crb *crtEntry)
{
	int loop_cnt, i, rv = 0, timeout_flag;
	u32 op_count, stride;
	u32 opcode, read_value, addr;
	unsigned long timeout, timeout_jiffies;
	addr = crtEntry->addr;
	op_count = crtEntry->op_count;
	stride = crtEntry->addr_stride;

	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
		/* opcode is a bit mask: test each bit and dispatch on the
		 * isolated single-bit value. */
		for (i = 0; i < sizeof(crtEntry->opcode) * 8; i++) {
			opcode = (crtEntry->opcode & (0x1 << i));
			if (opcode) {
				switch (opcode) {
				case NX_DUMP_WCRB:
					/* Plain register write. */
					NX_WR_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						crtEntry->value_1);
					break;
				case NX_DUMP_RWCRB:
					/* Read-then-write-back (touch). */
					NX_RD_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						&read_value);
					NX_WR_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						read_value);
					break;
				case NX_DUMP_ANDCRB:
					/* Read-modify-write: AND with value_2. */
					NX_RD_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						&read_value);
					read_value &= crtEntry->value_2;
					NX_WR_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						read_value);
					break;
				case NX_DUMP_ORCRB:
					/* Read-modify-write: OR with value_3. */
					NX_RD_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						&read_value);
					read_value |= crtEntry->value_3;
					NX_WR_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						read_value);
					break;
				case NX_DUMP_POLLCRB:
					/* Poll until (reg & value_2) == value_1
					 * or poll_timeout ms elapse. */
					timeout = crtEntry->poll_timeout;
					NX_RD_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						&read_value);
					timeout_jiffies =
					msecs_to_jiffies(timeout) + jiffies;
					for (timeout_flag = 0;
						!timeout_flag
					&& ((read_value & crtEntry->value_2)
					!= crtEntry->value_1);) {
						if (time_after(jiffies,
							timeout_jiffies))
							timeout_flag = 1;
					NX_RD_DUMP_REG(addr,
						adapter->ahw.pci_base0,
							&read_value);
					}

					if (timeout_flag) {
						dev_err(&adapter->pdev->dev, "%s : "
							"Timeout in poll_crb control operation.\n"
								, __func__);
						return -1;
					}
					break;
				case NX_DUMP_RD_SAVE:
					/* Read a register (address optionally
					 * taken from saved state) and stash
					 * the value into saved state. */
					/* Decide which address to use */
					if (crtEntry->state_index_a)
						addr =
						template_hdr->saved_state_array
						[crtEntry->state_index_a];
					NX_RD_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						&read_value);
					template_hdr->saved_state_array
					[crtEntry->state_index_v]
						= read_value;
					break;
				case NX_DUMP_WRT_SAVED:
					/* Write either a saved value or
					 * value_1 to a register. */
					/* Decide which value to use */
					if (crtEntry->state_index_v)
						read_value =
						template_hdr->saved_state_array
						[crtEntry->state_index_v];
					else
						read_value = crtEntry->value_1;

					/* Decide which address to use */
					if (crtEntry->state_index_a)
						addr =
						template_hdr->saved_state_array
						[crtEntry->state_index_a];

					NX_WR_DUMP_REG(addr,
						adapter->ahw.pci_base0,
						read_value);
					break;
				case NX_DUMP_MOD_SAVE_ST:
					/* Arithmetic on a saved-state slot:
					 * shift, mask, OR, then add. */
					read_value =
					template_hdr->saved_state_array
						[crtEntry->state_index_v];
					read_value <<= crtEntry->shl;
					read_value >>= crtEntry->shr;
					if (crtEntry->value_2)
						read_value &=
						crtEntry->value_2;
					read_value |= crtEntry->value_3;
					read_value += crtEntry->value_1;
					/* Write value back to state area.*/
					template_hdr->saved_state_array
						[crtEntry->state_index_v]
							= read_value;
					break;
				default:
					rv = 1;
					break;
				}
			}
		}
		addr = addr + stride;
	}
	return rv;
}
2109
2110/* Read memory or MN */
2111static u32
2112netxen_md_rdmem(struct netxen_adapter *adapter,
2113 struct netxen_minidump_entry_rdmem
2114 *memEntry, u64 *data_buff)
2115{
2116 u64 addr, value = 0;
2117 int i = 0, loop_cnt;
2118
2119 addr = (u64)memEntry->read_addr;
2120 loop_cnt = memEntry->read_data_size; /* This is size in bytes */
2121 loop_cnt /= sizeof(value);
2122
2123 for (i = 0; i < loop_cnt; i++) {
2124 if (netxen_nic_pci_mem_read_2M(adapter, addr, &value))
2125 goto out;
2126 *data_buff++ = value;
2127 addr += sizeof(value);
2128 }
2129out:
2130 return i * sizeof(value);
2131}
2132
2133/* Read CRB operation */
2134static u32 netxen_md_rd_crb(struct netxen_adapter *adapter,
2135 struct netxen_minidump_entry_crb
2136 *crbEntry, u32 *data_buff)
2137{
2138 int loop_cnt;
2139 u32 op_count, addr, stride, value;
2140
2141 addr = crbEntry->addr;
2142 op_count = crbEntry->op_count;
2143 stride = crbEntry->addr_stride;
2144
2145 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
2146 NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &value);
2147 *data_buff++ = addr;
2148 *data_buff++ = value;
2149 addr = addr + stride;
2150 }
2151 return loop_cnt * (2 * sizeof(u32));
2152}
2153
2154/* Read ROM */
/*
 * Capture flash ROM contents: acquire the flash semaphore, read
 * read_data_size bytes through the ROM window as LE32 words, then
 * release the semaphore.  Returns read_data_size.
 */
static u32
netxen_md_rdrom(struct netxen_adapter *adapter,
		struct netxen_minidump_entry_rdrom
			*romEntry, u32 *data_buff)
{
	int i, count = 0;
	u32 size, lck_val;
	u32 val;
	u32 fl_addr, waddr, raddr;
	fl_addr = romEntry->read_addr;
	size = romEntry->read_data_size/4;
lock_try:
	/* Reading the SEM2 lock register attempts the acquire; retry up
	 * to MAX_CTL_CHECK times at 20 ms intervals.
	 * NOTE(review): on exhausting the retries the code proceeds
	 * without the lock — confirm this best-effort behavior is
	 * intended. */
	lck_val = readl((void __iomem *)(adapter->ahw.pci_base0 +
							NX_FLASH_SEM2_LK));
	if (!lck_val && count < MAX_CTL_CHECK) {
		msleep(20);
		count++;
		goto lock_try;
	}
	/* Record this function as the lock holder. */
	writel(adapter->ahw.pci_func, (void __iomem *)(adapter->ahw.pci_base0 +
							NX_FLASH_LOCK_ID));
	for (i = 0; i < size; i++) {
		/* Window the high 16 bits, then read via the data port. */
		waddr = fl_addr & 0xFFFF0000;
		NX_WR_DUMP_REG(FLASH_ROM_WINDOW, adapter->ahw.pci_base0, waddr);
		raddr = FLASH_ROM_DATA + (fl_addr & 0x0000FFFF);
		NX_RD_DUMP_REG(raddr, adapter->ahw.pci_base0, &val);
		*data_buff++ = cpu_to_le32(val);
		fl_addr += sizeof(val);
	}
	/* Reading SEM2_ULK releases the semaphore. */
	readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_ULK));
	return romEntry->read_data_size;
}
2187
2188/* Handle L2 Cache */
/*
 * Capture L2 cache contents: for each of op_count tags, program the
 * tag register, optionally kick and poll the control register, then
 * read read_addr_cnt words starting at read_addr.
 * Returns the byte count on success, -1 on poll timeout.
 */
static u32
netxen_md_L2Cache(struct netxen_adapter *adapter,
		struct netxen_minidump_entry_cache
			*cacheEntry, u32 *data_buff)
{
	int loop_cnt, i, k, timeout_flag = 0;
	u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
	u32 tag_value, read_cnt;
	u8 cntl_value_w, cntl_value_r;
	unsigned long timeout, timeout_jiffies;

	loop_cnt = cacheEntry->op_count;
	read_addr = cacheEntry->read_addr;
	cntrl_addr = cacheEntry->control_addr;
	/* NOTE(review): cntl_value_w is u8, so write_value is truncated
	 * to its low byte here — confirm that is intended. */
	cntl_value_w = (u32) cacheEntry->write_value;
	tag_reg_addr = cacheEntry->tag_reg_addr;
	tag_value = cacheEntry->init_tag_value;
	read_cnt = cacheEntry->read_addr_cnt;

	for (i = 0; i < loop_cnt; i++) {
		NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
		if (cntl_value_w)
			NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
					(u32)cntl_value_w);
		if (cacheEntry->poll_mask) {
			/* Wait for the control register's poll_mask bits to
			 * clear, bounded by poll_wait milliseconds.
			 * NOTE(review): cntl_value_r is u8 but its address is
			 * passed to NX_RD_DUMP_REG — verify the macro stores
			 * only a single byte through this pointer. */
			timeout = cacheEntry->poll_wait;
			NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
					&cntl_value_r);
			timeout_jiffies = msecs_to_jiffies(timeout) + jiffies;
			for (timeout_flag = 0; !timeout_flag &&
			((cntl_value_r & cacheEntry->poll_mask) != 0);) {
				if (time_after(jiffies, timeout_jiffies))
					timeout_flag = 1;
				NX_RD_DUMP_REG(cntrl_addr,
					adapter->ahw.pci_base0,
						&cntl_value_r);
			}
			if (timeout_flag) {
				dev_err(&adapter->pdev->dev,
						"Timeout in processing L2 Tag poll.\n");
				return -1;
			}
		}
		addr = read_addr;
		for (k = 0; k < read_cnt; k++) {
			NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0,
					&read_value);
			*data_buff++ = read_value;
			addr += cacheEntry->read_addr_stride;
		}
		tag_value += cacheEntry->tag_value_stride;
	}
	return read_cnt * loop_cnt * sizeof(read_value);
}
2243
2244
2245/* Handle L1 Cache */
2246static u32 netxen_md_L1Cache(struct netxen_adapter *adapter,
2247 struct netxen_minidump_entry_cache
2248 *cacheEntry, u32 *data_buff)
2249{
2250 int i, k, loop_cnt;
2251 u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
2252 u32 tag_value, read_cnt;
2253 u8 cntl_value_w;
2254
2255 loop_cnt = cacheEntry->op_count;
2256 read_addr = cacheEntry->read_addr;
2257 cntrl_addr = cacheEntry->control_addr;
2258 cntl_value_w = (u32) cacheEntry->write_value;
2259 tag_reg_addr = cacheEntry->tag_reg_addr;
2260 tag_value = cacheEntry->init_tag_value;
2261 read_cnt = cacheEntry->read_addr_cnt;
2262
2263 for (i = 0; i < loop_cnt; i++) {
2264 NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
2265 NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
2266 (u32) cntl_value_w);
2267 addr = read_addr;
2268 for (k = 0; k < read_cnt; k++) {
2269 NX_RD_DUMP_REG(addr,
2270 adapter->ahw.pci_base0,
2271 &read_value);
2272 *data_buff++ = read_value;
2273 addr += cacheEntry->read_addr_stride;
2274 }
2275 tag_value += cacheEntry->tag_value_stride;
2276 }
2277 return read_cnt * loop_cnt * sizeof(read_value);
2278}
2279
2280/* Reading OCM memory */
2281static u32
2282netxen_md_rdocm(struct netxen_adapter *adapter,
2283 struct netxen_minidump_entry_rdocm
2284 *ocmEntry, u32 *data_buff)
2285{
2286 int i, loop_cnt;
2287 u32 value;
2288 void __iomem *addr;
2289 addr = (ocmEntry->read_addr + adapter->ahw.pci_base0);
2290 loop_cnt = ocmEntry->op_count;
2291
2292 for (i = 0; i < loop_cnt; i++) {
2293 value = readl(addr);
2294 *data_buff++ = value;
2295 addr += ocmEntry->read_addr_stride;
2296 }
2297 return i * sizeof(u32);
2298}
2299
2300/* Read MUX data */
2301static u32
2302netxen_md_rdmux(struct netxen_adapter *adapter, struct netxen_minidump_entry_mux
2303 *muxEntry, u32 *data_buff)
2304{
2305 int loop_cnt = 0;
2306 u32 read_addr, read_value, select_addr, sel_value;
2307
2308 read_addr = muxEntry->read_addr;
2309 sel_value = muxEntry->select_value;
2310 select_addr = muxEntry->select_addr;
2311
2312 for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
2313 NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, sel_value);
2314 NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value);
2315 *data_buff++ = sel_value;
2316 *data_buff++ = read_value;
2317 sel_value += muxEntry->select_value_stride;
2318 }
2319 return loop_cnt * (2 * sizeof(u32));
2320}
2321
2322/* Handling Queue State Reads */
2323static u32
2324netxen_md_rdqueue(struct netxen_adapter *adapter,
2325 struct netxen_minidump_entry_queue
2326 *queueEntry, u32 *data_buff)
2327{
2328 int loop_cnt, k;
2329 u32 queue_id, read_addr, read_value, read_stride, select_addr, read_cnt;
2330
2331 read_cnt = queueEntry->read_addr_cnt;
2332 read_stride = queueEntry->read_addr_stride;
2333 select_addr = queueEntry->select_addr;
2334
2335 for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
2336 loop_cnt++) {
2337 NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id);
2338 read_addr = queueEntry->read_addr;
2339 for (k = 0; k < read_cnt; k--) {
2340 NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0,
2341 &read_value);
2342 *data_buff++ = read_value;
2343 read_addr += read_stride;
2344 }
2345 queue_id += queueEntry->queue_id_stride;
2346 }
2347 return loop_cnt * (read_cnt * sizeof(read_value));
2348}
2349
2350
2351/*
2352* We catch an error where driver does not read
2353* as much data as we expect from the entry.
2354*/
2355
2356static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
2357 struct netxen_minidump_entry *entry, u32 esize)
2358{
2359 if (esize < 0) {
2360 entry->hdr.driver_flags |= NX_DUMP_SKIP;
2361 return esize;
2362 }
2363 if (esize != entry->hdr.entry_capture_size) {
2364 entry->hdr.entry_capture_size = esize;
2365 entry->hdr.driver_flags |= NX_DUMP_SIZE_ERR;
2366 dev_info(&adapter->pdev->dev,
2367 "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
2368 entry->hdr.entry_type, entry->hdr.entry_capture_mask,
2369 esize, entry->hdr.entry_capture_size);
2370 dev_info(&adapter->pdev->dev, "Aborting further dump capture\n");
2371 }
2372 return 0;
2373}
2374
/*
 * Walk the firmware minidump template and execute each entry whose
 * capture mask overlaps the driver's configured mask, appending the
 * captured data to md_capture_buff (after a verbatim copy of the
 * template itself).  Entries failing the size check are flagged but
 * the walk continues.  Returns 0, or -EINVAL for an unusable mask.
 */
static int netxen_parse_md_template(struct netxen_adapter *adapter)
{
	int num_of_entries, buff_level, e_cnt, esize;
	int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
	char *dbuff;
	void *template_buff = adapter->mdump.md_template;
	char *dump_buff = adapter->mdump.md_capture_buff;
	int capture_mask = adapter->mdump.md_capture_mask;
	struct netxen_minidump_template_hdr *template_hdr;
	struct netxen_minidump_entry *entry;

	/* The two lowest mask bits are mandatory for a usable dump. */
	if ((capture_mask & 0x3) != 0x3) {
		dev_err(&adapter->pdev->dev, "Capture mask %02x below minimum needed "
			"for valid firmware dump\n", capture_mask);
		return -EINVAL;
	}
	template_hdr = (struct netxen_minidump_template_hdr *) template_buff;
	num_of_entries = template_hdr->num_of_entries;
	entry = (struct netxen_minidump_entry *) ((char *) template_buff +
				template_hdr->first_entry_offset);
	/* The dump starts with a verbatim copy of the template. */
	memcpy(dump_buff, template_buff, adapter->mdump.md_template_size);
	dump_buff = dump_buff + adapter->mdump.md_template_size;

	/* Sanity: a well-formed template begins with a TLHDR entry. */
	if (template_hdr->entry_type == TLHDR)
		sane_start = 1;

	for (e_cnt = 0, buff_level = 0; e_cnt < num_of_entries; e_cnt++) {
		/* Skip entries outside the configured capture mask. */
		if (!(entry->hdr.entry_capture_mask & capture_mask)) {
			entry->hdr.driver_flags |= NX_DUMP_SKIP;
			entry = (struct netxen_minidump_entry *)
				((char *) entry + entry->hdr.entry_size);
			continue;
		}
		switch (entry->hdr.entry_type) {
		case RDNOP:
			entry->hdr.driver_flags |= NX_DUMP_SKIP;
			break;
		case RDEND:
			/* Terminator; more than one indicates corruption. */
			entry->hdr.driver_flags |= NX_DUMP_SKIP;
			if (!sane_end)
				end_cnt = e_cnt;
			sane_end += 1;
			break;
		case CNTRL:
			/* Control entries produce no data, only side effects. */
			rv = netxen_md_cntrl(adapter,
				template_hdr, (void *)entry);
			if (rv)
				entry->hdr.driver_flags |= NX_DUMP_SKIP;
			break;
		case RDCRB:
			dbuff = dump_buff + buff_level;
			esize = netxen_md_rd_crb(adapter,
					(void *) entry, (void *) dbuff);
			rv = netxen_md_entry_err_chk
				(adapter, entry, esize);
			if (rv < 0)
				break;
			buff_level += esize;
			break;
		case RDMN:
		case RDMEM:
			dbuff = dump_buff + buff_level;
			esize = netxen_md_rdmem(adapter,
				(void *) entry, (void *) dbuff);
			rv = netxen_md_entry_err_chk
				(adapter, entry, esize);
			if (rv < 0)
				break;
			buff_level += esize;
			break;
		case BOARD:
		case RDROM:
			dbuff = dump_buff + buff_level;
			esize = netxen_md_rdrom(adapter,
				(void *) entry, (void *) dbuff);
			rv = netxen_md_entry_err_chk
				(adapter, entry, esize);
			if (rv < 0)
				break;
			buff_level += esize;
			break;
		case L2ITG:
		case L2DTG:
		case L2DAT:
		case L2INS:
			dbuff = dump_buff + buff_level;
			esize = netxen_md_L2Cache(adapter,
				(void *) entry, (void *) dbuff);
			rv = netxen_md_entry_err_chk
				(adapter, entry, esize);
			if (rv < 0)
				break;
			buff_level += esize;
			break;
		case L1DAT:
		case L1INS:
			dbuff = dump_buff + buff_level;
			esize = netxen_md_L1Cache(adapter,
				(void *) entry, (void *) dbuff);
			rv = netxen_md_entry_err_chk
				(adapter, entry, esize);
			if (rv < 0)
				break;
			buff_level += esize;
			break;
		case RDOCM:
			dbuff = dump_buff + buff_level;
			esize = netxen_md_rdocm(adapter,
				(void *) entry, (void *) dbuff);
			rv = netxen_md_entry_err_chk
				(adapter, entry, esize);
			if (rv < 0)
				break;
			buff_level += esize;
			break;
		case RDMUX:
			dbuff = dump_buff + buff_level;
			esize = netxen_md_rdmux(adapter,
				(void *) entry, (void *) dbuff);
			rv = netxen_md_entry_err_chk
				(adapter, entry, esize);
			if (rv < 0)
				break;
			buff_level += esize;
			break;
		case QUEUE:
			dbuff = dump_buff + buff_level;
			esize = netxen_md_rdqueue(adapter,
				(void *) entry, (void *) dbuff);
			rv = netxen_md_entry_err_chk
				(adapter, entry, esize);
			if (rv < 0)
				break;
			buff_level += esize;
			break;
		default:
			entry->hdr.driver_flags |= NX_DUMP_SKIP;
			break;
		}
		/* Next entry in the template */
		/* NOTE(review): a negative err_chk result only breaks out of
		 * the switch; the walk continues with the next entry even
		 * though err_chk logs "Aborting further dump capture" —
		 * confirm whether a full abort was intended. */
		entry = (struct netxen_minidump_entry *)
			((char *) entry + entry->hdr.entry_size);
	}
	if (!sane_start || sane_end > 1) {
		dev_err(&adapter->pdev->dev,
				"Firmware minidump template configuration error.\n");
	}
	return 0;
}
2524
2525static int
2526netxen_collect_minidump(struct netxen_adapter *adapter)
2527{
2528 int ret = 0;
2529 struct netxen_minidump_template_hdr *hdr;
2530 struct timespec val;
2531 hdr = (struct netxen_minidump_template_hdr *)
2532 adapter->mdump.md_template;
2533 hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
2534 jiffies_to_timespec(jiffies, &val);
2535 hdr->driver_timestamp = (u32) val.tv_sec;
2536 hdr->driver_info_word2 = adapter->fw_version;
2537 hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
2538 ret = netxen_parse_md_template(adapter);
2539 if (ret)
2540 return ret;
2541
2542 return ret;
2543}
2544
2545
/*
 * Top-level firmware minidump entry point: size the capture from the
 * template's per-level size table, allocate the dump buffer
 * (template + capture data), and run the collection.  On success the
 * buffer is retained and fw_mdump_rdy is set until the dump is
 * extracted via ethtool; an existing uncollected dump is never
 * overwritten.
 */
void
netxen_dump_fw(struct netxen_adapter *adapter)
{
	struct netxen_minidump_template_hdr *hdr;
	int i, k, data_size = 0;
	u32 capture_mask;
	hdr = (struct netxen_minidump_template_hdr *)
				adapter->mdump.md_template;
	capture_mask = adapter->mdump.md_capture_mask;

	/* Sum the capture sizes of every enabled mask level; the scan
	 * starts at bit 1 (i = 0x2, k = 1) and stops past
	 * NX_DUMP_MASK_MAX. */
	for (i = 0x2, k = 1; (i & NX_DUMP_MASK_MAX); i <<= 1, k++) {
		if (i & capture_mask)
			data_size += hdr->capture_size_array[k];
	}
	if (!data_size) {
		dev_err(&adapter->pdev->dev,
			"Invalid cap sizes for capture_mask=0x%x\n",
			adapter->mdump.md_capture_mask);
		return;
	}
	adapter->mdump.md_capture_size = data_size;
	adapter->mdump.md_dump_size = adapter->mdump.md_template_size +
					adapter->mdump.md_capture_size;
	if (!adapter->mdump.md_capture_buff) {
		adapter->mdump.md_capture_buff =
				vmalloc(adapter->mdump.md_dump_size);
		if (!adapter->mdump.md_capture_buff) {
			dev_info(&adapter->pdev->dev,
				"Unable to allocate memory for minidump "
				"capture_buffer(%d bytes).\n",
					adapter->mdump.md_dump_size);
			return;
		}
		memset(adapter->mdump.md_capture_buff, 0,
				adapter->mdump.md_dump_size);
		if (netxen_collect_minidump(adapter)) {
			/* Collection failed: drop the buffer and state. */
			adapter->mdump.has_valid_dump = 0;
			adapter->mdump.md_dump_size = 0;
			vfree(adapter->mdump.md_capture_buff);
			adapter->mdump.md_capture_buff = NULL;
			dev_err(&adapter->pdev->dev,
				"Error in collecting firmware minidump.\n");
		} else {
			adapter->mdump.md_timestamp = jiffies;
			adapter->mdump.has_valid_dump = 1;
			adapter->fw_mdump_rdy = 1;
			dev_info(&adapter->pdev->dev, "%s Successfully "
				"collected fw dump.\n", adapter->netdev->name);
		}

	} else {
		/* A previous dump is still pending extraction; keep it. */
		dev_info(&adapter->pdev->dev,
					"Cannot overwrite previously collected "
					"firmware minidump.\n");
		adapter->fw_mdump_rdy = 1;
		return;
	}
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index a8259cc19a63..f69ac442c6a1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -280,13 +280,10 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
280 280
281 } 281 }
282 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); 282 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
283 if (rds_ring->rx_buf_arr == NULL) { 283 if (rds_ring->rx_buf_arr == NULL)
284 printk(KERN_ERR "%s: Failed to allocate "
285 "rx buffer ring %d\n",
286 netdev->name, ring);
287 /* free whatever was already allocated */ 284 /* free whatever was already allocated */
288 goto err_out; 285 goto err_out;
289 } 286
290 INIT_LIST_HEAD(&rds_ring->free_list); 287 INIT_LIST_HEAD(&rds_ring->free_list);
291 /* 288 /*
292 * Now go through all of them, set reference handles 289 * Now go through all of them, set reference handles
@@ -449,7 +446,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
449 446
450 /* resetall */ 447 /* resetall */
451 netxen_rom_lock(adapter); 448 netxen_rom_lock(adapter);
452 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff); 449 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
453 netxen_rom_unlock(adapter); 450 netxen_rom_unlock(adapter);
454 451
455 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 452 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
@@ -480,11 +477,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
480 } 477 }
481 478
482 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); 479 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
483 if (buf == NULL) { 480 if (buf == NULL)
484 printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
485 netxen_nic_driver_name);
486 return -ENOMEM; 481 return -ENOMEM;
487 }
488 482
489 for (i = 0; i < n; i++) { 483 for (i = 0; i < n; i++) {
490 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || 484 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
@@ -1353,7 +1347,6 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
1353 1347
1354 do { 1348 do {
1355 val = NXRD32(adapter, CRB_CMDPEG_STATE); 1349 val = NXRD32(adapter, CRB_CMDPEG_STATE);
1356
1357 switch (val) { 1350 switch (val) {
1358 case PHAN_INITIALIZE_COMPLETE: 1351 case PHAN_INITIALIZE_COMPLETE:
1359 case PHAN_INITIALIZE_ACK: 1352 case PHAN_INITIALIZE_ACK:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 7dd9a4b107e6..8dc4a134dece 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -82,7 +82,6 @@ static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
82static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); 82static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
83static void netxen_create_diag_entries(struct netxen_adapter *adapter); 83static void netxen_create_diag_entries(struct netxen_adapter *adapter);
84static void netxen_remove_diag_entries(struct netxen_adapter *adapter); 84static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
85
86static int nx_dev_request_aer(struct netxen_adapter *adapter); 85static int nx_dev_request_aer(struct netxen_adapter *adapter);
87static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter); 86static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
88static int netxen_can_start_firmware(struct netxen_adapter *adapter); 87static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -802,10 +801,10 @@ err_out:
802static void 801static void
803netxen_check_options(struct netxen_adapter *adapter) 802netxen_check_options(struct netxen_adapter *adapter)
804{ 803{
805 u32 fw_major, fw_minor, fw_build; 804 u32 fw_major, fw_minor, fw_build, prev_fw_version;
806 char brd_name[NETXEN_MAX_SHORT_NAME]; 805 char brd_name[NETXEN_MAX_SHORT_NAME];
807 char serial_num[32]; 806 char serial_num[32];
808 int i, offset, val; 807 int i, offset, val, err;
809 int *ptr32; 808 int *ptr32;
810 struct pci_dev *pdev = adapter->pdev; 809 struct pci_dev *pdev = adapter->pdev;
811 810
@@ -826,9 +825,22 @@ netxen_check_options(struct netxen_adapter *adapter)
826 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); 825 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
827 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); 826 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
828 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); 827 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
829 828 prev_fw_version = adapter->fw_version;
830 adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); 829 adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
831 830
831 /* Get FW Mini Coredump template and store it */
832 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
833 if (adapter->mdump.md_template == NULL ||
834 adapter->fw_version > prev_fw_version) {
835 kfree(adapter->mdump.md_template);
836 adapter->mdump.md_template = NULL;
837 err = netxen_setup_minidump(adapter);
838 if (err)
839 dev_err(&adapter->pdev->dev,
840 "Failed to setup minidump rcode = %d\n", err);
841 }
842 }
843
832 if (adapter->portnum == 0) { 844 if (adapter->portnum == 0) {
833 get_brd_name_by_type(adapter->ahw.board_type, brd_name); 845 get_brd_name_by_type(adapter->ahw.board_type, brd_name);
834 846
@@ -909,7 +921,12 @@ netxen_start_firmware(struct netxen_adapter *adapter)
909 if (err) 921 if (err)
910 return err; 922 return err;
911 923
912 if (!netxen_can_start_firmware(adapter)) 924 err = netxen_can_start_firmware(adapter);
925
926 if (err < 0)
927 return err;
928
929 if (!err)
913 goto wait_init; 930 goto wait_init;
914 931
915 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); 932 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
@@ -1403,7 +1420,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1403 1420
1404 netdev = alloc_etherdev(sizeof(struct netxen_adapter)); 1421 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
1405 if(!netdev) { 1422 if(!netdev) {
1406 dev_err(&pdev->dev, "failed to allocate net_device\n");
1407 err = -ENOMEM; 1423 err = -ENOMEM;
1408 goto err_out_free_res; 1424 goto err_out_free_res;
1409 } 1425 }
@@ -1529,6 +1545,18 @@ err_out_disable_pdev:
1529 return err; 1545 return err;
1530} 1546}
1531 1547
1548static
1549void netxen_cleanup_minidump(struct netxen_adapter *adapter)
1550{
1551 kfree(adapter->mdump.md_template);
1552 adapter->mdump.md_template = NULL;
1553
1554 if (adapter->mdump.md_capture_buff) {
1555 vfree(adapter->mdump.md_capture_buff);
1556 adapter->mdump.md_capture_buff = NULL;
1557 }
1558}
1559
1532static void __devexit netxen_nic_remove(struct pci_dev *pdev) 1560static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1533{ 1561{
1534 struct netxen_adapter *adapter; 1562 struct netxen_adapter *adapter;
@@ -1564,8 +1592,10 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1564 1592
1565 netxen_release_firmware(adapter); 1593 netxen_release_firmware(adapter);
1566 1594
1567 if (NX_IS_REVISION_P3(pdev->revision)) 1595 if (NX_IS_REVISION_P3(pdev->revision)) {
1596 netxen_cleanup_minidump(adapter);
1568 pci_disable_pcie_error_reporting(pdev); 1597 pci_disable_pcie_error_reporting(pdev);
1598 }
1569 1599
1570 pci_release_regions(pdev); 1600 pci_release_regions(pdev);
1571 pci_disable_device(pdev); 1601 pci_disable_device(pdev);
@@ -2317,7 +2347,7 @@ nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
2317static int 2347static int
2318nx_decr_dev_ref_cnt(struct netxen_adapter *adapter) 2348nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
2319{ 2349{
2320 int count; 2350 int count, state;
2321 if (netxen_api_lock(adapter)) 2351 if (netxen_api_lock(adapter))
2322 return -EIO; 2352 return -EIO;
2323 2353
@@ -2325,8 +2355,9 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
2325 WARN_ON(count == 0); 2355 WARN_ON(count == 0);
2326 2356
2327 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); 2357 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
2358 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2328 2359
2329 if (count == 0) 2360 if (count == 0 && state != NX_DEV_FAILED)
2330 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); 2361 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
2331 2362
2332 netxen_api_unlock(adapter); 2363 netxen_api_unlock(adapter);
@@ -2355,7 +2386,7 @@ nx_dev_request_aer(struct netxen_adapter *adapter)
2355 return ret; 2386 return ret;
2356} 2387}
2357 2388
2358static int 2389int
2359nx_dev_request_reset(struct netxen_adapter *adapter) 2390nx_dev_request_reset(struct netxen_adapter *adapter)
2360{ 2391{
2361 u32 state; 2392 u32 state;
@@ -2366,10 +2397,11 @@ nx_dev_request_reset(struct netxen_adapter *adapter)
2366 2397
2367 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2398 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2368 2399
2369 if (state == NX_DEV_NEED_RESET) 2400 if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED)
2370 ret = 0; 2401 ret = 0;
2371 else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) { 2402 else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
2372 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); 2403 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
2404 adapter->flags |= NETXEN_FW_RESET_OWNER;
2373 ret = 0; 2405 ret = 0;
2374 } 2406 }
2375 2407
@@ -2384,8 +2416,10 @@ netxen_can_start_firmware(struct netxen_adapter *adapter)
2384 int count; 2416 int count;
2385 int can_start = 0; 2417 int can_start = 0;
2386 2418
2387 if (netxen_api_lock(adapter)) 2419 if (netxen_api_lock(adapter)) {
2388 return 0; 2420 nx_incr_dev_ref_cnt(adapter);
2421 return -1;
2422 }
2389 2423
2390 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); 2424 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
2391 2425
@@ -2457,8 +2491,31 @@ netxen_fwinit_work(struct work_struct *work)
2457 struct netxen_adapter *adapter = container_of(work, 2491 struct netxen_adapter *adapter = container_of(work,
2458 struct netxen_adapter, fw_work.work); 2492 struct netxen_adapter, fw_work.work);
2459 int dev_state; 2493 int dev_state;
2460 2494 int count;
2461 dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); 2495 dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
2496 if (adapter->flags & NETXEN_FW_RESET_OWNER) {
2497 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
2498 WARN_ON(count == 0);
2499 if (count == 1) {
2500 if (adapter->mdump.md_enabled) {
2501 rtnl_lock();
2502 netxen_dump_fw(adapter);
2503 rtnl_unlock();
2504 }
2505 adapter->flags &= ~NETXEN_FW_RESET_OWNER;
2506 if (netxen_api_lock(adapter)) {
2507 clear_bit(__NX_RESETTING, &adapter->state);
2508 NXWR32(adapter, NX_CRB_DEV_STATE,
2509 NX_DEV_FAILED);
2510 return;
2511 }
2512 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
2513 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
2514 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
2515 dev_state = NX_DEV_COLD;
2516 netxen_api_unlock(adapter);
2517 }
2518 }
2462 2519
2463 switch (dev_state) { 2520 switch (dev_state) {
2464 case NX_DEV_COLD: 2521 case NX_DEV_COLD:
@@ -2471,11 +2528,9 @@ netxen_fwinit_work(struct work_struct *work)
2471 2528
2472 case NX_DEV_NEED_RESET: 2529 case NX_DEV_NEED_RESET:
2473 case NX_DEV_INITALIZING: 2530 case NX_DEV_INITALIZING:
2474 if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
2475 netxen_schedule_work(adapter, 2531 netxen_schedule_work(adapter,
2476 netxen_fwinit_work, 2 * FW_POLL_DELAY); 2532 netxen_fwinit_work, 2 * FW_POLL_DELAY);
2477 return; 2533 return;
2478 }
2479 2534
2480 case NX_DEV_FAILED: 2535 case NX_DEV_FAILED:
2481 default: 2536 default:
@@ -2483,6 +2538,15 @@ netxen_fwinit_work(struct work_struct *work)
2483 break; 2538 break;
2484 } 2539 }
2485 2540
2541 if (netxen_api_lock(adapter)) {
2542 clear_bit(__NX_RESETTING, &adapter->state);
2543 return;
2544 }
2545 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED);
2546 netxen_api_unlock(adapter);
2547 dev_err(&adapter->pdev->dev, "%s: Device initialization Failed\n",
2548 adapter->netdev->name);
2549
2486 clear_bit(__NX_RESETTING, &adapter->state); 2550 clear_bit(__NX_RESETTING, &adapter->state);
2487} 2551}
2488 2552
@@ -2492,7 +2556,7 @@ netxen_detach_work(struct work_struct *work)
2492 struct netxen_adapter *adapter = container_of(work, 2556 struct netxen_adapter *adapter = container_of(work,
2493 struct netxen_adapter, fw_work.work); 2557 struct netxen_adapter, fw_work.work);
2494 struct net_device *netdev = adapter->netdev; 2558 struct net_device *netdev = adapter->netdev;
2495 int ref_cnt, delay; 2559 int ref_cnt = 0, delay;
2496 u32 status; 2560 u32 status;
2497 2561
2498 netif_device_detach(netdev); 2562 netif_device_detach(netdev);
@@ -2511,7 +2575,8 @@ netxen_detach_work(struct work_struct *work)
2511 if (adapter->temp == NX_TEMP_PANIC) 2575 if (adapter->temp == NX_TEMP_PANIC)
2512 goto err_ret; 2576 goto err_ret;
2513 2577
2514 ref_cnt = nx_decr_dev_ref_cnt(adapter); 2578 if (!(adapter->flags & NETXEN_FW_RESET_OWNER))
2579 ref_cnt = nx_decr_dev_ref_cnt(adapter);
2515 2580
2516 if (ref_cnt == -EIO) 2581 if (ref_cnt == -EIO)
2517 goto err_ret; 2582 goto err_ret;
@@ -2531,6 +2596,7 @@ static int
2531netxen_check_health(struct netxen_adapter *adapter) 2596netxen_check_health(struct netxen_adapter *adapter)
2532{ 2597{
2533 u32 state, heartbit; 2598 u32 state, heartbit;
2599 u32 peg_status;
2534 struct net_device *netdev = adapter->netdev; 2600 struct net_device *netdev = adapter->netdev;
2535 2601
2536 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2602 state = NXRD32(adapter, NX_CRB_DEV_STATE);
@@ -2551,7 +2617,7 @@ netxen_check_health(struct netxen_adapter *adapter)
2551 * Send request to destroy context in case of tx timeout only 2617 * Send request to destroy context in case of tx timeout only
2552 * and doesn't required in case of Fw hang 2618 * and doesn't required in case of Fw hang
2553 */ 2619 */
2554 if (state == NX_DEV_NEED_RESET) { 2620 if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) {
2555 adapter->need_fw_reset = 1; 2621 adapter->need_fw_reset = 1;
2556 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 2622 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2557 goto detach; 2623 goto detach;
@@ -2577,8 +2643,24 @@ netxen_check_health(struct netxen_adapter *adapter)
2577 2643
2578 clear_bit(__NX_FW_ATTACHED, &adapter->state); 2644 clear_bit(__NX_FW_ATTACHED, &adapter->state);
2579 2645
2580 dev_info(&netdev->dev, "firmware hang detected\n"); 2646 dev_err(&netdev->dev, "firmware hang detected\n");
2581 2647 peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
2648 dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
2649 "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
2650 "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
2651 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
2652 "PEG_NET_4_PC: 0x%x\n",
2653 peg_status,
2654 NXRD32(adapter, NETXEN_PEG_HALT_STATUS2),
2655 NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c),
2656 NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c),
2657 NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c),
2658 NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c),
2659 NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c));
2660 if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67)
2661 dev_err(&adapter->pdev->dev,
2662 "Firmware aborted with error code 0x00006700. "
2663 "Device is being reset.\n");
2582detach: 2664detach:
2583 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && 2665 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2584 !test_and_set_bit(__NX_RESETTING, &adapter->state)) 2666 !test_and_set_bit(__NX_RESETTING, &adapter->state))
@@ -2848,13 +2930,12 @@ static struct bin_attribute bin_attr_mem = {
2848static void 2930static void
2849netxen_create_sysfs_entries(struct netxen_adapter *adapter) 2931netxen_create_sysfs_entries(struct netxen_adapter *adapter)
2850{ 2932{
2851 struct net_device *netdev = adapter->netdev; 2933 struct device *dev = &adapter->pdev->dev;
2852 struct device *dev = &netdev->dev;
2853 2934
2854 if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { 2935 if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
2855 /* bridged_mode control */ 2936 /* bridged_mode control */
2856 if (device_create_file(dev, &dev_attr_bridged_mode)) { 2937 if (device_create_file(dev, &dev_attr_bridged_mode)) {
2857 dev_warn(&netdev->dev, 2938 dev_warn(dev,
2858 "failed to create bridged_mode sysfs entry\n"); 2939 "failed to create bridged_mode sysfs entry\n");
2859 } 2940 }
2860 } 2941 }
@@ -2863,8 +2944,7 @@ netxen_create_sysfs_entries(struct netxen_adapter *adapter)
2863static void 2944static void
2864netxen_remove_sysfs_entries(struct netxen_adapter *adapter) 2945netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
2865{ 2946{
2866 struct net_device *netdev = adapter->netdev; 2947 struct device *dev = &adapter->pdev->dev;
2867 struct device *dev = &netdev->dev;
2868 2948
2869 if (adapter->capabilities & NX_FW_CAPABILITY_BDG) 2949 if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
2870 device_remove_file(dev, &dev_attr_bridged_mode); 2950 device_remove_file(dev, &dev_attr_bridged_mode);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 7931531c3a40..d49f6dac51fd 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3805,7 +3805,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3805 3805
3806 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3806 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3807 if (!ndev) { 3807 if (!ndev) {
3808 pr_err("%s could not alloc etherdev\n", pci_name(pdev));
3809 err = -ENOMEM; 3808 err = -ENOMEM;
3810 goto err_out_free_regions; 3809 goto err_out_free_regions;
3811 } 3810 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 60976fc4ccc6..2b5af22419a5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,7 +37,7 @@
37#define _QLCNIC_LINUX_MAJOR 5 37#define _QLCNIC_LINUX_MAJOR 5
38#define _QLCNIC_LINUX_MINOR 0 38#define _QLCNIC_LINUX_MINOR 0
39#define _QLCNIC_LINUX_SUBVERSION 25 39#define _QLCNIC_LINUX_SUBVERSION 25
40#define QLCNIC_LINUX_VERSIONID "5.0.25" 40#define QLCNIC_LINUX_VERSIONID "5.0.26"
41#define QLCNIC_DRV_IDC_VER 0x01 41#define QLCNIC_DRV_IDC_VER 0x01
42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index cc228cf3d84b..30dcbfba8f24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -155,7 +155,6 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
155{ 155{
156 struct qlcnic_adapter *adapter = netdev_priv(dev); 156 struct qlcnic_adapter *adapter = netdev_priv(dev);
157 int check_sfp_module = 0; 157 int check_sfp_module = 0;
158 u16 pcifn = adapter->ahw->pci_func;
159 158
160 /* read which mode */ 159 /* read which mode */
161 if (adapter->ahw->port_type == QLCNIC_GBE) { 160 if (adapter->ahw->port_type == QLCNIC_GBE) {
@@ -194,10 +193,8 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
194 goto skip; 193 goto skip;
195 } 194 }
196 195
197 val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); 196 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
198 ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ * 197 ecmd->duplex = DUPLEX_UNKNOWN;
199 P3P_LINK_SPEED_VAL(pcifn, val));
200 ecmd->duplex = DUPLEX_FULL;
201 ecmd->autoneg = AUTONEG_DISABLE; 198 ecmd->autoneg = AUTONEG_DISABLE;
202 } else 199 } else
203 return -EIO; 200 return -EIO;
@@ -1155,7 +1152,6 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1155 1152
1156 if (!fw_dump->clr) { 1153 if (!fw_dump->clr) {
1157 netdev_info(netdev, "Dump not available\n"); 1154 netdev_info(netdev, "Dump not available\n");
1158 qlcnic_api_unlock(adapter);
1159 return -EINVAL; 1155 return -EINVAL;
1160 } 1156 }
1161 /* Copy template header first */ 1157 /* Copy template header first */
@@ -1174,7 +1170,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1174 vfree(fw_dump->data); 1170 vfree(fw_dump->data);
1175 fw_dump->data = NULL; 1171 fw_dump->data = NULL;
1176 fw_dump->clr = 0; 1172 fw_dump->clr = 0;
1177 1173 netdev_info(netdev, "extracted the FW dump Successfully\n");
1178 return 0; 1174 return 0;
1179} 1175}
1180 1176
@@ -1192,7 +1188,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1192 return ret; 1188 return ret;
1193 } 1189 }
1194 if (fw_dump->clr) { 1190 if (fw_dump->clr) {
1195 dev_info(&adapter->pdev->dev, 1191 netdev_info(netdev,
1196 "Previous dump not cleared, not forcing dump\n"); 1192 "Previous dump not cleared, not forcing dump\n");
1197 return ret; 1193 return ret;
1198 } 1194 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 38669583840c..41d85efee422 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1369,7 +1369,13 @@ qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1369 1369
1370 adapter->module_type = module; 1370 adapter->module_type = module;
1371 adapter->link_autoneg = autoneg; 1371 adapter->link_autoneg = autoneg;
1372 adapter->link_speed = link_speed; 1372
1373 if (link_status) {
1374 adapter->link_speed = link_speed;
1375 } else {
1376 adapter->link_speed = SPEED_UNKNOWN;
1377 adapter->link_duplex = DUPLEX_UNKNOWN;
1378 }
1373} 1379}
1374 1380
1375static void 1381static void
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 69b8e4ef14d9..dba95311a462 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1576,7 +1576,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1576 1576
1577 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); 1577 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1578 if (!netdev) { 1578 if (!netdev) {
1579 dev_err(&pdev->dev, "failed to allocate net_device\n");
1580 err = -ENOMEM; 1579 err = -ENOMEM;
1581 goto err_out_free_res; 1580 goto err_out_free_res;
1582 } 1581 }
@@ -3000,8 +2999,18 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
3000void 2999void
3001qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) 3000qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
3002{ 3001{
3003 u32 state; 3002 u32 state, xg_val = 0, gb_val = 0;
3004 3003
3004 qlcnic_xg_set_xg0_mask(xg_val);
3005 qlcnic_xg_set_xg1_mask(xg_val);
3006 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val);
3007 qlcnic_gb_set_gb0_mask(gb_val);
3008 qlcnic_gb_set_gb1_mask(gb_val);
3009 qlcnic_gb_set_gb2_mask(gb_val);
3010 qlcnic_gb_set_gb3_mask(gb_val);
3011 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val);
3012 dev_info(&adapter->pdev->dev, "Pause control frames disabled"
3013 " on all ports\n");
3005 adapter->need_fw_reset = 1; 3014 adapter->need_fw_reset = 1;
3006 if (qlcnic_api_lock(adapter)) 3015 if (qlcnic_api_lock(adapter))
3007 return; 3016 return;
@@ -3150,7 +3159,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
3150 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), 3159 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
3151 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); 3160 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
3152 peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); 3161 peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
3153 if (LSW(MSB(peg_status)) == 0x67) 3162 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
3154 dev_err(&adapter->pdev->dev, 3163 dev_err(&adapter->pdev->dev,
3155 "Firmware aborted with error code 0x00006700. " 3164 "Firmware aborted with error code 0x00006700. "
3156 "Device is being reset.\n"); 3165 "Device is being reset.\n");
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index b8478aab050e..5a639df33f18 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "v1.00.00.29.00.00-01" 21#define DRV_VERSION "v1.00.00.30.00.00-01"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index fca804f36d61..58185b604b72 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -1824,10 +1824,8 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
1824 pr_err("%s: Enter\n", __func__); 1824 pr_err("%s: Enter\n", __func__);
1825 1825
1826 ptr = kmalloc(size, GFP_ATOMIC); 1826 ptr = kmalloc(size, GFP_ATOMIC);
1827 if (ptr == NULL) { 1827 if (ptr == NULL)
1828 pr_err("%s: Couldn't allocate a buffer\n", __func__);
1829 return; 1828 return;
1830 }
1831 1829
1832 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { 1830 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
1833 pr_err("%s: Failed to upload control block!\n", __func__); 1831 pr_err("%s: Failed to upload control block!\n", __func__);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b54898737284..49343ec21c82 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -375,13 +375,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
375 u32 lower = 375 u32 lower =
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | 376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 (addr[5]); 377 (addr[5]);
378
379 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380 "Adding %s address %pM at index %d in the CAM.\n",
381 type == MAC_ADDR_TYPE_MULTI_MAC ?
382 "MULTICAST" : "UNICAST",
383 addr, index);
384
385 status = 378 status =
386 ql_wait_reg_rdy(qdev, 379 ql_wait_reg_rdy(qdev,
387 MAC_ADDR_IDX, MAC_ADDR_MW, 0); 380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
@@ -430,12 +423,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
430 * addressing. It's either MAC_ADDR_E on or off. 423 * addressing. It's either MAC_ADDR_E on or off.
431 * That's bit-27 we're talking about. 424 * That's bit-27 we're talking about.
432 */ 425 */
433 netif_info(qdev, ifup, qdev->ndev,
434 "%s VLAN ID %d %s the CAM.\n",
435 enable_bit ? "Adding" : "Removing",
436 index,
437 enable_bit ? "to" : "from");
438
439 status = 426 status =
440 ql_wait_reg_rdy(qdev, 427 ql_wait_reg_rdy(qdev,
441 MAC_ADDR_IDX, MAC_ADDR_MW, 0); 428 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
@@ -535,28 +522,6 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
535 int status = -EINVAL; /* Return error if no mask match. */ 522 int status = -EINVAL; /* Return error if no mask match. */
536 u32 value = 0; 523 u32 value = 0;
537 524
538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539 "%s %s mask %s the routing reg.\n",
540 enable ? "Adding" : "Removing",
541 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554 index == RT_IDX_UNUSED013 ? "UNUSED13" :
555 index == RT_IDX_UNUSED014 ? "UNUSED14" :
556 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557 "(Bad index != RT_IDX)",
558 enable ? "to" : "from");
559
560 switch (mask) { 525 switch (mask) {
561 case RT_IDX_CAM_HIT: 526 case RT_IDX_CAM_HIT:
562 { 527 {
@@ -1178,14 +1143,16 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1178 int i; 1143 int i;
1179 1144
1180 while (rx_ring->lbq_free_cnt > 32) { 1145 while (rx_ring->lbq_free_cnt > 32) {
1181 for (i = 0; i < 16; i++) { 1146 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1147 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: try cleaning clean_idx = %d.\n", 1148 "lbq: try cleaning clean_idx = %d.\n",
1184 clean_idx); 1149 clean_idx);
1185 lbq_desc = &rx_ring->lbq[clean_idx]; 1150 lbq_desc = &rx_ring->lbq[clean_idx];
1186 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { 1151 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152 rx_ring->lbq_clean_idx = clean_idx;
1187 netif_err(qdev, ifup, qdev->ndev, 1153 netif_err(qdev, ifup, qdev->ndev,
1188 "Could not get a page chunk.\n"); 1154 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1155 i, clean_idx);
1189 return; 1156 return;
1190 } 1157 }
1191 1158
@@ -1230,7 +1197,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1230 int i; 1197 int i;
1231 1198
1232 while (rx_ring->sbq_free_cnt > 16) { 1199 while (rx_ring->sbq_free_cnt > 16) {
1233 for (i = 0; i < 16; i++) { 1200 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1234 sbq_desc = &rx_ring->sbq[clean_idx]; 1201 sbq_desc = &rx_ring->sbq[clean_idx];
1235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1202 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236 "sbq: try cleaning clean_idx = %d.\n", 1203 "sbq: try cleaning clean_idx = %d.\n",
@@ -1576,13 +1543,14 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1576 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && 1543 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1544 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 /* Unfragmented ipv4 UDP frame. */ 1545 /* Unfragmented ipv4 UDP frame. */
1579 struct iphdr *iph = (struct iphdr *) skb->data; 1546 struct iphdr *iph =
1547 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1580 if (!(iph->frag_off & 1548 if (!(iph->frag_off &
1581 cpu_to_be16(IP_MF|IP_OFFSET))) { 1549 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY; 1550 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 netif_printk(qdev, rx_status, KERN_DEBUG, 1551 netif_printk(qdev, rx_status, KERN_DEBUG,
1584 qdev->ndev, 1552 qdev->ndev,
1585 "TCP checksum done!\n"); 1553 "UDP checksum done!\n");
1586 } 1554 }
1587 } 1555 }
1588 } 1556 }
@@ -1690,7 +1658,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1690 skb->ip_summed = CHECKSUM_UNNECESSARY; 1658 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691 netif_printk(qdev, rx_status, KERN_DEBUG, 1659 netif_printk(qdev, rx_status, KERN_DEBUG,
1692 qdev->ndev, 1660 qdev->ndev,
1693 "TCP checksum done!\n"); 1661 "UDP checksum done!\n");
1694 } 1662 }
1695 } 1663 }
1696 } 1664 }
@@ -2312,13 +2280,9 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2312 struct ql_adapter *qdev = netdev_priv(ndev); 2280 struct ql_adapter *qdev = netdev_priv(ndev);
2313 2281
2314 if (features & NETIF_F_HW_VLAN_RX) { 2282 if (features & NETIF_F_HW_VLAN_RX) {
2315 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2316 "Turning on VLAN in NIC_RCV_CFG.\n");
2317 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | 2283 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2318 NIC_RCV_CFG_VLAN_MATCH_AND_NON); 2284 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2319 } else { 2285 } else {
2320 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2321 "Turning off VLAN in NIC_RCV_CFG.\n");
2322 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); 2286 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2323 } 2287 }
2324} 2288}
@@ -3183,8 +3147,6 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3183 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, 3147 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3184 "Invalid rx_ring->type = %d.\n", rx_ring->type); 3148 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3185 } 3149 }
3186 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3187 "Initializing rx work queue.\n");
3188 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), 3150 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3189 CFG_LCQ, rx_ring->cq_id); 3151 CFG_LCQ, rx_ring->cq_id);
3190 if (err) { 3152 if (err) {
@@ -3237,8 +3199,6 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3237 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); 3199 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3238 return err; 3200 return err;
3239 } 3201 }
3240 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3241 "Successfully loaded WQICB.\n");
3242 return err; 3202 return err;
3243} 3203}
3244 3204
@@ -3488,12 +3448,8 @@ static void ql_free_irq(struct ql_adapter *qdev)
3488 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { 3448 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3489 free_irq(qdev->msi_x_entry[i].vector, 3449 free_irq(qdev->msi_x_entry[i].vector,
3490 &qdev->rx_ring[i]); 3450 &qdev->rx_ring[i]);
3491 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3492 "freeing msix interrupt %d.\n", i);
3493 } else { 3451 } else {
3494 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); 3452 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3495 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3496 "freeing msi interrupt %d.\n", i);
3497 } 3453 }
3498 } 3454 }
3499 } 3455 }
@@ -3522,17 +3478,6 @@ static int ql_request_irq(struct ql_adapter *qdev)
3522 "Failed request for MSIX interrupt %d.\n", 3478 "Failed request for MSIX interrupt %d.\n",
3523 i); 3479 i);
3524 goto err_irq; 3480 goto err_irq;
3525 } else {
3526 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3527 "Hooked intr %d, queue type %s, with name %s.\n",
3528 i,
3529 qdev->rx_ring[i].type == DEFAULT_Q ?
3530 "DEFAULT_Q" :
3531 qdev->rx_ring[i].type == TX_Q ?
3532 "TX_Q" :
3533 qdev->rx_ring[i].type == RX_Q ?
3534 "RX_Q" : "",
3535 intr_context->name);
3536 } 3481 }
3537 } else { 3482 } else {
3538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, 3483 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
@@ -3602,15 +3547,11 @@ static int ql_start_rss(struct ql_adapter *qdev)
3602 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); 3547 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3603 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); 3548 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3604 3549
3605 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3606
3607 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); 3550 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3608 if (status) { 3551 if (status) {
3609 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); 3552 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3610 return status; 3553 return status;
3611 } 3554 }
3612 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3613 "Successfully loaded RICB.\n");
3614 return status; 3555 return status;
3615} 3556}
3616 3557
@@ -3817,11 +3758,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3817 } 3758 }
3818 3759
3819 /* Start NAPI for the RSS queues. */ 3760 /* Start NAPI for the RSS queues. */
3820 for (i = 0; i < qdev->rss_ring_count; i++) { 3761 for (i = 0; i < qdev->rss_ring_count; i++)
3821 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3822 "Enabling NAPI for rx_ring[%d].\n", i);
3823 napi_enable(&qdev->rx_ring[i].napi); 3762 napi_enable(&qdev->rx_ring[i].napi);
3824 }
3825 3763
3826 return status; 3764 return status;
3827} 3765}
@@ -4121,10 +4059,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
4121 rx_ring->lbq_size = 4059 rx_ring->lbq_size =
4122 rx_ring->lbq_len * sizeof(__le64); 4060 rx_ring->lbq_len * sizeof(__le64);
4123 rx_ring->lbq_buf_size = (u16)lbq_buf_len; 4061 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4124 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4125 "lbq_buf_size %d, order = %d\n",
4126 rx_ring->lbq_buf_size,
4127 qdev->lbq_buf_order);
4128 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 4062 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4129 rx_ring->sbq_size = 4063 rx_ring->sbq_size =
4130 rx_ring->sbq_len * sizeof(__le64); 4064 rx_ring->sbq_len * sizeof(__le64);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index cb0eca807852..76cab284876b 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1107,7 +1107,6 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1107 1107
1108 dev = alloc_etherdev(sizeof(struct r6040_private)); 1108 dev = alloc_etherdev(sizeof(struct r6040_private));
1109 if (!dev) { 1109 if (!dev) {
1110 dev_err(&pdev->dev, "Failed to allocate etherdev\n");
1111 err = -ENOMEM; 1110 err = -ENOMEM;
1112 goto err_out; 1111 goto err_out;
1113 } 1112 }
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index a8779bedb3d9..1c3feb0116b5 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -754,10 +754,9 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
754 754
755 /* dev and priv zeroed in alloc_etherdev */ 755 /* dev and priv zeroed in alloc_etherdev */
756 dev = alloc_etherdev (sizeof (*tp)); 756 dev = alloc_etherdev (sizeof (*tp));
757 if (dev == NULL) { 757 if (dev == NULL)
758 dev_err(&pdev->dev, "Unable to alloc new net device\n");
759 return ERR_PTR(-ENOMEM); 758 return ERR_PTR(-ENOMEM);
760 } 759
761 SET_NETDEV_DEV(dev, &pdev->dev); 760 SET_NETDEV_DEV(dev, &pdev->dev);
762 761
763 tp = netdev_priv(dev); 762 tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 0578859a3c73..5821966f9f28 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -24,11 +24,11 @@ config ATP
24 select CRC32 24 select CRC32
25 ---help--- 25 ---help---
26 This is a network (Ethernet) device which attaches to your parallel 26 This is a network (Ethernet) device which attaches to your parallel
27 port. Read <file:drivers/net/atp.c> as well as the Ethernet-HOWTO, 27 port. Read <file:drivers/net/ethernet/realtek/atp.c> as well as the
28 available from <http://www.tldp.org/docs.html#howto>, if you 28 Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>,
29 want to use this. If you intend to use this driver, you should have 29 if you want to use this. If you intend to use this driver, you
30 said N to the "Parallel printer support", because the two drivers 30 should have said N to the "Parallel printer support", because the two
31 don't like each other. 31 drivers don't like each other.
32 32
33 To compile this driver as a module, choose M here: the module 33 To compile this driver as a module, choose M here: the module
34 will be called atp. 34 will be called atp.
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7a0c800b50ad..5eb6858ed0a7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -667,12 +667,19 @@ struct rtl8169_counters {
667 __le16 tx_underun; 667 __le16 tx_underun;
668}; 668};
669 669
670enum rtl_flag {
671 RTL_FLAG_TASK_ENABLED,
672 RTL_FLAG_TASK_SLOW_PENDING,
673 RTL_FLAG_TASK_RESET_PENDING,
674 RTL_FLAG_TASK_PHY_PENDING,
675 RTL_FLAG_MAX
676};
677
670struct rtl8169_private { 678struct rtl8169_private {
671 void __iomem *mmio_addr; /* memory map physical address */ 679 void __iomem *mmio_addr; /* memory map physical address */
672 struct pci_dev *pci_dev; 680 struct pci_dev *pci_dev;
673 struct net_device *dev; 681 struct net_device *dev;
674 struct napi_struct napi; 682 struct napi_struct napi;
675 spinlock_t lock;
676 u32 msg_enable; 683 u32 msg_enable;
677 u16 txd_version; 684 u16 txd_version;
678 u16 mac_version; 685 u16 mac_version;
@@ -688,9 +695,8 @@ struct rtl8169_private {
688 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ 695 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
689 struct timer_list timer; 696 struct timer_list timer;
690 u16 cp_cmd; 697 u16 cp_cmd;
691 u16 intr_event; 698
692 u16 napi_event; 699 u16 event_slow;
693 u16 intr_mask;
694 700
695 struct mdio_ops { 701 struct mdio_ops {
696 void (*write)(void __iomem *, int, int); 702 void (*write)(void __iomem *, int, int);
@@ -714,7 +720,13 @@ struct rtl8169_private {
714 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); 720 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
715 unsigned int (*link_ok)(void __iomem *); 721 unsigned int (*link_ok)(void __iomem *);
716 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); 722 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
717 struct delayed_work task; 723
724 struct {
725 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
726 struct mutex mutex;
727 struct work_struct work;
728 } wk;
729
718 unsigned features; 730 unsigned features;
719 731
720 struct mii_if_info mii; 732 struct mii_if_info mii;
@@ -764,13 +776,20 @@ static int rtl8169_close(struct net_device *dev);
764static void rtl_set_rx_mode(struct net_device *dev); 776static void rtl_set_rx_mode(struct net_device *dev);
765static void rtl8169_tx_timeout(struct net_device *dev); 777static void rtl8169_tx_timeout(struct net_device *dev);
766static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); 778static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
767static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
768 void __iomem *, u32 budget);
769static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); 779static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
770static void rtl8169_down(struct net_device *dev);
771static void rtl8169_rx_clear(struct rtl8169_private *tp); 780static void rtl8169_rx_clear(struct rtl8169_private *tp);
772static int rtl8169_poll(struct napi_struct *napi, int budget); 781static int rtl8169_poll(struct napi_struct *napi, int budget);
773 782
783static void rtl_lock_work(struct rtl8169_private *tp)
784{
785 mutex_lock(&tp->wk.mutex);
786}
787
788static void rtl_unlock_work(struct rtl8169_private *tp)
789{
790 mutex_unlock(&tp->wk.mutex);
791}
792
774static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) 793static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
775{ 794{
776 int cap = pci_pcie_cap(pdev); 795 int cap = pci_pcie_cap(pdev);
@@ -1180,12 +1199,51 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
1180 return value; 1199 return value;
1181} 1200}
1182 1201
1202static u16 rtl_get_events(struct rtl8169_private *tp)
1203{
1204 void __iomem *ioaddr = tp->mmio_addr;
1205
1206 return RTL_R16(IntrStatus);
1207}
1208
1209static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1210{
1211 void __iomem *ioaddr = tp->mmio_addr;
1212
1213 RTL_W16(IntrStatus, bits);
1214 mmiowb();
1215}
1216
1217static void rtl_irq_disable(struct rtl8169_private *tp)
1218{
1219 void __iomem *ioaddr = tp->mmio_addr;
1220
1221 RTL_W16(IntrMask, 0);
1222 mmiowb();
1223}
1224
1225static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1226{
1227 void __iomem *ioaddr = tp->mmio_addr;
1228
1229 RTL_W16(IntrMask, bits);
1230}
1231
1232#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1233#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1234#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1235
1236static void rtl_irq_enable_all(struct rtl8169_private *tp)
1237{
1238 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1239}
1240
1183static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) 1241static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1184{ 1242{
1185 void __iomem *ioaddr = tp->mmio_addr; 1243 void __iomem *ioaddr = tp->mmio_addr;
1186 1244
1187 RTL_W16(IntrMask, 0x0000); 1245 rtl_irq_disable(tp);
1188 RTL_W16(IntrStatus, tp->intr_event); 1246 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
1189 RTL_R8(ChipCmd); 1247 RTL_R8(ChipCmd);
1190} 1248}
1191 1249
@@ -1276,9 +1334,6 @@ static void __rtl8169_check_link_status(struct net_device *dev,
1276 struct rtl8169_private *tp, 1334 struct rtl8169_private *tp,
1277 void __iomem *ioaddr, bool pm) 1335 void __iomem *ioaddr, bool pm)
1278{ 1336{
1279 unsigned long flags;
1280
1281 spin_lock_irqsave(&tp->lock, flags);
1282 if (tp->link_ok(ioaddr)) { 1337 if (tp->link_ok(ioaddr)) {
1283 rtl_link_chg_patch(tp); 1338 rtl_link_chg_patch(tp);
1284 /* This is to cancel a scheduled suspend if there's one. */ 1339 /* This is to cancel a scheduled suspend if there's one. */
@@ -1293,7 +1348,6 @@ static void __rtl8169_check_link_status(struct net_device *dev,
1293 if (pm) 1348 if (pm)
1294 pm_schedule_suspend(&tp->pci_dev->dev, 5000); 1349 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1295 } 1350 }
1296 spin_unlock_irqrestore(&tp->lock, flags);
1297} 1351}
1298 1352
1299static void rtl8169_check_link_status(struct net_device *dev, 1353static void rtl8169_check_link_status(struct net_device *dev,
@@ -1336,12 +1390,12 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1336{ 1390{
1337 struct rtl8169_private *tp = netdev_priv(dev); 1391 struct rtl8169_private *tp = netdev_priv(dev);
1338 1392
1339 spin_lock_irq(&tp->lock); 1393 rtl_lock_work(tp);
1340 1394
1341 wol->supported = WAKE_ANY; 1395 wol->supported = WAKE_ANY;
1342 wol->wolopts = __rtl8169_get_wol(tp); 1396 wol->wolopts = __rtl8169_get_wol(tp);
1343 1397
1344 spin_unlock_irq(&tp->lock); 1398 rtl_unlock_work(tp);
1345} 1399}
1346 1400
1347static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) 1401static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1378,14 +1432,15 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1378{ 1432{
1379 struct rtl8169_private *tp = netdev_priv(dev); 1433 struct rtl8169_private *tp = netdev_priv(dev);
1380 1434
1381 spin_lock_irq(&tp->lock); 1435 rtl_lock_work(tp);
1382 1436
1383 if (wol->wolopts) 1437 if (wol->wolopts)
1384 tp->features |= RTL_FEATURE_WOL; 1438 tp->features |= RTL_FEATURE_WOL;
1385 else 1439 else
1386 tp->features &= ~RTL_FEATURE_WOL; 1440 tp->features &= ~RTL_FEATURE_WOL;
1387 __rtl8169_set_wol(tp, wol->wolopts); 1441 __rtl8169_set_wol(tp, wol->wolopts);
1388 spin_unlock_irq(&tp->lock); 1442
1443 rtl_unlock_work(tp);
1389 1444
1390 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); 1445 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1391 1446
@@ -1540,15 +1595,14 @@ out:
1540static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1595static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1541{ 1596{
1542 struct rtl8169_private *tp = netdev_priv(dev); 1597 struct rtl8169_private *tp = netdev_priv(dev);
1543 unsigned long flags;
1544 int ret; 1598 int ret;
1545 1599
1546 del_timer_sync(&tp->timer); 1600 del_timer_sync(&tp->timer);
1547 1601
1548 spin_lock_irqsave(&tp->lock, flags); 1602 rtl_lock_work(tp);
1549 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), 1603 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1550 cmd->duplex, cmd->advertising); 1604 cmd->duplex, cmd->advertising);
1551 spin_unlock_irqrestore(&tp->lock, flags); 1605 rtl_unlock_work(tp);
1552 1606
1553 return ret; 1607 return ret;
1554} 1608}
@@ -1568,14 +1622,12 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1568 return features; 1622 return features;
1569} 1623}
1570 1624
1571static int rtl8169_set_features(struct net_device *dev, 1625static void __rtl8169_set_features(struct net_device *dev,
1572 netdev_features_t features) 1626 netdev_features_t features)
1573{ 1627{
1574 struct rtl8169_private *tp = netdev_priv(dev); 1628 struct rtl8169_private *tp = netdev_priv(dev);
1575 void __iomem *ioaddr = tp->mmio_addr;
1576 unsigned long flags;
1577 1629
1578 spin_lock_irqsave(&tp->lock, flags); 1630 void __iomem *ioaddr = tp->mmio_addr;
1579 1631
1580 if (features & NETIF_F_RXCSUM) 1632 if (features & NETIF_F_RXCSUM)
1581 tp->cp_cmd |= RxChkSum; 1633 tp->cp_cmd |= RxChkSum;
@@ -1589,12 +1641,21 @@ static int rtl8169_set_features(struct net_device *dev,
1589 1641
1590 RTL_W16(CPlusCmd, tp->cp_cmd); 1642 RTL_W16(CPlusCmd, tp->cp_cmd);
1591 RTL_R16(CPlusCmd); 1643 RTL_R16(CPlusCmd);
1644}
1645
1646static int rtl8169_set_features(struct net_device *dev,
1647 netdev_features_t features)
1648{
1649 struct rtl8169_private *tp = netdev_priv(dev);
1592 1650
1593 spin_unlock_irqrestore(&tp->lock, flags); 1651 rtl_lock_work(tp);
1652 __rtl8169_set_features(dev, features);
1653 rtl_unlock_work(tp);
1594 1654
1595 return 0; 1655 return 0;
1596} 1656}
1597 1657
1658
1598static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, 1659static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1599 struct sk_buff *skb) 1660 struct sk_buff *skb)
1600{ 1661{
@@ -1643,14 +1704,12 @@ static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1643static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1704static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1644{ 1705{
1645 struct rtl8169_private *tp = netdev_priv(dev); 1706 struct rtl8169_private *tp = netdev_priv(dev);
1646 unsigned long flags;
1647 int rc; 1707 int rc;
1648 1708
1649 spin_lock_irqsave(&tp->lock, flags); 1709 rtl_lock_work(tp);
1650
1651 rc = tp->get_settings(dev, cmd); 1710 rc = tp->get_settings(dev, cmd);
1711 rtl_unlock_work(tp);
1652 1712
1653 spin_unlock_irqrestore(&tp->lock, flags);
1654 return rc; 1713 return rc;
1655} 1714}
1656 1715
@@ -1658,14 +1717,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1658 void *p) 1717 void *p)
1659{ 1718{
1660 struct rtl8169_private *tp = netdev_priv(dev); 1719 struct rtl8169_private *tp = netdev_priv(dev);
1661 unsigned long flags;
1662 1720
1663 if (regs->len > R8169_REGS_SIZE) 1721 if (regs->len > R8169_REGS_SIZE)
1664 regs->len = R8169_REGS_SIZE; 1722 regs->len = R8169_REGS_SIZE;
1665 1723
1666 spin_lock_irqsave(&tp->lock, flags); 1724 rtl_lock_work(tp);
1667 memcpy_fromio(p, tp->mmio_addr, regs->len); 1725 memcpy_fromio(p, tp->mmio_addr, regs->len);
1668 spin_unlock_irqrestore(&tp->lock, flags); 1726 rtl_unlock_work(tp);
1669} 1727}
1670 1728
1671static u32 rtl8169_get_msglevel(struct net_device *dev) 1729static u32 rtl8169_get_msglevel(struct net_device *dev)
@@ -3182,18 +3240,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
3182 } 3240 }
3183} 3241}
3184 3242
3185static void rtl8169_phy_timer(unsigned long __opaque) 3243static void rtl_phy_work(struct rtl8169_private *tp)
3186{ 3244{
3187 struct net_device *dev = (struct net_device *)__opaque;
3188 struct rtl8169_private *tp = netdev_priv(dev);
3189 struct timer_list *timer = &tp->timer; 3245 struct timer_list *timer = &tp->timer;
3190 void __iomem *ioaddr = tp->mmio_addr; 3246 void __iomem *ioaddr = tp->mmio_addr;
3191 unsigned long timeout = RTL8169_PHY_TIMEOUT; 3247 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3192 3248
3193 assert(tp->mac_version > RTL_GIGA_MAC_VER_01); 3249 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3194 3250
3195 spin_lock_irq(&tp->lock);
3196
3197 if (tp->phy_reset_pending(tp)) { 3251 if (tp->phy_reset_pending(tp)) {
3198 /* 3252 /*
3199 * A busy loop could burn quite a few cycles on nowadays CPU. 3253 * A busy loop could burn quite a few cycles on nowadays CPU.
@@ -3204,32 +3258,36 @@ static void rtl8169_phy_timer(unsigned long __opaque)
3204 } 3258 }
3205 3259
3206 if (tp->link_ok(ioaddr)) 3260 if (tp->link_ok(ioaddr))
3207 goto out_unlock; 3261 return;
3208 3262
3209 netif_warn(tp, link, dev, "PHY reset until link up\n"); 3263 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3210 3264
3211 tp->phy_reset_enable(tp); 3265 tp->phy_reset_enable(tp);
3212 3266
3213out_mod_timer: 3267out_mod_timer:
3214 mod_timer(timer, jiffies + timeout); 3268 mod_timer(timer, jiffies + timeout);
3215out_unlock: 3269}
3216 spin_unlock_irq(&tp->lock); 3270
3271static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3272{
3273 if (!test_and_set_bit(flag, tp->wk.flags))
3274 schedule_work(&tp->wk.work);
3275}
3276
3277static void rtl8169_phy_timer(unsigned long __opaque)
3278{
3279 struct net_device *dev = (struct net_device *)__opaque;
3280 struct rtl8169_private *tp = netdev_priv(dev);
3281
3282 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3217} 3283}
3218 3284
3219#ifdef CONFIG_NET_POLL_CONTROLLER 3285#ifdef CONFIG_NET_POLL_CONTROLLER
3220/*
3221 * Polling 'interrupt' - used by things like netconsole to send skbs
3222 * without having to re-enable interrupts. It's not called while
3223 * the interrupt routine is executing.
3224 */
3225static void rtl8169_netpoll(struct net_device *dev) 3286static void rtl8169_netpoll(struct net_device *dev)
3226{ 3287{
3227 struct rtl8169_private *tp = netdev_priv(dev); 3288 struct rtl8169_private *tp = netdev_priv(dev);
3228 struct pci_dev *pdev = tp->pci_dev;
3229 3289
3230 disable_irq(pdev->irq); 3290 rtl8169_interrupt(tp->pci_dev->irq, dev);
3231 rtl8169_interrupt(pdev->irq, dev);
3232 enable_irq(pdev->irq);
3233} 3291}
3234#endif 3292#endif
3235 3293
@@ -3310,7 +3368,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3310 low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24); 3368 low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
3311 high = addr[4] | (addr[5] << 8); 3369 high = addr[4] | (addr[5] << 8);
3312 3370
3313 spin_lock_irq(&tp->lock); 3371 rtl_lock_work(tp);
3314 3372
3315 RTL_W8(Cfg9346, Cfg9346_Unlock); 3373 RTL_W8(Cfg9346, Cfg9346_Unlock);
3316 3374
@@ -3334,7 +3392,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3334 3392
3335 RTL_W8(Cfg9346, Cfg9346_Lock); 3393 RTL_W8(Cfg9346, Cfg9346_Lock);
3336 3394
3337 spin_unlock_irq(&tp->lock); 3395 rtl_unlock_work(tp);
3338} 3396}
3339 3397
3340static int rtl_set_mac_address(struct net_device *dev, void *p) 3398static int rtl_set_mac_address(struct net_device *dev, void *p)
@@ -3388,8 +3446,7 @@ static const struct rtl_cfg_info {
3388 void (*hw_start)(struct net_device *); 3446 void (*hw_start)(struct net_device *);
3389 unsigned int region; 3447 unsigned int region;
3390 unsigned int align; 3448 unsigned int align;
3391 u16 intr_event; 3449 u16 event_slow;
3392 u16 napi_event;
3393 unsigned features; 3450 unsigned features;
3394 u8 default_ver; 3451 u8 default_ver;
3395} rtl_cfg_infos [] = { 3452} rtl_cfg_infos [] = {
@@ -3397,9 +3454,7 @@ static const struct rtl_cfg_info {
3397 .hw_start = rtl_hw_start_8169, 3454 .hw_start = rtl_hw_start_8169,
3398 .region = 1, 3455 .region = 1,
3399 .align = 0, 3456 .align = 0,
3400 .intr_event = SYSErr | LinkChg | RxOverflow | 3457 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
3401 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
3402 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
3403 .features = RTL_FEATURE_GMII, 3458 .features = RTL_FEATURE_GMII,
3404 .default_ver = RTL_GIGA_MAC_VER_01, 3459 .default_ver = RTL_GIGA_MAC_VER_01,
3405 }, 3460 },
@@ -3407,9 +3462,7 @@ static const struct rtl_cfg_info {
3407 .hw_start = rtl_hw_start_8168, 3462 .hw_start = rtl_hw_start_8168,
3408 .region = 2, 3463 .region = 2,
3409 .align = 8, 3464 .align = 8,
3410 .intr_event = SYSErr | LinkChg | RxOverflow | 3465 .event_slow = SYSErr | LinkChg | RxOverflow,
3411 TxErr | TxOK | RxOK | RxErr,
3412 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
3413 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, 3466 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
3414 .default_ver = RTL_GIGA_MAC_VER_11, 3467 .default_ver = RTL_GIGA_MAC_VER_11,
3415 }, 3468 },
@@ -3417,9 +3470,8 @@ static const struct rtl_cfg_info {
3417 .hw_start = rtl_hw_start_8101, 3470 .hw_start = rtl_hw_start_8101,
3418 .region = 2, 3471 .region = 2,
3419 .align = 8, 3472 .align = 8,
3420 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | 3473 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
3421 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 3474 PCSTimeout,
3422 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
3423 .features = RTL_FEATURE_MSI, 3475 .features = RTL_FEATURE_MSI,
3424 .default_ver = RTL_GIGA_MAC_VER_13, 3476 .default_ver = RTL_GIGA_MAC_VER_13,
3425 } 3477 }
@@ -3824,23 +3876,21 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
3824static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) 3876static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
3825{ 3877{
3826 void __iomem *ioaddr = tp->mmio_addr; 3878 void __iomem *ioaddr = tp->mmio_addr;
3827 struct pci_dev *pdev = tp->pci_dev;
3828 3879
3829 RTL_W8(MaxTxPacketSize, 0x3f); 3880 RTL_W8(MaxTxPacketSize, 0x3f);
3830 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); 3881 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
3831 RTL_W8(Config4, RTL_R8(Config4) | 0x01); 3882 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
3832 pci_write_config_byte(pdev, 0x79, 0x20); 3883 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
3833} 3884}
3834 3885
3835static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) 3886static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
3836{ 3887{
3837 void __iomem *ioaddr = tp->mmio_addr; 3888 void __iomem *ioaddr = tp->mmio_addr;
3838 struct pci_dev *pdev = tp->pci_dev;
3839 3889
3840 RTL_W8(MaxTxPacketSize, 0x0c); 3890 RTL_W8(MaxTxPacketSize, 0x0c);
3841 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); 3891 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
3842 RTL_W8(Config4, RTL_R8(Config4) & ~0x01); 3892 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
3843 pci_write_config_byte(pdev, 0x79, 0x50); 3893 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
3844} 3894}
3845 3895
3846static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) 3896static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -3958,8 +4008,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3958 4008
3959 dev = alloc_etherdev(sizeof (*tp)); 4009 dev = alloc_etherdev(sizeof (*tp));
3960 if (!dev) { 4010 if (!dev) {
3961 if (netif_msg_drv(&debug))
3962 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
3963 rc = -ENOMEM; 4011 rc = -ENOMEM;
3964 goto out; 4012 goto out;
3965 } 4013 }
@@ -4048,11 +4096,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4048 4096
4049 rtl_init_rxcfg(tp); 4097 rtl_init_rxcfg(tp);
4050 4098
4051 RTL_W16(IntrMask, 0x0000); 4099 rtl_irq_disable(tp);
4052 4100
4053 rtl_hw_reset(tp); 4101 rtl_hw_reset(tp);
4054 4102
4055 RTL_W16(IntrStatus, 0xffff); 4103 rtl_ack_events(tp, 0xffff);
4056 4104
4057 pci_set_master(pdev); 4105 pci_set_master(pdev);
4058 4106
@@ -4098,7 +4146,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4098 tp->do_ioctl = rtl_xmii_ioctl; 4146 tp->do_ioctl = rtl_xmii_ioctl;
4099 } 4147 }
4100 4148
4101 spin_lock_init(&tp->lock); 4149 mutex_init(&tp->wk.mutex);
4102 4150
4103 /* Get MAC address */ 4151 /* Get MAC address */
4104 for (i = 0; i < ETH_ALEN; i++) 4152 for (i = 0; i < ETH_ALEN; i++)
@@ -4126,10 +4174,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4126 /* 8110SCd requires hardware Rx VLAN - disallow toggling */ 4174 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
4127 dev->hw_features &= ~NETIF_F_HW_VLAN_RX; 4175 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
4128 4176
4129 tp->intr_mask = 0xffff;
4130 tp->hw_start = cfg->hw_start; 4177 tp->hw_start = cfg->hw_start;
4131 tp->intr_event = cfg->intr_event; 4178 tp->event_slow = cfg->event_slow;
4132 tp->napi_event = cfg->napi_event;
4133 4179
4134 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? 4180 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
4135 ~(RxBOVF | RxFOVF) : ~0; 4181 ~(RxBOVF | RxFOVF) : ~0;
@@ -4196,7 +4242,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
4196 rtl8168_driver_stop(tp); 4242 rtl8168_driver_stop(tp);
4197 } 4243 }
4198 4244
4199 cancel_delayed_work_sync(&tp->task); 4245 cancel_work_sync(&tp->wk.work);
4200 4246
4201 unregister_netdev(dev); 4247 unregister_netdev(dev);
4202 4248
@@ -4257,6 +4303,8 @@ static void rtl_request_firmware(struct rtl8169_private *tp)
4257 rtl_request_uncached_firmware(tp); 4303 rtl_request_uncached_firmware(tp);
4258} 4304}
4259 4305
4306static void rtl_task(struct work_struct *);
4307
4260static int rtl8169_open(struct net_device *dev) 4308static int rtl8169_open(struct net_device *dev)
4261{ 4309{
4262 struct rtl8169_private *tp = netdev_priv(dev); 4310 struct rtl8169_private *tp = netdev_priv(dev);
@@ -4284,7 +4332,7 @@ static int rtl8169_open(struct net_device *dev)
4284 if (retval < 0) 4332 if (retval < 0)
4285 goto err_free_rx_1; 4333 goto err_free_rx_1;
4286 4334
4287 INIT_DELAYED_WORK(&tp->task, NULL); 4335 INIT_WORK(&tp->wk.work, rtl_task);
4288 4336
4289 smp_mb(); 4337 smp_mb();
4290 4338
@@ -4296,16 +4344,24 @@ static int rtl8169_open(struct net_device *dev)
4296 if (retval < 0) 4344 if (retval < 0)
4297 goto err_release_fw_2; 4345 goto err_release_fw_2;
4298 4346
4347 rtl_lock_work(tp);
4348
4349 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
4350
4299 napi_enable(&tp->napi); 4351 napi_enable(&tp->napi);
4300 4352
4301 rtl8169_init_phy(dev, tp); 4353 rtl8169_init_phy(dev, tp);
4302 4354
4303 rtl8169_set_features(dev, dev->features); 4355 __rtl8169_set_features(dev, dev->features);
4304 4356
4305 rtl_pll_power_up(tp); 4357 rtl_pll_power_up(tp);
4306 4358
4307 rtl_hw_start(dev); 4359 rtl_hw_start(dev);
4308 4360
4361 netif_start_queue(dev);
4362
4363 rtl_unlock_work(tp);
4364
4309 tp->saved_wolopts = 0; 4365 tp->saved_wolopts = 0;
4310 pm_runtime_put_noidle(&pdev->dev); 4366 pm_runtime_put_noidle(&pdev->dev);
4311 4367
@@ -4379,7 +4435,7 @@ static void rtl_hw_start(struct net_device *dev)
4379 4435
4380 tp->hw_start(dev); 4436 tp->hw_start(dev);
4381 4437
4382 netif_start_queue(dev); 4438 rtl_irq_enable_all(tp);
4383} 4439}
4384 4440
4385static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, 4441static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
@@ -4506,9 +4562,6 @@ static void rtl_hw_start_8169(struct net_device *dev)
4506 4562
4507 /* no early-rx interrupts */ 4563 /* no early-rx interrupts */
4508 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 4564 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
4509
4510 /* Enable all known interrupts by setting the interrupt mask. */
4511 RTL_W16(IntrMask, tp->intr_event);
4512} 4565}
4513 4566
4514static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) 4567static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
@@ -4888,8 +4941,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
4888 4941
4889 /* Work around for RxFIFO overflow. */ 4942 /* Work around for RxFIFO overflow. */
4890 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 4943 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
4891 tp->intr_event |= RxFIFOOver | PCSTimeout; 4944 tp->event_slow |= RxFIFOOver | PCSTimeout;
4892 tp->intr_event &= ~RxOverflow; 4945 tp->event_slow &= ~RxOverflow;
4893 } 4946 }
4894 4947
4895 rtl_set_rx_tx_desc_registers(tp, ioaddr); 4948 rtl_set_rx_tx_desc_registers(tp, ioaddr);
@@ -4977,8 +5030,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
4977 RTL_W8(Cfg9346, Cfg9346_Lock); 5030 RTL_W8(Cfg9346, Cfg9346_Lock);
4978 5031
4979 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 5032 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
4980
4981 RTL_W16(IntrMask, tp->intr_event);
4982} 5033}
4983 5034
4984#define R810X_CPCMD_QUIRK_MASK (\ 5035#define R810X_CPCMD_QUIRK_MASK (\
@@ -5077,10 +5128,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
5077 void __iomem *ioaddr = tp->mmio_addr; 5128 void __iomem *ioaddr = tp->mmio_addr;
5078 struct pci_dev *pdev = tp->pci_dev; 5129 struct pci_dev *pdev = tp->pci_dev;
5079 5130
5080 if (tp->mac_version >= RTL_GIGA_MAC_VER_30) { 5131 if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
5081 tp->intr_event &= ~RxFIFOOver; 5132 tp->event_slow &= ~RxFIFOOver;
5082 tp->napi_event &= ~RxFIFOOver;
5083 }
5084 5133
5085 if (tp->mac_version == RTL_GIGA_MAC_VER_13 || 5134 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5086 tp->mac_version == RTL_GIGA_MAC_VER_16) { 5135 tp->mac_version == RTL_GIGA_MAC_VER_16) {
@@ -5136,8 +5185,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
5136 rtl_set_rx_mode(dev); 5185 rtl_set_rx_mode(dev);
5137 5186
5138 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); 5187 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
5139
5140 RTL_W16(IntrMask, tp->intr_event);
5141} 5188}
5142 5189
5143static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) 5190static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -5330,92 +5377,34 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
5330 tp->cur_tx = tp->dirty_tx = 0; 5377 tp->cur_tx = tp->dirty_tx = 0;
5331} 5378}
5332 5379
5333static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) 5380static void rtl_reset_work(struct rtl8169_private *tp)
5334{
5335 struct rtl8169_private *tp = netdev_priv(dev);
5336
5337 PREPARE_DELAYED_WORK(&tp->task, task);
5338 schedule_delayed_work(&tp->task, 4);
5339}
5340
5341static void rtl8169_wait_for_quiescence(struct net_device *dev)
5342{
5343 struct rtl8169_private *tp = netdev_priv(dev);
5344 void __iomem *ioaddr = tp->mmio_addr;
5345
5346 synchronize_irq(dev->irq);
5347
5348 /* Wait for any pending NAPI task to complete */
5349 napi_disable(&tp->napi);
5350
5351 rtl8169_irq_mask_and_ack(tp);
5352
5353 tp->intr_mask = 0xffff;
5354 RTL_W16(IntrMask, tp->intr_event);
5355 napi_enable(&tp->napi);
5356}
5357
5358static void rtl8169_reinit_task(struct work_struct *work)
5359{ 5381{
5360 struct rtl8169_private *tp =
5361 container_of(work, struct rtl8169_private, task.work);
5362 struct net_device *dev = tp->dev;
5363 int ret;
5364
5365 rtnl_lock();
5366
5367 if (!netif_running(dev))
5368 goto out_unlock;
5369
5370 rtl8169_wait_for_quiescence(dev);
5371 rtl8169_close(dev);
5372
5373 ret = rtl8169_open(dev);
5374 if (unlikely(ret < 0)) {
5375 if (net_ratelimit())
5376 netif_err(tp, drv, dev,
5377 "reinit failure (status = %d). Rescheduling\n",
5378 ret);
5379 rtl8169_schedule_work(dev, rtl8169_reinit_task);
5380 }
5381
5382out_unlock:
5383 rtnl_unlock();
5384}
5385
5386static void rtl8169_reset_task(struct work_struct *work)
5387{
5388 struct rtl8169_private *tp =
5389 container_of(work, struct rtl8169_private, task.work);
5390 struct net_device *dev = tp->dev; 5382 struct net_device *dev = tp->dev;
5391 int i; 5383 int i;
5392 5384
5393 rtnl_lock(); 5385 napi_disable(&tp->napi);
5394 5386 netif_stop_queue(dev);
5395 if (!netif_running(dev)) 5387 synchronize_sched();
5396 goto out_unlock;
5397 5388
5398 rtl8169_hw_reset(tp); 5389 rtl8169_hw_reset(tp);
5399 5390
5400 rtl8169_wait_for_quiescence(dev);
5401
5402 for (i = 0; i < NUM_RX_DESC; i++) 5391 for (i = 0; i < NUM_RX_DESC; i++)
5403 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); 5392 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5404 5393
5405 rtl8169_tx_clear(tp); 5394 rtl8169_tx_clear(tp);
5406 rtl8169_init_ring_indexes(tp); 5395 rtl8169_init_ring_indexes(tp);
5407 5396
5397 napi_enable(&tp->napi);
5408 rtl_hw_start(dev); 5398 rtl_hw_start(dev);
5409 netif_wake_queue(dev); 5399 netif_wake_queue(dev);
5410 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 5400 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
5411
5412out_unlock:
5413 rtnl_unlock();
5414} 5401}
5415 5402
5416static void rtl8169_tx_timeout(struct net_device *dev) 5403static void rtl8169_tx_timeout(struct net_device *dev)
5417{ 5404{
5418 rtl8169_schedule_work(dev, rtl8169_reset_task); 5405 struct rtl8169_private *tp = netdev_priv(dev);
5406
5407 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5419} 5408}
5420 5409
5421static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, 5410static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
@@ -5552,9 +5541,22 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5552 5541
5553 RTL_W8(TxPoll, NPQ); 5542 RTL_W8(TxPoll, NPQ);
5554 5543
5544 mmiowb();
5545
5555 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { 5546 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
5547 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5548 * not miss a ring update when it notices a stopped queue.
5549 */
5550 smp_wmb();
5556 netif_stop_queue(dev); 5551 netif_stop_queue(dev);
5557 smp_rmb(); 5552 /* Sync with rtl_tx:
5553 * - publish queue status and cur_tx ring index (write barrier)
5554 * - refresh dirty_tx ring index (read barrier).
5555 * May the current thread have a pessimistic view of the ring
5556 * status and forget to wake up queue, a racing rtl_tx thread
5557 * can't.
5558 */
5559 smp_mb();
5558 if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) 5560 if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
5559 netif_wake_queue(dev); 5561 netif_wake_queue(dev);
5560 } 5562 }
@@ -5618,12 +5620,10 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
5618 5620
5619 rtl8169_hw_reset(tp); 5621 rtl8169_hw_reset(tp);
5620 5622
5621 rtl8169_schedule_work(dev, rtl8169_reinit_task); 5623 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5622} 5624}
5623 5625
5624static void rtl8169_tx_interrupt(struct net_device *dev, 5626static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5625 struct rtl8169_private *tp,
5626 void __iomem *ioaddr)
5627{ 5627{
5628 unsigned int dirty_tx, tx_left; 5628 unsigned int dirty_tx, tx_left;
5629 5629
@@ -5655,7 +5655,14 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
5655 5655
5656 if (tp->dirty_tx != dirty_tx) { 5656 if (tp->dirty_tx != dirty_tx) {
5657 tp->dirty_tx = dirty_tx; 5657 tp->dirty_tx = dirty_tx;
5658 smp_wmb(); 5658 /* Sync with rtl8169_start_xmit:
5659 * - publish dirty_tx ring index (write barrier)
5660 * - refresh cur_tx ring index and queue status (read barrier)
5661 * May the current thread miss the stopped queue condition,
5662 * a racing xmit thread can only have a right view of the
5663 * ring status.
5664 */
5665 smp_mb();
5659 if (netif_queue_stopped(dev) && 5666 if (netif_queue_stopped(dev) &&
5660 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { 5667 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
5661 netif_wake_queue(dev); 5668 netif_wake_queue(dev);
@@ -5666,9 +5673,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
5666 * of start_xmit activity is detected (if it is not detected, 5673 * of start_xmit activity is detected (if it is not detected,
5667 * it is slow enough). -- FR 5674 * it is slow enough). -- FR
5668 */ 5675 */
5669 smp_rmb(); 5676 if (tp->cur_tx != dirty_tx) {
5670 if (tp->cur_tx != dirty_tx) 5677 void __iomem *ioaddr = tp->mmio_addr;
5678
5671 RTL_W8(TxPoll, NPQ); 5679 RTL_W8(TxPoll, NPQ);
5680 }
5672 } 5681 }
5673} 5682}
5674 5683
@@ -5707,9 +5716,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
5707 return skb; 5716 return skb;
5708} 5717}
5709 5718
5710static int rtl8169_rx_interrupt(struct net_device *dev, 5719static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
5711 struct rtl8169_private *tp,
5712 void __iomem *ioaddr, u32 budget)
5713{ 5720{
5714 unsigned int cur_rx, rx_left; 5721 unsigned int cur_rx, rx_left;
5715 unsigned int count; 5722 unsigned int count;
@@ -5737,7 +5744,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
5737 if (status & RxCRC) 5744 if (status & RxCRC)
5738 dev->stats.rx_crc_errors++; 5745 dev->stats.rx_crc_errors++;
5739 if (status & RxFOVF) { 5746 if (status & RxFOVF) {
5740 rtl8169_schedule_work(dev, rtl8169_reset_task); 5747 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5741 dev->stats.rx_fifo_errors++; 5748 dev->stats.rx_fifo_errors++;
5742 } 5749 }
5743 rtl8169_mark_to_asic(desc, rx_buf_sz); 5750 rtl8169_mark_to_asic(desc, rx_buf_sz);
@@ -5798,101 +5805,120 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
5798{ 5805{
5799 struct net_device *dev = dev_instance; 5806 struct net_device *dev = dev_instance;
5800 struct rtl8169_private *tp = netdev_priv(dev); 5807 struct rtl8169_private *tp = netdev_priv(dev);
5801 void __iomem *ioaddr = tp->mmio_addr;
5802 int handled = 0; 5808 int handled = 0;
5803 int status; 5809 u16 status;
5804 5810
5805 /* loop handling interrupts until we have no new ones or 5811 status = rtl_get_events(tp);
5806 * we hit a invalid/hotplug case. 5812 if (status && status != 0xffff) {
5807 */ 5813 status &= RTL_EVENT_NAPI | tp->event_slow;
5808 status = RTL_R16(IntrStatus); 5814 if (status) {
5809 while (status && status != 0xffff) { 5815 handled = 1;
5810 status &= tp->intr_event;
5811 if (!status)
5812 break;
5813 5816
5814 handled = 1; 5817 rtl_irq_disable(tp);
5818 napi_schedule(&tp->napi);
5819 }
5820 }
5821 return IRQ_RETVAL(handled);
5822}
5815 5823
5816 /* Handle all of the error cases first. These will reset 5824/*
5817 * the chip, so just exit the loop. 5825 * Workqueue context.
5818 */ 5826 */
5819 if (unlikely(!netif_running(dev))) { 5827static void rtl_slow_event_work(struct rtl8169_private *tp)
5820 rtl8169_hw_reset(tp); 5828{
5829 struct net_device *dev = tp->dev;
5830 u16 status;
5831
5832 status = rtl_get_events(tp) & tp->event_slow;
5833 rtl_ack_events(tp, status);
5834
5835 if (unlikely(status & RxFIFOOver)) {
5836 switch (tp->mac_version) {
5837 /* Work around for rx fifo overflow */
5838 case RTL_GIGA_MAC_VER_11:
5839 netif_stop_queue(dev);
5840 /* XXX - Hack alert. See rtl_task(). */
5841 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
5842 default:
5821 break; 5843 break;
5822 } 5844 }
5845 }
5823 5846
5824 if (unlikely(status & RxFIFOOver)) { 5847 if (unlikely(status & SYSErr))
5825 switch (tp->mac_version) { 5848 rtl8169_pcierr_interrupt(dev);
5826 /* Work around for rx fifo overflow */
5827 case RTL_GIGA_MAC_VER_11:
5828 netif_stop_queue(dev);
5829 rtl8169_tx_timeout(dev);
5830 goto done;
5831 default:
5832 break;
5833 }
5834 }
5835 5849
5836 if (unlikely(status & SYSErr)) { 5850 if (status & LinkChg)
5837 rtl8169_pcierr_interrupt(dev); 5851 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
5838 break;
5839 }
5840 5852
5841 if (status & LinkChg) 5853 napi_disable(&tp->napi);
5842 __rtl8169_check_link_status(dev, tp, ioaddr, true); 5854 rtl_irq_disable(tp);
5843 5855
5844 /* We need to see the lastest version of tp->intr_mask to 5856 napi_enable(&tp->napi);
5845 * avoid ignoring an MSI interrupt and having to wait for 5857 napi_schedule(&tp->napi);
5846 * another event which may never come. 5858}
5847 */
5848 smp_rmb();
5849 if (status & tp->intr_mask & tp->napi_event) {
5850 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
5851 tp->intr_mask = ~tp->napi_event;
5852
5853 if (likely(napi_schedule_prep(&tp->napi)))
5854 __napi_schedule(&tp->napi);
5855 else
5856 netif_info(tp, intr, dev,
5857 "interrupt %04x in poll\n", status);
5858 }
5859 5859
5860 /* We only get a new MSI interrupt when all active irq 5860static void rtl_task(struct work_struct *work)
5861 * sources on the chip have been acknowledged. So, ack 5861{
5862 * everything we've seen and check if new sources have become 5862 static const struct {
5863 * active to avoid blocking all interrupts from the chip. 5863 int bitnr;
5864 */ 5864 void (*action)(struct rtl8169_private *);
5865 RTL_W16(IntrStatus, 5865 } rtl_work[] = {
5866 (status & RxFIFOOver) ? (status | RxOverflow) : status); 5866 /* XXX - keep rtl_slow_event_work() as first element. */
5867 status = RTL_R16(IntrStatus); 5867 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
5868 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
5869 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
5870 };
5871 struct rtl8169_private *tp =
5872 container_of(work, struct rtl8169_private, wk.work);
5873 struct net_device *dev = tp->dev;
5874 int i;
5875
5876 rtl_lock_work(tp);
5877
5878 if (!netif_running(dev) ||
5879 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
5880 goto out_unlock;
5881
5882 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
5883 bool pending;
5884
5885 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
5886 if (pending)
5887 rtl_work[i].action(tp);
5868 } 5888 }
5869done: 5889
5870 return IRQ_RETVAL(handled); 5890out_unlock:
5891 rtl_unlock_work(tp);
5871} 5892}
5872 5893
5873static int rtl8169_poll(struct napi_struct *napi, int budget) 5894static int rtl8169_poll(struct napi_struct *napi, int budget)
5874{ 5895{
5875 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); 5896 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
5876 struct net_device *dev = tp->dev; 5897 struct net_device *dev = tp->dev;
5877 void __iomem *ioaddr = tp->mmio_addr; 5898 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
5878 int work_done; 5899 int work_done= 0;
5900 u16 status;
5901
5902 status = rtl_get_events(tp);
5903 rtl_ack_events(tp, status & ~tp->event_slow);
5879 5904
5880 work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget); 5905 if (status & RTL_EVENT_NAPI_RX)
5881 rtl8169_tx_interrupt(dev, tp, ioaddr); 5906 work_done = rtl_rx(dev, tp, (u32) budget);
5907
5908 if (status & RTL_EVENT_NAPI_TX)
5909 rtl_tx(dev, tp);
5910
5911 if (status & tp->event_slow) {
5912 enable_mask &= ~tp->event_slow;
5913
5914 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
5915 }
5882 5916
5883 if (work_done < budget) { 5917 if (work_done < budget) {
5884 napi_complete(napi); 5918 napi_complete(napi);
5885 5919
5886 /* We need for force the visibility of tp->intr_mask 5920 rtl_irq_enable(tp, enable_mask);
5887 * for other CPUs, as we can loose an MSI interrupt 5921 mmiowb();
5888 * and potentially wait for a retransmit timeout if we don't.
5889 * The posted write to IntrMask is safe, as it will
5890 * eventually make it to the chip and we won't loose anything
5891 * until it does.
5892 */
5893 tp->intr_mask = 0xffff;
5894 wmb();
5895 RTL_W16(IntrMask, tp->intr_event);
5896 } 5922 }
5897 5923
5898 return work_done; 5924 return work_done;
@@ -5916,26 +5942,19 @@ static void rtl8169_down(struct net_device *dev)
5916 5942
5917 del_timer_sync(&tp->timer); 5943 del_timer_sync(&tp->timer);
5918 5944
5919 netif_stop_queue(dev);
5920
5921 napi_disable(&tp->napi); 5945 napi_disable(&tp->napi);
5922 5946 netif_stop_queue(dev);
5923 spin_lock_irq(&tp->lock);
5924 5947
5925 rtl8169_hw_reset(tp); 5948 rtl8169_hw_reset(tp);
5926 /* 5949 /*
5927 * At this point device interrupts can not be enabled in any function, 5950 * At this point device interrupts can not be enabled in any function,
5928 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task, 5951 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
5929 * rtl8169_reinit_task) and napi is disabled (rtl8169_poll). 5952 * and napi is disabled (rtl8169_poll).
5930 */ 5953 */
5931 rtl8169_rx_missed(dev, ioaddr); 5954 rtl8169_rx_missed(dev, ioaddr);
5932 5955
5933 spin_unlock_irq(&tp->lock);
5934
5935 synchronize_irq(dev->irq);
5936
5937 /* Give a racing hard_start_xmit a few cycles to complete. */ 5956 /* Give a racing hard_start_xmit a few cycles to complete. */
5938 synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ 5957 synchronize_sched();
5939 5958
5940 rtl8169_tx_clear(tp); 5959 rtl8169_tx_clear(tp);
5941 5960
@@ -5954,7 +5973,11 @@ static int rtl8169_close(struct net_device *dev)
5954 /* Update counters before going down */ 5973 /* Update counters before going down */
5955 rtl8169_update_counters(dev); 5974 rtl8169_update_counters(dev);
5956 5975
5976 rtl_lock_work(tp);
5977 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
5978
5957 rtl8169_down(dev); 5979 rtl8169_down(dev);
5980 rtl_unlock_work(tp);
5958 5981
5959 free_irq(dev->irq, dev); 5982 free_irq(dev->irq, dev);
5960 5983
@@ -5974,7 +5997,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
5974{ 5997{
5975 struct rtl8169_private *tp = netdev_priv(dev); 5998 struct rtl8169_private *tp = netdev_priv(dev);
5976 void __iomem *ioaddr = tp->mmio_addr; 5999 void __iomem *ioaddr = tp->mmio_addr;
5977 unsigned long flags;
5978 u32 mc_filter[2]; /* Multicast hash filter */ 6000 u32 mc_filter[2]; /* Multicast hash filter */
5979 int rx_mode; 6001 int rx_mode;
5980 u32 tmp = 0; 6002 u32 tmp = 0;
@@ -6003,8 +6025,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
6003 } 6025 }
6004 } 6026 }
6005 6027
6006 spin_lock_irqsave(&tp->lock, flags);
6007
6008 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; 6028 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
6009 6029
6010 if (tp->mac_version > RTL_GIGA_MAC_VER_06) { 6030 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
@@ -6018,8 +6038,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
6018 RTL_W32(MAR0 + 0, mc_filter[0]); 6038 RTL_W32(MAR0 + 0, mc_filter[0]);
6019 6039
6020 RTL_W32(RxConfig, tmp); 6040 RTL_W32(RxConfig, tmp);
6021
6022 spin_unlock_irqrestore(&tp->lock, flags);
6023} 6041}
6024 6042
6025/** 6043/**
@@ -6032,13 +6050,9 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
6032{ 6050{
6033 struct rtl8169_private *tp = netdev_priv(dev); 6051 struct rtl8169_private *tp = netdev_priv(dev);
6034 void __iomem *ioaddr = tp->mmio_addr; 6052 void __iomem *ioaddr = tp->mmio_addr;
6035 unsigned long flags;
6036 6053
6037 if (netif_running(dev)) { 6054 if (netif_running(dev))
6038 spin_lock_irqsave(&tp->lock, flags);
6039 rtl8169_rx_missed(dev, ioaddr); 6055 rtl8169_rx_missed(dev, ioaddr);
6040 spin_unlock_irqrestore(&tp->lock, flags);
6041 }
6042 6056
6043 return &dev->stats; 6057 return &dev->stats;
6044} 6058}
@@ -6050,10 +6064,15 @@ static void rtl8169_net_suspend(struct net_device *dev)
6050 if (!netif_running(dev)) 6064 if (!netif_running(dev))
6051 return; 6065 return;
6052 6066
6053 rtl_pll_power_down(tp);
6054
6055 netif_device_detach(dev); 6067 netif_device_detach(dev);
6056 netif_stop_queue(dev); 6068 netif_stop_queue(dev);
6069
6070 rtl_lock_work(tp);
6071 napi_disable(&tp->napi);
6072 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6073 rtl_unlock_work(tp);
6074
6075 rtl_pll_power_down(tp);
6057} 6076}
6058 6077
6059#ifdef CONFIG_PM 6078#ifdef CONFIG_PM
@@ -6076,7 +6095,9 @@ static void __rtl8169_resume(struct net_device *dev)
6076 6095
6077 rtl_pll_power_up(tp); 6096 rtl_pll_power_up(tp);
6078 6097
6079 rtl8169_schedule_work(dev, rtl8169_reset_task); 6098 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6099
6100 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6080} 6101}
6081 6102
6082static int rtl8169_resume(struct device *device) 6103static int rtl8169_resume(struct device *device)
@@ -6102,10 +6123,10 @@ static int rtl8169_runtime_suspend(struct device *device)
6102 if (!tp->TxDescArray) 6123 if (!tp->TxDescArray)
6103 return 0; 6124 return 0;
6104 6125
6105 spin_lock_irq(&tp->lock); 6126 rtl_lock_work(tp);
6106 tp->saved_wolopts = __rtl8169_get_wol(tp); 6127 tp->saved_wolopts = __rtl8169_get_wol(tp);
6107 __rtl8169_set_wol(tp, WAKE_ANY); 6128 __rtl8169_set_wol(tp, WAKE_ANY);
6108 spin_unlock_irq(&tp->lock); 6129 rtl_unlock_work(tp);
6109 6130
6110 rtl8169_net_suspend(dev); 6131 rtl8169_net_suspend(dev);
6111 6132
@@ -6121,10 +6142,10 @@ static int rtl8169_runtime_resume(struct device *device)
6121 if (!tp->TxDescArray) 6142 if (!tp->TxDescArray)
6122 return 0; 6143 return 0;
6123 6144
6124 spin_lock_irq(&tp->lock); 6145 rtl_lock_work(tp);
6125 __rtl8169_set_wol(tp, tp->saved_wolopts); 6146 __rtl8169_set_wol(tp, tp->saved_wolopts);
6126 tp->saved_wolopts = 0; 6147 tp->saved_wolopts = 0;
6127 spin_unlock_irq(&tp->lock); 6148 rtl_unlock_work(tp);
6128 6149
6129 rtl8169_init_phy(dev, tp); 6150 rtl8169_init_phy(dev, tp);
6130 6151
@@ -6192,12 +6213,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
6192 /* Restore original MAC address */ 6213 /* Restore original MAC address */
6193 rtl_rar_set(tp, dev->perm_addr); 6214 rtl_rar_set(tp, dev->perm_addr);
6194 6215
6195 spin_lock_irq(&tp->lock);
6196
6197 rtl8169_hw_reset(tp); 6216 rtl8169_hw_reset(tp);
6198 6217
6199 spin_unlock_irq(&tp->lock);
6200
6201 if (system_state == SYSTEM_POWER_OFF) { 6218 if (system_state == SYSTEM_POWER_OFF) {
6202 if (__rtl8169_get_wol(tp) & WAKE_ANY) { 6219 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6203 rtl_wol_suspend_quirk(tp); 6220 rtl_wol_suspend_quirk(tp);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 813d41c4a845..1cb5a34d5779 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1792,7 +1792,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1792 1792
1793 ndev = alloc_etherdev(sizeof(struct sh_eth_private)); 1793 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
1794 if (!ndev) { 1794 if (!ndev) {
1795 dev_err(&pdev->dev, "Could not allocate device.\n");
1796 ret = -ENOMEM; 1795 ret = -ENOMEM;
1797 goto out; 1796 goto out;
1798 } 1797 }
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 22e9c0181ce8..bee97033167d 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -960,11 +960,11 @@ static int __devinit s6gmac_probe(struct platform_device *pdev)
960 int res; 960 int res;
961 unsigned long i; 961 unsigned long i;
962 struct mii_bus *mb; 962 struct mii_bus *mb;
963
963 dev = alloc_etherdev(sizeof(*pd)); 964 dev = alloc_etherdev(sizeof(*pd));
964 if (!dev) { 965 if (!dev)
965 printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n");
966 return -ENOMEM; 966 return -ENOMEM;
967 } 967
968 dev->open = s6gmac_open; 968 dev->open = s6gmac_open;
969 dev->stop = s6gmac_stop; 969 dev->stop = s6gmac_stop;
970 dev->hard_start_xmit = s6gmac_tx; 970 dev->hard_start_xmit = s6gmac_tx;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index f955a19eb22f..bb8c8222122b 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -733,7 +733,6 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
733 733
734 dev = alloc_etherdev(sizeof (struct sgiseeq_private)); 734 dev = alloc_etherdev(sizeof (struct sgiseeq_private));
735 if (!dev) { 735 if (!dev) {
736 printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
737 err = -ENOMEM; 736 err = -ENOMEM;
738 goto err_out; 737 goto err_out;
739 } 738 }
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 5d18841f0f3d..8d423544a7e6 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -16,6 +16,13 @@ config SFC_MTD
16 depends on SFC && MTD && !(SFC=y && MTD=m) 16 depends on SFC && MTD && !(SFC=y && MTD=m)
17 default y 17 default y
18 ---help--- 18 ---help---
19 This exposes the on-board flash memory as MTD devices (e.g. 19 This exposes the on-board flash and/or EEPROM as MTD devices
20 /dev/mtd1). This makes it possible to upload new firmware 20 (e.g. /dev/mtd1). This is required to update the firmware or
21 to the NIC. 21 the boot configuration under Linux.
22config SFC_MCDI_MON
23 bool "Solarflare SFC9000-family hwmon support"
24 depends on SFC && HWMON && !(SFC=y && HWMON=m)
25 default y
26 ----help---
27 This exposes the on-board firmware-managed sensors as a
28 hardware monitor device.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ab31c7124db1..3fa2e25ccc45 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -2,7 +2,7 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
2 falcon_xmac.o mcdi_mac.o \ 2 falcon_xmac.o mcdi_mac.o \
3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ 3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
4 tenxpress.o txc43128_phy.o falcon_boards.o \ 4 tenxpress.o txc43128_phy.o falcon_boards.o \
5 mcdi.o mcdi_phy.o 5 mcdi.o mcdi_phy.o mcdi_mon.o
6sfc-$(CONFIG_SFC_MTD) += mtd.o 6sfc-$(CONFIG_SFC_MTD) += mtd.o
7 7
8obj-$(CONFIG_SFC) += sfc.o 8obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 098ac2ad757d..a2a9f40b90cf 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -448,40 +448,40 @@ typedef union efx_oword {
448 EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low)) 448 EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
449 449
450#define EFX_SET_OWORD64(oword, low, high, value) do { \ 450#define EFX_SET_OWORD64(oword, low, high, value) do { \
451 (oword).u64[0] = (((oword).u64[0] \ 451 (oword).u64[0] = (((oword).u64[0] \
452 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ 452 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
453 | EFX_INSERT64(0, 63, low, high, value)); \ 453 | EFX_INSERT64(0, 63, low, high, value)); \
454 (oword).u64[1] = (((oword).u64[1] \ 454 (oword).u64[1] = (((oword).u64[1] \
455 & ~EFX_INPLACE_MASK64(64, 127, low, high)) \ 455 & ~EFX_INPLACE_MASK64(64, 127, low, high)) \
456 | EFX_INSERT64(64, 127, low, high, value)); \ 456 | EFX_INSERT64(64, 127, low, high, value)); \
457 } while (0) 457 } while (0)
458 458
459#define EFX_SET_QWORD64(qword, low, high, value) do { \ 459#define EFX_SET_QWORD64(qword, low, high, value) do { \
460 (qword).u64[0] = (((qword).u64[0] \ 460 (qword).u64[0] = (((qword).u64[0] \
461 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ 461 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
462 | EFX_INSERT64(0, 63, low, high, value)); \ 462 | EFX_INSERT64(0, 63, low, high, value)); \
463 } while (0) 463 } while (0)
464 464
465#define EFX_SET_OWORD32(oword, low, high, value) do { \ 465#define EFX_SET_OWORD32(oword, low, high, value) do { \
466 (oword).u32[0] = (((oword).u32[0] \ 466 (oword).u32[0] = (((oword).u32[0] \
467 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ 467 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
468 | EFX_INSERT32(0, 31, low, high, value)); \ 468 | EFX_INSERT32(0, 31, low, high, value)); \
469 (oword).u32[1] = (((oword).u32[1] \ 469 (oword).u32[1] = (((oword).u32[1] \
470 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ 470 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
471 | EFX_INSERT32(32, 63, low, high, value)); \ 471 | EFX_INSERT32(32, 63, low, high, value)); \
472 (oword).u32[2] = (((oword).u32[2] \ 472 (oword).u32[2] = (((oword).u32[2] \
473 & ~EFX_INPLACE_MASK32(64, 95, low, high)) \ 473 & ~EFX_INPLACE_MASK32(64, 95, low, high)) \
474 | EFX_INSERT32(64, 95, low, high, value)); \ 474 | EFX_INSERT32(64, 95, low, high, value)); \
475 (oword).u32[3] = (((oword).u32[3] \ 475 (oword).u32[3] = (((oword).u32[3] \
476 & ~EFX_INPLACE_MASK32(96, 127, low, high)) \ 476 & ~EFX_INPLACE_MASK32(96, 127, low, high)) \
477 | EFX_INSERT32(96, 127, low, high, value)); \ 477 | EFX_INSERT32(96, 127, low, high, value)); \
478 } while (0) 478 } while (0)
479 479
480#define EFX_SET_QWORD32(qword, low, high, value) do { \ 480#define EFX_SET_QWORD32(qword, low, high, value) do { \
481 (qword).u32[0] = (((qword).u32[0] \ 481 (qword).u32[0] = (((qword).u32[0] \
482 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ 482 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
483 | EFX_INSERT32(0, 31, low, high, value)); \ 483 | EFX_INSERT32(0, 31, low, high, value)); \
484 (qword).u32[1] = (((qword).u32[1] \ 484 (qword).u32[1] = (((qword).u32[1] \
485 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ 485 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
486 | EFX_INSERT32(32, 63, low, high, value)); \ 486 | EFX_INSERT32(32, 63, low, high, value)); \
487 } while (0) 487 } while (0)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e43702f33b62..952d0bf7695a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -38,15 +38,15 @@
38 38
39/* Loopback mode names (see LOOPBACK_MODE()) */ 39/* Loopback mode names (see LOOPBACK_MODE()) */
40const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; 40const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
41const char *efx_loopback_mode_names[] = { 41const char *const efx_loopback_mode_names[] = {
42 [LOOPBACK_NONE] = "NONE", 42 [LOOPBACK_NONE] = "NONE",
43 [LOOPBACK_DATA] = "DATAPATH", 43 [LOOPBACK_DATA] = "DATAPATH",
44 [LOOPBACK_GMAC] = "GMAC", 44 [LOOPBACK_GMAC] = "GMAC",
45 [LOOPBACK_XGMII] = "XGMII", 45 [LOOPBACK_XGMII] = "XGMII",
46 [LOOPBACK_XGXS] = "XGXS", 46 [LOOPBACK_XGXS] = "XGXS",
47 [LOOPBACK_XAUI] = "XAUI", 47 [LOOPBACK_XAUI] = "XAUI",
48 [LOOPBACK_GMII] = "GMII", 48 [LOOPBACK_GMII] = "GMII",
49 [LOOPBACK_SGMII] = "SGMII", 49 [LOOPBACK_SGMII] = "SGMII",
50 [LOOPBACK_XGBR] = "XGBR", 50 [LOOPBACK_XGBR] = "XGBR",
51 [LOOPBACK_XFI] = "XFI", 51 [LOOPBACK_XFI] = "XFI",
52 [LOOPBACK_XAUI_FAR] = "XAUI_FAR", 52 [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
@@ -55,21 +55,21 @@ const char *efx_loopback_mode_names[] = {
55 [LOOPBACK_XFI_FAR] = "XFI_FAR", 55 [LOOPBACK_XFI_FAR] = "XFI_FAR",
56 [LOOPBACK_GPHY] = "GPHY", 56 [LOOPBACK_GPHY] = "GPHY",
57 [LOOPBACK_PHYXS] = "PHYXS", 57 [LOOPBACK_PHYXS] = "PHYXS",
58 [LOOPBACK_PCS] = "PCS", 58 [LOOPBACK_PCS] = "PCS",
59 [LOOPBACK_PMAPMD] = "PMA/PMD", 59 [LOOPBACK_PMAPMD] = "PMA/PMD",
60 [LOOPBACK_XPORT] = "XPORT", 60 [LOOPBACK_XPORT] = "XPORT",
61 [LOOPBACK_XGMII_WS] = "XGMII_WS", 61 [LOOPBACK_XGMII_WS] = "XGMII_WS",
62 [LOOPBACK_XAUI_WS] = "XAUI_WS", 62 [LOOPBACK_XAUI_WS] = "XAUI_WS",
63 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", 63 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
64 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", 64 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
65 [LOOPBACK_GMII_WS] = "GMII_WS", 65 [LOOPBACK_GMII_WS] = "GMII_WS",
66 [LOOPBACK_XFI_WS] = "XFI_WS", 66 [LOOPBACK_XFI_WS] = "XFI_WS",
67 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", 67 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
68 [LOOPBACK_PHYXS_WS] = "PHYXS_WS", 68 [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
69}; 69};
70 70
71const unsigned int efx_reset_type_max = RESET_TYPE_MAX; 71const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
72const char *efx_reset_type_names[] = { 72const char *const efx_reset_type_names[] = {
73 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 73 [RESET_TYPE_INVISIBLE] = "INVISIBLE",
74 [RESET_TYPE_ALL] = "ALL", 74 [RESET_TYPE_ALL] = "ALL",
75 [RESET_TYPE_WORLD] = "WORLD", 75 [RESET_TYPE_WORLD] = "WORLD",
@@ -122,15 +122,6 @@ static int napi_weight = 64;
122 */ 122 */
123static unsigned int efx_monitor_interval = 1 * HZ; 123static unsigned int efx_monitor_interval = 1 * HZ;
124 124
125/* This controls whether or not the driver will initialise devices
126 * with invalid MAC addresses stored in the EEPROM or flash. If true,
127 * such devices will be initialised with a random locally-generated
128 * MAC address. This allows for loading the sfc_mtd driver to
129 * reprogram the flash, even if the flash contents (including the MAC
130 * address) have previously been erased.
131 */
132static unsigned int allow_bad_hwaddr;
133
134/* Initial interrupt moderation settings. They can be modified after 125/* Initial interrupt moderation settings. They can be modified after
135 * module load with ethtool. 126 * module load with ethtool.
136 * 127 *
@@ -162,7 +153,7 @@ static unsigned int interrupt_mode;
162 * interrupt handling. 153 * interrupt handling.
163 * 154 *
164 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. 155 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
165 * The default (0) means to assign an interrupt to each package (level II cache) 156 * The default (0) means to assign an interrupt to each core.
166 */ 157 */
167static unsigned int rss_cpus; 158static unsigned int rss_cpus;
168module_param(rss_cpus, uint, 0444); 159module_param(rss_cpus, uint, 0444);
@@ -238,8 +229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
238 229
239 /* Deliver last RX packet. */ 230 /* Deliver last RX packet. */
240 if (channel->rx_pkt) { 231 if (channel->rx_pkt) {
241 __efx_rx_packet(channel, channel->rx_pkt, 232 __efx_rx_packet(channel, channel->rx_pkt);
242 channel->rx_pkt_csummed);
243 channel->rx_pkt = NULL; 233 channel->rx_pkt = NULL;
244 } 234 }
245 235
@@ -373,7 +363,7 @@ static int efx_probe_eventq(struct efx_channel *channel)
373 struct efx_nic *efx = channel->efx; 363 struct efx_nic *efx = channel->efx;
374 unsigned long entries; 364 unsigned long entries;
375 365
376 netif_dbg(channel->efx, probe, channel->efx->net_dev, 366 netif_dbg(efx, probe, efx->net_dev,
377 "chan %d create event queue\n", channel->channel); 367 "chan %d create event queue\n", channel->channel);
378 368
379 /* Build an event queue with room for one event per tx and rx buffer, 369 /* Build an event queue with room for one event per tx and rx buffer,
@@ -807,16 +797,14 @@ void efx_link_status_changed(struct efx_nic *efx)
807 } 797 }
808 798
809 /* Status message for kernel log */ 799 /* Status message for kernel log */
810 if (link_state->up) { 800 if (link_state->up)
811 netif_info(efx, link, efx->net_dev, 801 netif_info(efx, link, efx->net_dev,
812 "link up at %uMbps %s-duplex (MTU %d)%s\n", 802 "link up at %uMbps %s-duplex (MTU %d)%s\n",
813 link_state->speed, link_state->fd ? "full" : "half", 803 link_state->speed, link_state->fd ? "full" : "half",
814 efx->net_dev->mtu, 804 efx->net_dev->mtu,
815 (efx->promiscuous ? " [PROMISC]" : "")); 805 (efx->promiscuous ? " [PROMISC]" : ""));
816 } else { 806 else
817 netif_info(efx, link, efx->net_dev, "link down\n"); 807 netif_info(efx, link, efx->net_dev, "link down\n");
818 }
819
820} 808}
821 809
822void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) 810void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
@@ -863,11 +851,9 @@ int __efx_reconfigure_port(struct efx_nic *efx)
863 851
864 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 852 WARN_ON(!mutex_is_locked(&efx->mac_lock));
865 853
866 /* Serialise the promiscuous flag with efx_set_multicast_list. */ 854 /* Serialise the promiscuous flag with efx_set_rx_mode. */
867 if (efx_dev_registered(efx)) { 855 netif_addr_lock_bh(efx->net_dev);
868 netif_addr_lock_bh(efx->net_dev); 856 netif_addr_unlock_bh(efx->net_dev);
869 netif_addr_unlock_bh(efx->net_dev);
870 }
871 857
872 /* Disable PHY transmit in mac level loopbacks */ 858 /* Disable PHY transmit in mac level loopbacks */
873 phy_mode = efx->phy_mode; 859 phy_mode = efx->phy_mode;
@@ -907,16 +893,13 @@ static void efx_mac_work(struct work_struct *data)
907 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); 893 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
908 894
909 mutex_lock(&efx->mac_lock); 895 mutex_lock(&efx->mac_lock);
910 if (efx->port_enabled) { 896 if (efx->port_enabled)
911 efx->type->push_multicast_hash(efx); 897 efx->type->reconfigure_mac(efx);
912 efx->mac_op->reconfigure(efx);
913 }
914 mutex_unlock(&efx->mac_lock); 898 mutex_unlock(&efx->mac_lock);
915} 899}
916 900
917static int efx_probe_port(struct efx_nic *efx) 901static int efx_probe_port(struct efx_nic *efx)
918{ 902{
919 unsigned char *perm_addr;
920 int rc; 903 int rc;
921 904
922 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 905 netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -929,28 +912,10 @@ static int efx_probe_port(struct efx_nic *efx)
929 if (rc) 912 if (rc)
930 return rc; 913 return rc;
931 914
932 /* Sanity check MAC address */ 915 /* Initialise MAC address to permanent address */
933 perm_addr = efx->net_dev->perm_addr; 916 memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);
934 if (is_valid_ether_addr(perm_addr)) {
935 memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
936 } else {
937 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
938 perm_addr);
939 if (!allow_bad_hwaddr) {
940 rc = -EINVAL;
941 goto err;
942 }
943 random_ether_addr(efx->net_dev->dev_addr);
944 netif_info(efx, probe, efx->net_dev,
945 "using locally-generated MAC %pM\n",
946 efx->net_dev->dev_addr);
947 }
948 917
949 return 0; 918 return 0;
950
951 err:
952 efx->type->remove_port(efx);
953 return rc;
954} 919}
955 920
956static int efx_init_port(struct efx_nic *efx) 921static int efx_init_port(struct efx_nic *efx)
@@ -969,7 +934,7 @@ static int efx_init_port(struct efx_nic *efx)
969 934
970 /* Reconfigure the MAC before creating dma queues (required for 935 /* Reconfigure the MAC before creating dma queues (required for
971 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ 936 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
972 efx->mac_op->reconfigure(efx); 937 efx->type->reconfigure_mac(efx);
973 938
974 /* Ensure the PHY advertises the correct flow control settings */ 939 /* Ensure the PHY advertises the correct flow control settings */
975 rc = efx->phy_op->reconfigure(efx); 940 rc = efx->phy_op->reconfigure(efx);
@@ -996,8 +961,7 @@ static void efx_start_port(struct efx_nic *efx)
996 961
997 /* efx_mac_work() might have been scheduled after efx_stop_port(), 962 /* efx_mac_work() might have been scheduled after efx_stop_port(),
998 * and then cancelled by efx_flush_all() */ 963 * and then cancelled by efx_flush_all() */
999 efx->type->push_multicast_hash(efx); 964 efx->type->reconfigure_mac(efx);
1000 efx->mac_op->reconfigure(efx);
1001 965
1002 mutex_unlock(&efx->mac_lock); 966 mutex_unlock(&efx->mac_lock);
1003} 967}
@@ -1012,10 +976,8 @@ static void efx_stop_port(struct efx_nic *efx)
1012 mutex_unlock(&efx->mac_lock); 976 mutex_unlock(&efx->mac_lock);
1013 977
1014 /* Serialise against efx_set_multicast_list() */ 978 /* Serialise against efx_set_multicast_list() */
1015 if (efx_dev_registered(efx)) { 979 netif_addr_lock_bh(efx->net_dev);
1016 netif_addr_lock_bh(efx->net_dev); 980 netif_addr_unlock_bh(efx->net_dev);
1017 netif_addr_unlock_bh(efx->net_dev);
1018 }
1019} 981}
1020 982
1021static void efx_fini_port(struct efx_nic *efx) 983static void efx_fini_port(struct efx_nic *efx)
@@ -1069,9 +1031,11 @@ static int efx_init_io(struct efx_nic *efx)
1069 * masks event though they reject 46 bit masks. 1031 * masks event though they reject 46 bit masks.
1070 */ 1032 */
1071 while (dma_mask > 0x7fffffffUL) { 1033 while (dma_mask > 0x7fffffffUL) {
1072 if (pci_dma_supported(pci_dev, dma_mask) && 1034 if (pci_dma_supported(pci_dev, dma_mask)) {
1073 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) 1035 rc = pci_set_dma_mask(pci_dev, dma_mask);
1074 break; 1036 if (rc == 0)
1037 break;
1038 }
1075 dma_mask >>= 1; 1039 dma_mask >>= 1;
1076 } 1040 }
1077 if (rc) { 1041 if (rc) {
@@ -1144,18 +1108,16 @@ static void efx_fini_io(struct efx_nic *efx)
1144 pci_disable_device(efx->pci_dev); 1108 pci_disable_device(efx->pci_dev);
1145} 1109}
1146 1110
1147/* Get number of channels wanted. Each channel will have its own IRQ, 1111static int efx_wanted_parallelism(void)
1148 * 1 RX queue and/or 2 TX queues. */
1149static int efx_wanted_channels(void)
1150{ 1112{
1151 cpumask_var_t core_mask; 1113 cpumask_var_t thread_mask;
1152 int count; 1114 int count;
1153 int cpu; 1115 int cpu;
1154 1116
1155 if (rss_cpus) 1117 if (rss_cpus)
1156 return rss_cpus; 1118 return rss_cpus;
1157 1119
1158 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { 1120 if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1159 printk(KERN_WARNING 1121 printk(KERN_WARNING
1160 "sfc: RSS disabled due to allocation failure\n"); 1122 "sfc: RSS disabled due to allocation failure\n");
1161 return 1; 1123 return 1;
@@ -1163,14 +1125,14 @@ static int efx_wanted_channels(void)
1163 1125
1164 count = 0; 1126 count = 0;
1165 for_each_online_cpu(cpu) { 1127 for_each_online_cpu(cpu) {
1166 if (!cpumask_test_cpu(cpu, core_mask)) { 1128 if (!cpumask_test_cpu(cpu, thread_mask)) {
1167 ++count; 1129 ++count;
1168 cpumask_or(core_mask, core_mask, 1130 cpumask_or(thread_mask, thread_mask,
1169 topology_core_cpumask(cpu)); 1131 topology_thread_cpumask(cpu));
1170 } 1132 }
1171 } 1133 }
1172 1134
1173 free_cpumask_var(core_mask); 1135 free_cpumask_var(thread_mask);
1174 return count; 1136 return count;
1175} 1137}
1176 1138
@@ -1209,7 +1171,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1209 struct msix_entry xentries[EFX_MAX_CHANNELS]; 1171 struct msix_entry xentries[EFX_MAX_CHANNELS];
1210 int n_channels; 1172 int n_channels;
1211 1173
1212 n_channels = efx_wanted_channels(); 1174 n_channels = efx_wanted_parallelism();
1213 if (separate_tx_channels) 1175 if (separate_tx_channels)
1214 n_channels *= 2; 1176 n_channels *= 2;
1215 n_channels = min(n_channels, max_channels); 1177 n_channels = min(n_channels, max_channels);
@@ -1425,14 +1387,14 @@ static void efx_start_all(struct efx_nic *efx)
1425 return; 1387 return;
1426 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) 1388 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1427 return; 1389 return;
1428 if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) 1390 if (!netif_running(efx->net_dev))
1429 return; 1391 return;
1430 1392
1431 /* Mark the port as enabled so port reconfigurations can start, then 1393 /* Mark the port as enabled so port reconfigurations can start, then
1432 * restart the transmit interface early so the watchdog timer stops */ 1394 * restart the transmit interface early so the watchdog timer stops */
1433 efx_start_port(efx); 1395 efx_start_port(efx);
1434 1396
1435 if (efx_dev_registered(efx) && netif_device_present(efx->net_dev)) 1397 if (netif_device_present(efx->net_dev))
1436 netif_tx_wake_all_queues(efx->net_dev); 1398 netif_tx_wake_all_queues(efx->net_dev);
1437 1399
1438 efx_for_each_channel(channel, efx) 1400 efx_for_each_channel(channel, efx)
@@ -1523,11 +1485,9 @@ static void efx_stop_all(struct efx_nic *efx)
1523 1485
1524 /* Stop the kernel transmit interface late, so the watchdog 1486 /* Stop the kernel transmit interface late, so the watchdog
1525 * timer isn't ticking over the flush */ 1487 * timer isn't ticking over the flush */
1526 if (efx_dev_registered(efx)) { 1488 netif_tx_stop_all_queues(efx->net_dev);
1527 netif_tx_stop_all_queues(efx->net_dev); 1489 netif_tx_lock_bh(efx->net_dev);
1528 netif_tx_lock_bh(efx->net_dev); 1490 netif_tx_unlock_bh(efx->net_dev);
1529 netif_tx_unlock_bh(efx->net_dev);
1530 }
1531} 1491}
1532 1492
1533static void efx_remove_all(struct efx_nic *efx) 1493static void efx_remove_all(struct efx_nic *efx)
@@ -1544,13 +1504,13 @@ static void efx_remove_all(struct efx_nic *efx)
1544 * 1504 *
1545 **************************************************************************/ 1505 **************************************************************************/
1546 1506
1547static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution) 1507static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
1548{ 1508{
1549 if (usecs == 0) 1509 if (usecs == 0)
1550 return 0; 1510 return 0;
1551 if (usecs < resolution) 1511 if (usecs * 1000 < quantum_ns)
1552 return 1; /* never round down to 0 */ 1512 return 1; /* never round down to 0 */
1553 return usecs / resolution; 1513 return usecs * 1000 / quantum_ns;
1554} 1514}
1555 1515
1556/* Set interrupt moderation parameters */ 1516/* Set interrupt moderation parameters */
@@ -1559,14 +1519,20 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
1559 bool rx_may_override_tx) 1519 bool rx_may_override_tx)
1560{ 1520{
1561 struct efx_channel *channel; 1521 struct efx_channel *channel;
1562 unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); 1522 unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
1563 unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); 1523 efx->timer_quantum_ns,
1524 1000);
1525 unsigned int tx_ticks;
1526 unsigned int rx_ticks;
1564 1527
1565 EFX_ASSERT_RESET_SERIALISED(efx); 1528 EFX_ASSERT_RESET_SERIALISED(efx);
1566 1529
1567 if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX) 1530 if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
1568 return -EINVAL; 1531 return -EINVAL;
1569 1532
1533 tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
1534 rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
1535
1570 if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && 1536 if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
1571 !rx_may_override_tx) { 1537 !rx_may_override_tx) {
1572 netif_err(efx, drv, efx->net_dev, "Channels are shared. " 1538 netif_err(efx, drv, efx->net_dev, "Channels are shared. "
@@ -1589,8 +1555,14 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
1589void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, 1555void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
1590 unsigned int *rx_usecs, bool *rx_adaptive) 1556 unsigned int *rx_usecs, bool *rx_adaptive)
1591{ 1557{
1558 /* We must round up when converting ticks to microseconds
1559 * because we round down when converting the other way.
1560 */
1561
1592 *rx_adaptive = efx->irq_rx_adaptive; 1562 *rx_adaptive = efx->irq_rx_adaptive;
1593 *rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION; 1563 *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
1564 efx->timer_quantum_ns,
1565 1000);
1594 1566
1595 /* If channels are shared between RX and TX, so is IRQ 1567 /* If channels are shared between RX and TX, so is IRQ
1596 * moderation. Otherwise, IRQ moderation is the same for all 1568 * moderation. Otherwise, IRQ moderation is the same for all
@@ -1599,9 +1571,10 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
1599 if (efx->tx_channel_offset == 0) 1571 if (efx->tx_channel_offset == 0)
1600 *tx_usecs = *rx_usecs; 1572 *tx_usecs = *rx_usecs;
1601 else 1573 else
1602 *tx_usecs = 1574 *tx_usecs = DIV_ROUND_UP(
1603 efx->channel[efx->tx_channel_offset]->irq_moderation * 1575 efx->channel[efx->tx_channel_offset]->irq_moderation *
1604 EFX_IRQ_MOD_RESOLUTION; 1576 efx->timer_quantum_ns,
1577 1000);
1605} 1578}
1606 1579
1607/************************************************************************** 1580/**************************************************************************
@@ -1765,14 +1738,15 @@ static int efx_net_stop(struct net_device *net_dev)
1765} 1738}
1766 1739
1767/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1740/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1768static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) 1741static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
1742 struct rtnl_link_stats64 *stats)
1769{ 1743{
1770 struct efx_nic *efx = netdev_priv(net_dev); 1744 struct efx_nic *efx = netdev_priv(net_dev);
1771 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1745 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1772 1746
1773 spin_lock_bh(&efx->stats_lock); 1747 spin_lock_bh(&efx->stats_lock);
1748
1774 efx->type->update_stats(efx); 1749 efx->type->update_stats(efx);
1775 spin_unlock_bh(&efx->stats_lock);
1776 1750
1777 stats->rx_packets = mac_stats->rx_packets; 1751 stats->rx_packets = mac_stats->rx_packets;
1778 stats->tx_packets = mac_stats->tx_packets; 1752 stats->tx_packets = mac_stats->tx_packets;
@@ -1796,6 +1770,8 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struc
1796 stats->tx_errors = (stats->tx_window_errors + 1770 stats->tx_errors = (stats->tx_window_errors +
1797 mac_stats->tx_bad); 1771 mac_stats->tx_bad);
1798 1772
1773 spin_unlock_bh(&efx->stats_lock);
1774
1799 return stats; 1775 return stats;
1800} 1776}
1801 1777
@@ -1816,7 +1792,6 @@ static void efx_watchdog(struct net_device *net_dev)
1816static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 1792static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1817{ 1793{
1818 struct efx_nic *efx = netdev_priv(net_dev); 1794 struct efx_nic *efx = netdev_priv(net_dev);
1819 int rc = 0;
1820 1795
1821 EFX_ASSERT_RESET_SERIALISED(efx); 1796 EFX_ASSERT_RESET_SERIALISED(efx);
1822 1797
@@ -1833,13 +1808,13 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1833 /* Reconfigure the MAC before enabling the dma queues so that 1808 /* Reconfigure the MAC before enabling the dma queues so that
1834 * the RX buffers don't overflow */ 1809 * the RX buffers don't overflow */
1835 net_dev->mtu = new_mtu; 1810 net_dev->mtu = new_mtu;
1836 efx->mac_op->reconfigure(efx); 1811 efx->type->reconfigure_mac(efx);
1837 mutex_unlock(&efx->mac_lock); 1812 mutex_unlock(&efx->mac_lock);
1838 1813
1839 efx_init_channels(efx); 1814 efx_init_channels(efx);
1840 1815
1841 efx_start_all(efx); 1816 efx_start_all(efx);
1842 return rc; 1817 return 0;
1843} 1818}
1844 1819
1845static int efx_set_mac_address(struct net_device *net_dev, void *data) 1820static int efx_set_mac_address(struct net_device *net_dev, void *data)
@@ -1861,14 +1836,14 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1861 1836
1862 /* Reconfigure the MAC */ 1837 /* Reconfigure the MAC */
1863 mutex_lock(&efx->mac_lock); 1838 mutex_lock(&efx->mac_lock);
1864 efx->mac_op->reconfigure(efx); 1839 efx->type->reconfigure_mac(efx);
1865 mutex_unlock(&efx->mac_lock); 1840 mutex_unlock(&efx->mac_lock);
1866 1841
1867 return 0; 1842 return 0;
1868} 1843}
1869 1844
1870/* Context: netif_addr_lock held, BHs disabled. */ 1845/* Context: netif_addr_lock held, BHs disabled. */
1871static void efx_set_multicast_list(struct net_device *net_dev) 1846static void efx_set_rx_mode(struct net_device *net_dev)
1872{ 1847{
1873 struct efx_nic *efx = netdev_priv(net_dev); 1848 struct efx_nic *efx = netdev_priv(net_dev);
1874 struct netdev_hw_addr *ha; 1849 struct netdev_hw_addr *ha;
@@ -1922,7 +1897,7 @@ static const struct net_device_ops efx_netdev_ops = {
1922 .ndo_do_ioctl = efx_ioctl, 1897 .ndo_do_ioctl = efx_ioctl,
1923 .ndo_change_mtu = efx_change_mtu, 1898 .ndo_change_mtu = efx_change_mtu,
1924 .ndo_set_mac_address = efx_set_mac_address, 1899 .ndo_set_mac_address = efx_set_mac_address,
1925 .ndo_set_rx_mode = efx_set_multicast_list, 1900 .ndo_set_rx_mode = efx_set_rx_mode,
1926 .ndo_set_features = efx_set_features, 1901 .ndo_set_features = efx_set_features,
1927#ifdef CONFIG_NET_POLL_CONTROLLER 1902#ifdef CONFIG_NET_POLL_CONTROLLER
1928 .ndo_poll_controller = efx_netpoll, 1903 .ndo_poll_controller = efx_netpoll,
@@ -1975,10 +1950,6 @@ static int efx_register_netdev(struct efx_nic *efx)
1975 net_dev->netdev_ops = &efx_netdev_ops; 1950 net_dev->netdev_ops = &efx_netdev_ops;
1976 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 1951 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1977 1952
1978 /* Clear MAC statistics */
1979 efx->mac_op->update_stats(efx);
1980 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1981
1982 rtnl_lock(); 1953 rtnl_lock();
1983 1954
1984 rc = dev_alloc_name(net_dev, net_dev->name); 1955 rc = dev_alloc_name(net_dev, net_dev->name);
@@ -1997,7 +1968,7 @@ static int efx_register_netdev(struct efx_nic *efx)
1997 } 1968 }
1998 1969
1999 /* Always start with carrier off; PHY events will detect the link */ 1970 /* Always start with carrier off; PHY events will detect the link */
2000 netif_carrier_off(efx->net_dev); 1971 netif_carrier_off(net_dev);
2001 1972
2002 rtnl_unlock(); 1973 rtnl_unlock();
2003 1974
@@ -2038,11 +2009,9 @@ static void efx_unregister_netdev(struct efx_nic *efx)
2038 efx_release_tx_buffers(tx_queue); 2009 efx_release_tx_buffers(tx_queue);
2039 } 2010 }
2040 2011
2041 if (efx_dev_registered(efx)) { 2012 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2042 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2013 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2043 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2014 unregister_netdev(efx->net_dev);
2044 unregister_netdev(efx->net_dev);
2045 }
2046} 2015}
2047 2016
2048/************************************************************************** 2017/**************************************************************************
@@ -2095,7 +2064,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2095 "could not restore PHY settings\n"); 2064 "could not restore PHY settings\n");
2096 } 2065 }
2097 2066
2098 efx->mac_op->reconfigure(efx); 2067 efx->type->reconfigure_mac(efx);
2099 2068
2100 efx_init_channels(efx); 2069 efx_init_channels(efx);
2101 efx_restore_filters(efx); 2070 efx_restore_filters(efx);
@@ -2300,7 +2269,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2300 efx->net_dev = net_dev; 2269 efx->net_dev = net_dev;
2301 spin_lock_init(&efx->stats_lock); 2270 spin_lock_init(&efx->stats_lock);
2302 mutex_init(&efx->mac_lock); 2271 mutex_init(&efx->mac_lock);
2303 efx->mac_op = type->default_mac_ops;
2304 efx->phy_op = &efx_dummy_phy_operations; 2272 efx->phy_op = &efx_dummy_phy_operations;
2305 efx->mdio.dev = net_dev; 2273 efx->mdio.dev = net_dev;
2306 INIT_WORK(&efx->mac_work, efx_mac_work); 2274 INIT_WORK(&efx->mac_work, efx_mac_work);
@@ -2459,7 +2427,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2459/* NIC initialisation 2427/* NIC initialisation
2460 * 2428 *
2461 * This is called at module load (or hotplug insertion, 2429 * This is called at module load (or hotplug insertion,
2462 * theoretically). It sets up PCI mappings, tests and resets the NIC, 2430 * theoretically). It sets up PCI mappings, resets the NIC,
2463 * sets up and registers the network devices with the kernel and hooks 2431 * sets up and registers the network devices with the kernel and hooks
2464 * the interrupt service routine. It does not prepare the device for 2432 * the interrupt service routine. It does not prepare the device for
2465 * transmission; this is left to the first time one of the network 2433 * transmission; this is left to the first time one of the network
@@ -2658,7 +2626,7 @@ static int efx_pm_suspend(struct device *dev)
2658 return rc; 2626 return rc;
2659} 2627}
2660 2628
2661static struct dev_pm_ops efx_pm_ops = { 2629static const struct dev_pm_ops efx_pm_ops = {
2662 .suspend = efx_pm_suspend, 2630 .suspend = efx_pm_suspend,
2663 .resume = efx_pm_resume, 2631 .resume = efx_pm_resume,
2664 .freeze = efx_pm_freeze, 2632 .freeze = efx_pm_freeze,
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a3541ac6ea01..7f546e2c39e2 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -40,9 +40,9 @@ extern void efx_rx_strategy(struct efx_channel *channel);
40extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 40extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
41extern void efx_rx_slow_fill(unsigned long context); 41extern void efx_rx_slow_fill(unsigned long context);
42extern void __efx_rx_packet(struct efx_channel *channel, 42extern void __efx_rx_packet(struct efx_channel *channel,
43 struct efx_rx_buffer *rx_buf, bool checksummed); 43 struct efx_rx_buffer *rx_buf);
44extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 44extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
45 unsigned int len, bool checksummed, bool discard); 45 unsigned int len, u16 flags);
46extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); 46extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
47 47
48#define EFX_MAX_DMAQ_SIZE 4096UL 48#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -145,6 +145,12 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
145 napi_schedule(&channel->napi_str); 145 napi_schedule(&channel->napi_str);
146} 146}
147 147
148static inline void efx_schedule_channel_irq(struct efx_channel *channel)
149{
150 channel->last_irq_cpu = raw_smp_processor_id();
151 efx_schedule_channel(channel);
152}
153
148extern void efx_link_status_changed(struct efx_nic *efx); 154extern void efx_link_status_changed(struct efx_nic *efx);
149extern void efx_link_set_advertising(struct efx_nic *efx, u32); 155extern void efx_link_set_advertising(struct efx_nic *efx, u32);
150extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); 156extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 29b2ebfef19f..f887f65e4189 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -52,11 +52,6 @@ static u64 efx_get_uint_stat(void *field)
52 return *(unsigned int *)field; 52 return *(unsigned int *)field;
53} 53}
54 54
55static u64 efx_get_ulong_stat(void *field)
56{
57 return *(unsigned long *)field;
58}
59
60static u64 efx_get_u64_stat(void *field) 55static u64 efx_get_u64_stat(void *field)
61{ 56{
62 return *(u64 *) field; 57 return *(u64 *) field;
@@ -67,12 +62,8 @@ static u64 efx_get_atomic_stat(void *field)
67 return atomic_read((atomic_t *) field); 62 return atomic_read((atomic_t *) field);
68} 63}
69 64
70#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
71 EFX_ETHTOOL_STAT(field, mac_stats, field, \
72 unsigned long, efx_get_ulong_stat)
73
74#define EFX_ETHTOOL_U64_MAC_STAT(field) \ 65#define EFX_ETHTOOL_U64_MAC_STAT(field) \
75 EFX_ETHTOOL_STAT(field, mac_stats, field, \ 66 EFX_ETHTOOL_STAT(field, mac_stats, field, \
76 u64, efx_get_u64_stat) 67 u64, efx_get_u64_stat)
77 68
78#define EFX_ETHTOOL_UINT_NIC_STAT(name) \ 69#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
@@ -91,36 +82,36 @@ static u64 efx_get_atomic_stat(void *field)
91 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ 82 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
92 unsigned int, efx_get_uint_stat) 83 unsigned int, efx_get_uint_stat)
93 84
94static struct efx_ethtool_stat efx_ethtool_stats[] = { 85static const struct efx_ethtool_stat efx_ethtool_stats[] = {
95 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), 86 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
96 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), 87 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
97 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), 88 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
98 EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), 89 EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
99 EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad), 90 EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
100 EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause), 91 EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
101 EFX_ETHTOOL_ULONG_MAC_STAT(tx_control), 92 EFX_ETHTOOL_U64_MAC_STAT(tx_control),
102 EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast), 93 EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
103 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast), 94 EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
104 EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast), 95 EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
105 EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64), 96 EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
106 EFX_ETHTOOL_ULONG_MAC_STAT(tx_64), 97 EFX_ETHTOOL_U64_MAC_STAT(tx_64),
107 EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127), 98 EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
108 EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255), 99 EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
109 EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511), 100 EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
110 EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023), 101 EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
111 EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx), 102 EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
112 EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo), 103 EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
113 EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo), 104 EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
114 EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision), 105 EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
115 EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision), 106 EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision), 107 EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision), 108 EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred), 109 EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
119 EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision), 110 EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
120 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred), 111 EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
121 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), 112 EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
122 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), 113 EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
123 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), 114 EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
124 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), 115 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
125 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), 116 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
126 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), 117 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
@@ -128,34 +119,34 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
128 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), 119 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
129 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), 120 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
130 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), 121 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
131 EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets), 122 EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
132 EFX_ETHTOOL_ULONG_MAC_STAT(rx_good), 123 EFX_ETHTOOL_U64_MAC_STAT(rx_good),
133 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad), 124 EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
134 EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause), 125 EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
135 EFX_ETHTOOL_ULONG_MAC_STAT(rx_control), 126 EFX_ETHTOOL_U64_MAC_STAT(rx_control),
136 EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast), 127 EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
137 EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast), 128 EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
138 EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast), 129 EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
139 EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64), 130 EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
140 EFX_ETHTOOL_ULONG_MAC_STAT(rx_64), 131 EFX_ETHTOOL_U64_MAC_STAT(rx_64),
141 EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127), 132 EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
142 EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255), 133 EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
143 EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511), 134 EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
144 EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023), 135 EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
145 EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx), 136 EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
146 EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo), 137 EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
147 EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo), 138 EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
148 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64), 139 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
149 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx), 140 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
150 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo), 141 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
151 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo), 142 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
152 EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow), 143 EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
153 EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed), 144 EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
154 EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier), 145 EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
155 EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error), 146 EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
156 EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error), 147 EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
157 EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error), 148 EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
158 EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error), 149 EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
159 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), 150 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
160 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), 151 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
161 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 152 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
@@ -404,10 +395,6 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
404 &tests->eventq_int[channel->channel], 395 &tests->eventq_int[channel->channel],
405 EFX_CHANNEL_NAME(channel), 396 EFX_CHANNEL_NAME(channel),
406 "eventq.int", NULL); 397 "eventq.int", NULL);
407 efx_fill_test(n++, strings, data,
408 &tests->eventq_poll[channel->channel],
409 EFX_CHANNEL_NAME(channel),
410 "eventq.poll", NULL);
411 } 398 }
412 399
413 efx_fill_test(n++, strings, data, &tests->registers, 400 efx_fill_test(n++, strings, data, &tests->registers,
@@ -486,16 +473,17 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
486{ 473{
487 struct efx_nic *efx = netdev_priv(net_dev); 474 struct efx_nic *efx = netdev_priv(net_dev);
488 struct efx_mac_stats *mac_stats = &efx->mac_stats; 475 struct efx_mac_stats *mac_stats = &efx->mac_stats;
489 struct efx_ethtool_stat *stat; 476 const struct efx_ethtool_stat *stat;
490 struct efx_channel *channel; 477 struct efx_channel *channel;
491 struct efx_tx_queue *tx_queue; 478 struct efx_tx_queue *tx_queue;
492 struct rtnl_link_stats64 temp;
493 int i; 479 int i;
494 480
495 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); 481 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
496 482
483 spin_lock_bh(&efx->stats_lock);
484
497 /* Update MAC and NIC statistics */ 485 /* Update MAC and NIC statistics */
498 dev_get_stats(net_dev, &temp); 486 efx->type->update_stats(efx);
499 487
500 /* Fill detailed statistics buffer */ 488 /* Fill detailed statistics buffer */
501 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { 489 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
@@ -525,6 +513,8 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
525 break; 513 break;
526 } 514 }
527 } 515 }
516
517 spin_unlock_bh(&efx->stats_lock);
528} 518}
529 519
530static void efx_ethtool_self_test(struct net_device *net_dev, 520static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -747,7 +737,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
747 /* Recover by resetting the EM block */ 737 /* Recover by resetting the EM block */
748 falcon_stop_nic_stats(efx); 738 falcon_stop_nic_stats(efx);
749 falcon_drain_tx_fifo(efx); 739 falcon_drain_tx_fifo(efx);
750 efx->mac_op->reconfigure(efx); 740 falcon_reconfigure_xmac(efx);
751 falcon_start_nic_stats(efx); 741 falcon_start_nic_stats(efx);
752 } else { 742 } else {
753 /* Schedule a reset to recover */ 743 /* Schedule a reset to recover */
@@ -772,7 +762,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
772 /* Reconfigure the MAC. The PHY *may* generate a link state change event 762 /* Reconfigure the MAC. The PHY *may* generate a link state change event
773 * if the user just changed the advertised capabilities, but there's no 763 * if the user just changed the advertised capabilities, but there's no
774 * harm doing this twice */ 764 * harm doing this twice */
775 efx->mac_op->reconfigure(efx); 765 efx->type->reconfigure_mac(efx);
776 766
777out: 767out:
778 mutex_unlock(&efx->mac_lock); 768 mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 8ae1ebd35397..98285115df10 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -19,7 +19,6 @@
19#include "net_driver.h" 19#include "net_driver.h"
20#include "bitfield.h" 20#include "bitfield.h"
21#include "efx.h" 21#include "efx.h"
22#include "mac.h"
23#include "spi.h" 22#include "spi.h"
24#include "nic.h" 23#include "nic.h"
25#include "regs.h" 24#include "regs.h"
@@ -89,7 +88,7 @@ static int falcon_getscl(void *data)
89 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); 88 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
90} 89}
91 90
92static struct i2c_algo_bit_data falcon_i2c_bit_operations = { 91static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
93 .setsda = falcon_setsda, 92 .setsda = falcon_setsda,
94 .setscl = falcon_setscl, 93 .setscl = falcon_setscl,
95 .getsda = falcon_getsda, 94 .getsda = falcon_getsda,
@@ -104,8 +103,6 @@ static void falcon_push_irq_moderation(struct efx_channel *channel)
104 efx_dword_t timer_cmd; 103 efx_dword_t timer_cmd;
105 struct efx_nic *efx = channel->efx; 104 struct efx_nic *efx = channel->efx;
106 105
107 BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_AB_TC_TIMER_VAL_WIDTH));
108
109 /* Set timer register */ 106 /* Set timer register */
110 if (channel->irq_moderation) { 107 if (channel->irq_moderation) {
111 EFX_POPULATE_DWORD_2(timer_cmd, 108 EFX_POPULATE_DWORD_2(timer_cmd,
@@ -177,27 +174,24 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
177 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 174 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
178 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 175 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
179 176
177 /* Check to see if we have a serious error condition */
178 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
179 if (unlikely(syserr))
180 return efx_nic_fatal_interrupt(efx);
181
180 /* Determine interrupting queues, clear interrupt status 182 /* Determine interrupting queues, clear interrupt status
181 * register and acknowledge the device interrupt. 183 * register and acknowledge the device interrupt.
182 */ 184 */
183 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); 185 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
184 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); 186 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
185
186 /* Check to see if we have a serious error condition */
187 if (queues & (1U << efx->fatal_irq_level)) {
188 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
189 if (unlikely(syserr))
190 return efx_nic_fatal_interrupt(efx);
191 }
192
193 EFX_ZERO_OWORD(*int_ker); 187 EFX_ZERO_OWORD(*int_ker);
194 wmb(); /* Ensure the vector is cleared before interrupt ack */ 188 wmb(); /* Ensure the vector is cleared before interrupt ack */
195 falcon_irq_ack_a1(efx); 189 falcon_irq_ack_a1(efx);
196 190
197 if (queues & 1) 191 if (queues & 1)
198 efx_schedule_channel(efx_get_channel(efx, 0)); 192 efx_schedule_channel_irq(efx_get_channel(efx, 0));
199 if (queues & 2) 193 if (queues & 2)
200 efx_schedule_channel(efx_get_channel(efx, 1)); 194 efx_schedule_channel_irq(efx_get_channel(efx, 1));
201 return IRQ_HANDLED; 195 return IRQ_HANDLED;
202} 196}
203/************************************************************************** 197/**************************************************************************
@@ -613,7 +607,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
613 nic_data->stats_pending = false; 607 nic_data->stats_pending = false;
614 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { 608 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
615 rmb(); /* read the done flag before the stats */ 609 rmb(); /* read the done flag before the stats */
616 efx->mac_op->update_stats(efx); 610 falcon_update_stats_xmac(efx);
617 } else { 611 } else {
618 netif_err(efx, hw, efx->net_dev, 612 netif_err(efx, hw, efx->net_dev,
619 "timed out waiting for statistics\n"); 613 "timed out waiting for statistics\n");
@@ -670,7 +664,7 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
670 falcon_reset_macs(efx); 664 falcon_reset_macs(efx);
671 665
672 efx->phy_op->reconfigure(efx); 666 efx->phy_op->reconfigure(efx);
673 rc = efx->mac_op->reconfigure(efx); 667 rc = falcon_reconfigure_xmac(efx);
674 BUG_ON(rc); 668 BUG_ON(rc);
675 669
676 falcon_start_nic_stats(efx); 670 falcon_start_nic_stats(efx);
@@ -1218,7 +1212,7 @@ static void falcon_monitor(struct efx_nic *efx)
1218 falcon_deconfigure_mac_wrapper(efx); 1212 falcon_deconfigure_mac_wrapper(efx);
1219 1213
1220 falcon_reset_macs(efx); 1214 falcon_reset_macs(efx);
1221 rc = efx->mac_op->reconfigure(efx); 1215 rc = falcon_reconfigure_xmac(efx);
1222 BUG_ON(rc); 1216 BUG_ON(rc);
1223 1217
1224 falcon_start_nic_stats(efx); 1218 falcon_start_nic_stats(efx);
@@ -1472,6 +1466,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
1472 goto fail5; 1466 goto fail5;
1473 } 1467 }
1474 1468
1469 efx->timer_quantum_ns = 4968; /* 621 cycles */
1470
1475 /* Initialise I2C adapter */ 1471 /* Initialise I2C adapter */
1476 board = falcon_board(efx); 1472 board = falcon_board(efx);
1477 board->i2c_adap.owner = THIS_MODULE; 1473 board->i2c_adap.owner = THIS_MODULE;
@@ -1676,7 +1672,7 @@ static void falcon_update_nic_stats(struct efx_nic *efx)
1676 *nic_data->stats_dma_done == FALCON_STATS_DONE) { 1672 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
1677 nic_data->stats_pending = false; 1673 nic_data->stats_pending = false;
1678 rmb(); /* read the done flag before the stats */ 1674 rmb(); /* read the done flag before the stats */
1679 efx->mac_op->update_stats(efx); 1675 falcon_update_stats_xmac(efx);
1680 } 1676 }
1681} 1677}
1682 1678
@@ -1767,13 +1763,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
1767 .stop_stats = falcon_stop_nic_stats, 1763 .stop_stats = falcon_stop_nic_stats,
1768 .set_id_led = falcon_set_id_led, 1764 .set_id_led = falcon_set_id_led,
1769 .push_irq_moderation = falcon_push_irq_moderation, 1765 .push_irq_moderation = falcon_push_irq_moderation,
1770 .push_multicast_hash = falcon_push_multicast_hash,
1771 .reconfigure_port = falcon_reconfigure_port, 1766 .reconfigure_port = falcon_reconfigure_port,
1767 .reconfigure_mac = falcon_reconfigure_xmac,
1768 .check_mac_fault = falcon_xmac_check_fault,
1772 .get_wol = falcon_get_wol, 1769 .get_wol = falcon_get_wol,
1773 .set_wol = falcon_set_wol, 1770 .set_wol = falcon_set_wol,
1774 .resume_wol = efx_port_dummy_op_void, 1771 .resume_wol = efx_port_dummy_op_void,
1775 .test_nvram = falcon_test_nvram, 1772 .test_nvram = falcon_test_nvram,
1776 .default_mac_ops = &falcon_xmac_operations,
1777 1773
1778 .revision = EFX_REV_FALCON_A1, 1774 .revision = EFX_REV_FALCON_A1,
1779 .mem_map_size = 0x20000, 1775 .mem_map_size = 0x20000,
@@ -1786,6 +1782,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
1786 .rx_buffer_padding = 0x24, 1782 .rx_buffer_padding = 0x24,
1787 .max_interrupt_mode = EFX_INT_MODE_MSI, 1783 .max_interrupt_mode = EFX_INT_MODE_MSI,
1788 .phys_addr_channels = 4, 1784 .phys_addr_channels = 4,
1785 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
1789 .tx_dc_base = 0x130000, 1786 .tx_dc_base = 0x130000,
1790 .rx_dc_base = 0x100000, 1787 .rx_dc_base = 0x100000,
1791 .offload_features = NETIF_F_IP_CSUM, 1788 .offload_features = NETIF_F_IP_CSUM,
@@ -1809,14 +1806,14 @@ const struct efx_nic_type falcon_b0_nic_type = {
1809 .stop_stats = falcon_stop_nic_stats, 1806 .stop_stats = falcon_stop_nic_stats,
1810 .set_id_led = falcon_set_id_led, 1807 .set_id_led = falcon_set_id_led,
1811 .push_irq_moderation = falcon_push_irq_moderation, 1808 .push_irq_moderation = falcon_push_irq_moderation,
1812 .push_multicast_hash = falcon_push_multicast_hash,
1813 .reconfigure_port = falcon_reconfigure_port, 1809 .reconfigure_port = falcon_reconfigure_port,
1810 .reconfigure_mac = falcon_reconfigure_xmac,
1811 .check_mac_fault = falcon_xmac_check_fault,
1814 .get_wol = falcon_get_wol, 1812 .get_wol = falcon_get_wol,
1815 .set_wol = falcon_set_wol, 1813 .set_wol = falcon_set_wol,
1816 .resume_wol = efx_port_dummy_op_void, 1814 .resume_wol = efx_port_dummy_op_void,
1817 .test_registers = falcon_b0_test_registers, 1815 .test_registers = falcon_b0_test_registers,
1818 .test_nvram = falcon_test_nvram, 1816 .test_nvram = falcon_test_nvram,
1819 .default_mac_ops = &falcon_xmac_operations,
1820 1817
1821 .revision = EFX_REV_FALCON_B0, 1818 .revision = EFX_REV_FALCON_B0,
1822 /* Map everything up to and including the RSS indirection 1819 /* Map everything up to and including the RSS indirection
@@ -1837,6 +1834,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
1837 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 1834 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
1838 * interrupt handler only supports 32 1835 * interrupt handler only supports 32
1839 * channels */ 1836 * channels */
1837 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
1840 .tx_dc_base = 0x130000, 1838 .tx_dc_base = 0x130000,
1841 .rx_dc_base = 0x100000, 1839 .rx_dc_base = 0x100000,
1842 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, 1840 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 6cc16b8cc6f4..2084cc6ede52 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -87,7 +87,7 @@ static const u8 falcon_lm87_common_regs[] = {
87 0 87 0
88}; 88};
89 89
90static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, 90static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
91 const u8 *reg_values) 91 const u8 *reg_values)
92{ 92{
93 struct falcon_board *board = falcon_board(efx); 93 struct falcon_board *board = falcon_board(efx);
@@ -179,7 +179,7 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
179#else /* !CONFIG_SENSORS_LM87 */ 179#else /* !CONFIG_SENSORS_LM87 */
180 180
181static inline int 181static inline int
182efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, 182efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
183 const u8 *reg_values) 183 const u8 *reg_values)
184{ 184{
185 return 0; 185 return 0;
@@ -442,7 +442,7 @@ static int sfe4001_check_hw(struct efx_nic *efx)
442 return (status < 0) ? -EIO : -ERANGE; 442 return (status < 0) ? -EIO : -ERANGE;
443} 443}
444 444
445static struct i2c_board_info sfe4001_hwmon_info = { 445static const struct i2c_board_info sfe4001_hwmon_info = {
446 I2C_BOARD_INFO("max6647", 0x4e), 446 I2C_BOARD_INFO("max6647", 0x4e),
447}; 447};
448 448
@@ -522,7 +522,7 @@ static const u8 sfe4002_lm87_regs[] = {
522 0 522 0
523}; 523};
524 524
525static struct i2c_board_info sfe4002_hwmon_info = { 525static const struct i2c_board_info sfe4002_hwmon_info = {
526 I2C_BOARD_INFO("lm87", 0x2e), 526 I2C_BOARD_INFO("lm87", 0x2e),
527 .platform_data = &sfe4002_lm87_channel, 527 .platform_data = &sfe4002_lm87_channel,
528}; 528};
@@ -591,7 +591,7 @@ static const u8 sfn4112f_lm87_regs[] = {
591 0 591 0
592}; 592};
593 593
594static struct i2c_board_info sfn4112f_hwmon_info = { 594static const struct i2c_board_info sfn4112f_hwmon_info = {
595 I2C_BOARD_INFO("lm87", 0x2e), 595 I2C_BOARD_INFO("lm87", 0x2e),
596 .platform_data = &sfn4112f_lm87_channel, 596 .platform_data = &sfn4112f_lm87_channel,
597}; 597};
@@ -653,7 +653,7 @@ static const u8 sfe4003_lm87_regs[] = {
653 0 653 0
654}; 654};
655 655
656static struct i2c_board_info sfe4003_hwmon_info = { 656static const struct i2c_board_info sfe4003_hwmon_info = {
657 I2C_BOARD_INFO("lm87", 0x2e), 657 I2C_BOARD_INFO("lm87", 0x2e),
658 .platform_data = &sfe4003_lm87_channel, 658 .platform_data = &sfe4003_lm87_channel,
659}; 659};
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
index 9516452c079c..6106ef15dee3 100644
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
@@ -14,7 +14,6 @@
14#include "nic.h" 14#include "nic.h"
15#include "regs.h" 15#include "regs.h"
16#include "io.h" 16#include "io.h"
17#include "mac.h"
18#include "mdio_10g.h" 17#include "mdio_10g.h"
19#include "workarounds.h" 18#include "workarounds.h"
20 19
@@ -139,7 +138,7 @@ static bool falcon_xmac_link_ok(struct efx_nic *efx)
139 return (efx->loopback_mode == LOOPBACK_XGMII || 138 return (efx->loopback_mode == LOOPBACK_XGMII ||
140 falcon_xgxs_link_ok(efx)) && 139 falcon_xgxs_link_ok(efx)) &&
141 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || 140 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
142 LOOPBACK_INTERNAL(efx) || 141 LOOPBACK_INTERNAL(efx) ||
143 efx_mdio_phyxgxs_lane_sync(efx)); 142 efx_mdio_phyxgxs_lane_sync(efx));
144} 143}
145 144
@@ -270,12 +269,12 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
270 return mac_up; 269 return mac_up;
271} 270}
272 271
273static bool falcon_xmac_check_fault(struct efx_nic *efx) 272bool falcon_xmac_check_fault(struct efx_nic *efx)
274{ 273{
275 return !falcon_xmac_link_ok_retry(efx, 5); 274 return !falcon_xmac_link_ok_retry(efx, 5);
276} 275}
277 276
278static int falcon_reconfigure_xmac(struct efx_nic *efx) 277int falcon_reconfigure_xmac(struct efx_nic *efx)
279{ 278{
280 struct falcon_nic_data *nic_data = efx->nic_data; 279 struct falcon_nic_data *nic_data = efx->nic_data;
281 280
@@ -290,7 +289,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
290 return 0; 289 return 0;
291} 290}
292 291
293static void falcon_update_stats_xmac(struct efx_nic *efx) 292void falcon_update_stats_xmac(struct efx_nic *efx)
294{ 293{
295 struct efx_mac_stats *mac_stats = &efx->mac_stats; 294 struct efx_mac_stats *mac_stats = &efx->mac_stats;
296 295
@@ -361,9 +360,3 @@ void falcon_poll_xmac(struct efx_nic *efx)
361 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 360 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
362 falcon_ack_status_intr(efx); 361 falcon_ack_status_intr(efx);
363} 362}
364
365const struct efx_mac_operations falcon_xmac_operations = {
366 .reconfigure = falcon_reconfigure_xmac,
367 .update_stats = falcon_update_stats_xmac,
368 .check_fault = falcon_xmac_check_fault,
369};
diff --git a/drivers/net/ethernet/sfc/mac.h b/drivers/net/ethernet/sfc/mac.h
deleted file mode 100644
index d6a255d0856b..000000000000
--- a/drivers/net/ethernet/sfc/mac.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_MAC_H
12#define EFX_MAC_H
13
14#include "net_driver.h"
15
16extern const struct efx_mac_operations falcon_xmac_operations;
17extern const struct efx_mac_operations efx_mcdi_mac_operations;
18extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
19 u32 dma_len, int enable, int clear);
20
21#endif
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81a425397468..619f63a66ce7 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -22,22 +22,22 @@
22 ************************************************************************** 22 **************************************************************************
23 */ 23 */
24 24
25/* Software-defined structure to the shared-memory */
26#define CMD_NOTIFY_PORT0 0
27#define CMD_NOTIFY_PORT1 4
28#define CMD_PDU_PORT0 0x008
29#define CMD_PDU_PORT1 0x108
30#define REBOOT_FLAG_PORT0 0x3f8
31#define REBOOT_FLAG_PORT1 0x3fc
32
33#define MCDI_RPC_TIMEOUT 10 /*seconds */ 25#define MCDI_RPC_TIMEOUT 10 /*seconds */
34 26
35#define MCDI_PDU(efx) \ 27#define MCDI_PDU(efx) \
36 (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) 28 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
37#define MCDI_DOORBELL(efx) \ 29#define MCDI_DOORBELL(efx) \
38 (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) 30 (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
39#define MCDI_REBOOT_FLAG(efx) \ 31#define MCDI_STATUS(efx) \
40 (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) 32 (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
33
34/* A reboot/assertion causes the MCDI status word to be set after the
35 * command word is set or a REBOOT event is sent. If we notice a reboot
36 * via these mechanisms then wait 10ms for the status word to be set. */
37#define MCDI_STATUS_DELAY_US 100
38#define MCDI_STATUS_DELAY_COUNT 100
39#define MCDI_STATUS_SLEEP_MS \
40 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
41 41
42#define SEQ_MASK \ 42#define SEQ_MASK \
43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
@@ -77,7 +77,7 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
77 u32 xflags, seqno; 77 u32 xflags, seqno;
78 78
79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
80 BUG_ON(inlen & 3 || inlen >= 0x100); 80 BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);
81 81
82 seqno = mcdi->seqno & SEQ_MASK; 82 seqno = mcdi->seqno & SEQ_MASK;
83 xflags = 0; 83 xflags = 0;
@@ -111,7 +111,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
111 int i; 111 int i;
112 112
113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
114 BUG_ON(outlen & 3 || outlen >= 0x100); 114 BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);
115 115
116 for (i = 0; i < outlen; i += 4) 116 for (i = 0; i < outlen; i += 4)
117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
@@ -210,7 +210,7 @@ out:
210/* Test and clear MC-rebooted flag for this port/function */ 210/* Test and clear MC-rebooted flag for this port/function */
211int efx_mcdi_poll_reboot(struct efx_nic *efx) 211int efx_mcdi_poll_reboot(struct efx_nic *efx)
212{ 212{
213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
214 efx_dword_t reg; 214 efx_dword_t reg;
215 uint32_t value; 215 uint32_t value;
216 216
@@ -384,6 +384,11 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
384 netif_dbg(efx, hw, efx->net_dev, 384 netif_dbg(efx, hw, efx->net_dev,
385 "MC command 0x%x inlen %d failed rc=%d\n", 385 "MC command 0x%x inlen %d failed rc=%d\n",
386 cmd, (int)inlen, -rc); 386 cmd, (int)inlen, -rc);
387
388 if (rc == -EIO || rc == -EINTR) {
389 msleep(MCDI_STATUS_SLEEP_MS);
390 efx_mcdi_poll_reboot(efx);
391 }
387 } 392 }
388 393
389 efx_mcdi_release(mcdi); 394 efx_mcdi_release(mcdi);
@@ -465,10 +470,20 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
465 mcdi->resplen = 0; 470 mcdi->resplen = 0;
466 ++mcdi->credits; 471 ++mcdi->credits;
467 } 472 }
468 } else 473 } else {
474 int count;
475
469 /* Nobody was waiting for an MCDI request, so trigger a reset */ 476 /* Nobody was waiting for an MCDI request, so trigger a reset */
470 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 477 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
471 478
479 /* Consume the status word since efx_mcdi_rpc_finish() won't */
480 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
481 if (efx_mcdi_poll_reboot(efx))
482 break;
483 udelay(MCDI_STATUS_DELAY_US);
484 }
485 }
486
472 spin_unlock(&mcdi->iface_lock); 487 spin_unlock(&mcdi->iface_lock);
473} 488}
474 489
@@ -502,49 +517,6 @@ static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
502 efx_link_status_changed(efx); 517 efx_link_status_changed(efx);
503} 518}
504 519
505static const char *sensor_names[] = {
506 [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
507 [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
508 [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
509 [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
510 [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
511 [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
512 [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
513 [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
514 [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
515 [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
516 [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
517 [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
518 [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
519};
520
521static const char *sensor_status_names[] = {
522 [MC_CMD_SENSOR_STATE_OK] = "OK",
523 [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
524 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
525 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
526};
527
528static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
529{
530 unsigned int monitor, state, value;
531 const char *name, *state_txt;
532 monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
533 state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
534 value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
535 /* Deal gracefully with the board having more drivers than we
536 * know about, but do not expect new sensor states. */
537 name = (monitor >= ARRAY_SIZE(sensor_names))
538 ? "No sensor name available" :
539 sensor_names[monitor];
540 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
541 state_txt = sensor_status_names[state];
542
543 netif_err(efx, hw, efx->net_dev,
544 "Sensor %d (%s) reports condition '%s' for raw value %d\n",
545 monitor, name, state_txt, value);
546}
547
548/* Called from falcon_process_eventq for MCDI events */ 520/* Called from falcon_process_eventq for MCDI events */
549void efx_mcdi_process_event(struct efx_channel *channel, 521void efx_mcdi_process_event(struct efx_channel *channel,
550 efx_qword_t *event) 522 efx_qword_t *event)
@@ -604,7 +576,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
604 576
605void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 577void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
606{ 578{
607 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; 579 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
608 size_t outlength; 580 size_t outlength;
609 const __le16 *ver_words; 581 const __le16 *ver_words;
610 int rc; 582 int rc;
@@ -616,7 +588,7 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
616 if (rc) 588 if (rc)
617 goto fail; 589 goto fail;
618 590
619 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { 591 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
620 rc = -EIO; 592 rc = -EIO;
621 goto fail; 593 goto fail;
622 } 594 }
@@ -663,9 +635,9 @@ fail:
663} 635}
664 636
665int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 637int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
666 u16 *fw_subtype_list) 638 u16 *fw_subtype_list, u32 *capabilities)
667{ 639{
668 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; 640 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
669 size_t outlen; 641 size_t outlen;
670 int port_num = efx_port_num(efx); 642 int port_num = efx_port_num(efx);
671 int offset; 643 int offset;
@@ -678,7 +650,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
678 if (rc) 650 if (rc)
679 goto fail; 651 goto fail;
680 652
681 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { 653 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
682 rc = -EIO; 654 rc = -EIO;
683 goto fail; 655 goto fail;
684 } 656 }
@@ -691,7 +663,16 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
691 if (fw_subtype_list) 663 if (fw_subtype_list)
692 memcpy(fw_subtype_list, 664 memcpy(fw_subtype_list,
693 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, 665 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
694 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); 666 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
667 sizeof(fw_subtype_list[0]));
668 if (capabilities) {
669 if (port_num)
670 *capabilities = MCDI_DWORD(outbuf,
671 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
672 else
673 *capabilities = MCDI_DWORD(outbuf,
674 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
675 }
695 676
696 return 0; 677 return 0;
697 678
@@ -779,7 +760,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
779 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); 760 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
780 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); 761 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
781 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & 762 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
782 (1 << MC_CMD_NVRAM_PROTECTED_LBN)); 763 (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
783 return 0; 764 return 0;
784 765
785fail: 766fail:
@@ -1060,7 +1041,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1060 1041
1061int efx_mcdi_reset_port(struct efx_nic *efx) 1042int efx_mcdi_reset_port(struct efx_nic *efx)
1062{ 1043{
1063 int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); 1044 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
1064 if (rc) 1045 if (rc)
1065 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1046 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1066 __func__, rc); 1047 __func__, rc);
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index aced2a7856fc..fbaa6efcd744 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -56,6 +56,15 @@ struct efx_mcdi_iface {
56 size_t resplen; 56 size_t resplen;
57}; 57};
58 58
59struct efx_mcdi_mon {
60 struct efx_buffer dma_buf;
61 struct mutex update_lock;
62 unsigned long last_update;
63 struct device *device;
64 struct efx_mcdi_mon_attribute *attrs;
65 unsigned int n_attrs;
66};
67
59extern void efx_mcdi_init(struct efx_nic *efx); 68extern void efx_mcdi_init(struct efx_nic *efx);
60 69
61extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, 70extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
@@ -68,6 +77,7 @@ extern void efx_mcdi_mode_event(struct efx_nic *efx);
68 77
69extern void efx_mcdi_process_event(struct efx_channel *channel, 78extern void efx_mcdi_process_event(struct efx_channel *channel,
70 efx_qword_t *event); 79 efx_qword_t *event);
80extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
71 81
72#define MCDI_PTR2(_buf, _ofst) \ 82#define MCDI_PTR2(_buf, _ofst) \
73 (((u8 *)_buf) + _ofst) 83 (((u8 *)_buf) + _ofst)
@@ -83,6 +93,10 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
83 93
84#define MCDI_PTR(_buf, _ofst) \ 94#define MCDI_PTR(_buf, _ofst) \
85 MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST) 95 MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
96#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \
97 MCDI_PTR2(_buf, \
98 MC_CMD_ ## _field ## _OFST + \
99 (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN)
86#define MCDI_SET_DWORD(_buf, _ofst, _value) \ 100#define MCDI_SET_DWORD(_buf, _ofst, _value) \
87 MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value) 101 MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
88#define MCDI_DWORD(_buf, _ofst) \ 102#define MCDI_DWORD(_buf, _ofst) \
@@ -92,12 +106,18 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
92 106
93#define MCDI_EVENT_FIELD(_ev, _field) \ 107#define MCDI_EVENT_FIELD(_ev, _field) \
94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 108 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
109#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
110 EFX_DWORD_FIELD( \
111 *((efx_dword_t *) \
112 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
113 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
114 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2)
95 115
96extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); 116extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 117extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
98 bool *was_attached_out); 118 bool *was_attached_out);
99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 119extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
100 u16 *fw_subtype_list); 120 u16 *fw_subtype_list, u32 *capabilities);
101extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, 121extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
102 u32 dest_evq); 122 u32 dest_evq);
103extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); 123extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
@@ -126,5 +146,17 @@ extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
126extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); 146extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
127extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); 147extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
128extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); 148extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
149extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
150 u32 dma_len, int enable, int clear);
151extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx);
152extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
153
154#ifdef CONFIG_SFC_MCDI_MON
155extern int efx_mcdi_mon_probe(struct efx_nic *efx);
156extern void efx_mcdi_mon_remove(struct efx_nic *efx);
157#else
158static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
159static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
160#endif
129 161
130#endif /* EFX_MCDI_H */ 162#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
index 50c20777a564..f67cf921bd1b 100644
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ b/drivers/net/ethernet/sfc/mcdi_mac.c
@@ -9,7 +9,6 @@
9 9
10#include "net_driver.h" 10#include "net_driver.h"
11#include "efx.h" 11#include "efx.h"
12#include "mac.h"
13#include "mcdi.h" 12#include "mcdi.h"
14#include "mcdi_pcol.h" 13#include "mcdi_pcol.h"
15 14
@@ -52,7 +51,7 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
52 NULL, 0, NULL); 51 NULL, 0, NULL);
53} 52}
54 53
55static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults) 54bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
56{ 55{
57 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 56 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
58 size_t outlength; 57 size_t outlength;
@@ -62,16 +61,13 @@ static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
62 61
63 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, 62 rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
64 outbuf, sizeof(outbuf), &outlength); 63 outbuf, sizeof(outbuf), &outlength);
65 if (rc) 64 if (rc) {
66 goto fail; 65 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
67 66 __func__, rc);
68 *faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT); 67 return true;
69 return 0; 68 }
70 69
71fail: 70 return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
72 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
73 __func__, rc);
74 return rc;
75} 71}
76 72
77int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, 73int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
@@ -84,7 +80,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
84 u32 addr_hi; 80 u32 addr_hi;
85 u32 addr_lo; 81 u32 addr_lo;
86 82
87 BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0); 83 BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
88 84
89 addr_lo = ((u64)dma_addr) >> 0; 85 addr_lo = ((u64)dma_addr) >> 0;
90 addr_hi = ((u64)dma_addr) >> 32; 86 addr_hi = ((u64)dma_addr) >> 32;
@@ -93,13 +89,13 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
93 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi); 89 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
94 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); 90 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
95 EFX_POPULATE_DWORD_7(*cmd_ptr, 91 EFX_POPULATE_DWORD_7(*cmd_ptr,
96 MC_CMD_MAC_STATS_CMD_DMA, !!enable, 92 MC_CMD_MAC_STATS_IN_DMA, !!enable,
97 MC_CMD_MAC_STATS_CMD_CLEAR, clear, 93 MC_CMD_MAC_STATS_IN_CLEAR, clear,
98 MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1, 94 MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
99 MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable, 95 MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
100 MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0, 96 MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
101 MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1, 97 MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
102 MC_CMD_MAC_STATS_CMD_PERIOD_MS, period); 98 MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
103 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 99 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
104 100
105 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 101 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
@@ -115,31 +111,18 @@ fail:
115 return rc; 111 return rc;
116} 112}
117 113
118static int efx_mcdi_mac_reconfigure(struct efx_nic *efx) 114int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
119{ 115{
120 int rc; 116 int rc;
121 117
118 WARN_ON(!mutex_is_locked(&efx->mac_lock));
119
122 rc = efx_mcdi_set_mac(efx); 120 rc = efx_mcdi_set_mac(efx);
123 if (rc != 0) 121 if (rc != 0)
124 return rc; 122 return rc;
125 123
126 /* Restore the multicast hash registers. */ 124 return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
127 efx->type->push_multicast_hash(efx); 125 efx->multicast_hash.byte,
128 126 sizeof(efx->multicast_hash),
129 return 0; 127 NULL, 0, NULL);
130}
131
132
133static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
134{
135 u32 faults;
136 int rc = efx_mcdi_get_mac_faults(efx, &faults);
137 return (rc != 0) || (faults != 0);
138} 128}
139
140
141const struct efx_mac_operations efx_mcdi_mac_operations = {
142 .reconfigure = efx_mcdi_mac_reconfigure,
143 .update_stats = efx_port_dummy_op_void,
144 .check_fault = efx_mcdi_mac_check_fault,
145};
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
new file mode 100644
index 000000000000..8a72c10b9a6c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -0,0 +1,415 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/bitops.h>
11#include <linux/slab.h>
12#include <linux/hwmon.h>
13#include <linux/stat.h>
14
15#include "net_driver.h"
16#include "mcdi.h"
17#include "mcdi_pcol.h"
18#include "nic.h"
19
20enum efx_hwmon_type {
21 EFX_HWMON_UNKNOWN,
22 EFX_HWMON_TEMP, /* temperature */
23 EFX_HWMON_COOL, /* cooling device, probably a heatsink */
24 EFX_HWMON_IN /* input voltage */
25};
26
27static const struct {
28 const char *label;
29 enum efx_hwmon_type hwmon_type;
30 int port;
31} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = {
32#define SENSOR(name, label, hwmon_type, port) \
33 [MC_CMD_SENSOR_##name] = { label, hwmon_type, port }
34 SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1),
35 SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1),
36 SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1),
37 SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0),
38 SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0),
39 SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1),
40 SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1),
41 SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1),
42 SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1),
43 SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1),
44 SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1),
45 SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1),
46 SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1),
47 SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1),
48 SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1),
49#undef SENSOR
50};
51
52static const char *const sensor_status_names[] = {
53 [MC_CMD_SENSOR_STATE_OK] = "OK",
54 [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
55 [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
56 [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
57};
58
59void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
60{
61 unsigned int type, state, value;
62 const char *name = NULL, *state_txt;
63
64 type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
65 state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
66 value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
67
68 /* Deal gracefully with the board having more drivers than we
69 * know about, but do not expect new sensor states. */
70 if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
71 name = efx_mcdi_sensor_type[type].label;
72 if (!name)
73 name = "No sensor name available";
74 EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
75 state_txt = sensor_status_names[state];
76
77 netif_err(efx, hw, efx->net_dev,
78 "Sensor %d (%s) reports condition '%s' for raw value %d\n",
79 type, name, state_txt, value);
80}
81
82#ifdef CONFIG_SFC_MCDI_MON
83
84struct efx_mcdi_mon_attribute {
85 struct device_attribute dev_attr;
86 unsigned int index;
87 unsigned int type;
88 unsigned int limit_value;
89 char name[12];
90};
91
92static int efx_mcdi_mon_update(struct efx_nic *efx)
93{
94 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
95 u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN];
96 int rc;
97
98 MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO,
99 hwmon->dma_buf.dma_addr & 0xffffffff);
100 MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI,
101 (u64)hwmon->dma_buf.dma_addr >> 32);
102
103 rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
104 inbuf, sizeof(inbuf), NULL, 0, NULL);
105 if (rc == 0)
106 hwmon->last_update = jiffies;
107 return rc;
108}
109
110static ssize_t efx_mcdi_mon_show_name(struct device *dev,
111 struct device_attribute *attr,
112 char *buf)
113{
114 return sprintf(buf, "%s\n", KBUILD_MODNAME);
115}
116
117static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
118 efx_dword_t *entry)
119{
120 struct efx_nic *efx = dev_get_drvdata(dev);
121 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
122 int rc;
123
124 BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
125
126 mutex_lock(&hwmon->update_lock);
127
128 /* Use cached value if last update was < 1 s ago */
129 if (time_before(jiffies, hwmon->last_update + HZ))
130 rc = 0;
131 else
132 rc = efx_mcdi_mon_update(efx);
133
134 /* Copy out the requested entry */
135 *entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];
136
137 mutex_unlock(&hwmon->update_lock);
138
139 return rc;
140}
141
142static ssize_t efx_mcdi_mon_show_value(struct device *dev,
143 struct device_attribute *attr,
144 char *buf)
145{
146 struct efx_mcdi_mon_attribute *mon_attr =
147 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
148 efx_dword_t entry;
149 unsigned int value;
150 int rc;
151
152 rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
153 if (rc)
154 return rc;
155
156 value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
157
158 /* Convert temperature from degrees to milli-degrees Celsius */
159 if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
160 value *= 1000;
161
162 return sprintf(buf, "%u\n", value);
163}
164
165static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
166 struct device_attribute *attr,
167 char *buf)
168{
169 struct efx_mcdi_mon_attribute *mon_attr =
170 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
171 unsigned int value;
172
173 value = mon_attr->limit_value;
174
175 /* Convert temperature from degrees to milli-degrees Celsius */
176 if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
177 value *= 1000;
178
179 return sprintf(buf, "%u\n", value);
180}
181
182static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
183 struct device_attribute *attr,
184 char *buf)
185{
186 struct efx_mcdi_mon_attribute *mon_attr =
187 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
188 efx_dword_t entry;
189 int state;
190 int rc;
191
192 rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
193 if (rc)
194 return rc;
195
196 state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
197 return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
198}
199
200static ssize_t efx_mcdi_mon_show_label(struct device *dev,
201 struct device_attribute *attr,
202 char *buf)
203{
204 struct efx_mcdi_mon_attribute *mon_attr =
205 container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
206 return sprintf(buf, "%s\n",
207 efx_mcdi_sensor_type[mon_attr->type].label);
208}
209
210static int
211efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
212 ssize_t (*reader)(struct device *,
213 struct device_attribute *, char *),
214 unsigned int index, unsigned int type,
215 unsigned int limit_value)
216{
217 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
218 struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
219 int rc;
220
221 strlcpy(attr->name, name, sizeof(attr->name));
222 attr->index = index;
223 attr->type = type;
224 attr->limit_value = limit_value;
225 attr->dev_attr.attr.name = attr->name;
226 attr->dev_attr.attr.mode = S_IRUGO;
227 attr->dev_attr.show = reader;
228 rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr);
229 if (rc == 0)
230 ++hwmon->n_attrs;
231 return rc;
232}
233
234int efx_mcdi_mon_probe(struct efx_nic *efx)
235{
236 struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
237 unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0;
238 u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX];
239 size_t outlen;
240 char name[12];
241 u32 mask;
242 int rc, i, type;
243
244 BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0);
245
246 rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0,
247 outbuf, sizeof(outbuf), &outlen);
248 if (rc)
249 return rc;
250 if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
251 return -EIO;
252
253 /* Find out which sensors are present. Don't create a device
254 * if there are none.
255 */
256 mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
257 if (mask == 0)
258 return 0;
259
260 /* Check again for short response */
261 if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
262 return -EIO;
263
264 rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
265 4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
266 if (rc)
267 return rc;
268
269 mutex_init(&hwmon->update_lock);
270 efx_mcdi_mon_update(efx);
271
272 /* Allocate space for the maximum possible number of
273 * attributes for this set of sensors: name of the driver plus
274 * value, min, max, crit, alarm and label for each sensor.
275 */
276 n_attrs = 1 + 6 * hweight32(mask);
277 hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
278 if (!hwmon->attrs) {
279 rc = -ENOMEM;
280 goto fail;
281 }
282
283 hwmon->device = hwmon_device_register(&efx->pci_dev->dev);
284 if (IS_ERR(hwmon->device)) {
285 rc = PTR_ERR(hwmon->device);
286 goto fail;
287 }
288
289 rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
290 if (rc)
291 goto fail;
292
293 for (i = 0, type = -1; ; i++) {
294 const char *hwmon_prefix;
295 unsigned hwmon_index;
296 u16 min1, max1, min2, max2;
297
298 /* Find next sensor type or exit if there is none */
299 type++;
300 while (!(mask & (1 << type))) {
301 type++;
302 if (type == 32)
303 return 0;
304 }
305
306 /* Skip sensors specific to a different port */
307 if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN &&
308 efx_mcdi_sensor_type[type].port >= 0 &&
309 efx_mcdi_sensor_type[type].port != efx_port_num(efx))
310 continue;
311
312 switch (efx_mcdi_sensor_type[type].hwmon_type) {
313 case EFX_HWMON_TEMP:
314 hwmon_prefix = "temp";
315 hwmon_index = ++n_temp; /* 1-based */
316 break;
317 case EFX_HWMON_COOL:
318 /* This is likely to be a heatsink, but there
319 * is no convention for representing cooling
320 * devices other than fans.
321 */
322 hwmon_prefix = "fan";
323 hwmon_index = ++n_cool; /* 1-based */
324 break;
325 default:
326 hwmon_prefix = "in";
327 hwmon_index = n_in++; /* 0-based */
328 break;
329 }
330
331 min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
332 SENSOR_INFO_ENTRY, i, MIN1);
333 max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
334 SENSOR_INFO_ENTRY, i, MAX1);
335 min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
336 SENSOR_INFO_ENTRY, i, MIN2);
337 max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
338 SENSOR_INFO_ENTRY, i, MAX2);
339
340 if (min1 != max1) {
341 snprintf(name, sizeof(name), "%s%u_input",
342 hwmon_prefix, hwmon_index);
343 rc = efx_mcdi_mon_add_attr(
344 efx, name, efx_mcdi_mon_show_value, i, type, 0);
345 if (rc)
346 goto fail;
347
348 snprintf(name, sizeof(name), "%s%u_min",
349 hwmon_prefix, hwmon_index);
350 rc = efx_mcdi_mon_add_attr(
351 efx, name, efx_mcdi_mon_show_limit,
352 i, type, min1);
353 if (rc)
354 goto fail;
355
356 snprintf(name, sizeof(name), "%s%u_max",
357 hwmon_prefix, hwmon_index);
358 rc = efx_mcdi_mon_add_attr(
359 efx, name, efx_mcdi_mon_show_limit,
360 i, type, max1);
361 if (rc)
362 goto fail;
363
364 if (min2 != max2) {
365 /* Assume max2 is critical value.
366 * But we have no good way to expose min2.
367 */
368 snprintf(name, sizeof(name), "%s%u_crit",
369 hwmon_prefix, hwmon_index);
370 rc = efx_mcdi_mon_add_attr(
371 efx, name, efx_mcdi_mon_show_limit,
372 i, type, max2);
373 if (rc)
374 goto fail;
375 }
376 }
377
378 snprintf(name, sizeof(name), "%s%u_alarm",
379 hwmon_prefix, hwmon_index);
380 rc = efx_mcdi_mon_add_attr(
381 efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
382 if (rc)
383 goto fail;
384
385 if (efx_mcdi_sensor_type[type].label) {
386 snprintf(name, sizeof(name), "%s%u_label",
387 hwmon_prefix, hwmon_index);
388 rc = efx_mcdi_mon_add_attr(
389 efx, name, efx_mcdi_mon_show_label, i, type, 0);
390 if (rc)
391 goto fail;
392 }
393 }
394
395fail:
396 efx_mcdi_mon_remove(efx);
397 return rc;
398}
399
400void efx_mcdi_mon_remove(struct efx_nic *efx)
401{
402 struct siena_nic_data *nic_data = efx->nic_data;
403 struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
404 unsigned int i;
405
406 for (i = 0; i < hwmon->n_attrs; i++)
407 device_remove_file(&efx->pci_dev->dev,
408 &hwmon->attrs[i].dev_attr);
409 kfree(hwmon->attrs);
410 if (hwmon->device)
411 hwmon_device_unregister(hwmon->device);
412 efx_nic_free_buffer(efx, &hwmon->dma_buf);
413}
414
415#endif /* CONFIG_SFC_MCDI_MON */
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 41fe06fa0600..0310b9f08c9b 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -22,6 +22,18 @@
22/* The Scheduler has started. */ 22/* The Scheduler has started. */
23#define MC_FW_STATE_SCHED (8) 23#define MC_FW_STATE_SCHED (8)
24 24
25/* Siena MC shared memmory offsets */
26/* The 'doorbell' addresses are hard-wired to alert the MC when written */
27#define MC_SMEM_P0_DOORBELL_OFST 0x000
28#define MC_SMEM_P1_DOORBELL_OFST 0x004
29/* The rest of these are firmware-defined */
30#define MC_SMEM_P0_PDU_OFST 0x008
31#define MC_SMEM_P1_PDU_OFST 0x108
32#define MC_SMEM_PDU_LEN 0x100
33#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
34#define MC_SMEM_P0_STATUS_OFST 0x7f8
35#define MC_SMEM_P1_STATUS_OFST 0x7fc
36
25/* Values to be written to the per-port status dword in shared 37/* Values to be written to the per-port status dword in shared
26 * memory on reboot and assert */ 38 * memory on reboot and assert */
27#define MC_STATUS_DWORD_REBOOT (0xb007b007) 39#define MC_STATUS_DWORD_REBOOT (0xb007b007)
@@ -34,6 +46,8 @@
34 */ 46 */
35#define MCDI_PCOL_VERSION 1 47#define MCDI_PCOL_VERSION 1
36 48
49/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
50
37/** 51/**
38 * MCDI version 1 52 * MCDI version 1
39 * 53 *
@@ -131,53 +145,6 @@
131 */ 145 */
132#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc 146#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
133 147
134#define MCDI_EVENT_DATA_LBN 0
135#define MCDI_EVENT_DATA_WIDTH 32
136#define MCDI_EVENT_CONT_LBN 32
137#define MCDI_EVENT_CONT_WIDTH 1
138#define MCDI_EVENT_LEVEL_LBN 33
139#define MCDI_EVENT_LEVEL_WIDTH 3
140#define MCDI_EVENT_LEVEL_INFO (0)
141#define MCDI_EVENT_LEVEL_WARN (1)
142#define MCDI_EVENT_LEVEL_ERR (2)
143#define MCDI_EVENT_LEVEL_FATAL (3)
144#define MCDI_EVENT_SRC_LBN 36
145#define MCDI_EVENT_SRC_WIDTH 8
146#define MCDI_EVENT_CODE_LBN 44
147#define MCDI_EVENT_CODE_WIDTH 8
148#define MCDI_EVENT_CODE_BADSSERT (1)
149#define MCDI_EVENT_CODE_PMNOTICE (2)
150#define MCDI_EVENT_CODE_CMDDONE (3)
151#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
152#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
153#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
154#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
155#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
156#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
157#define MCDI_EVENT_CODE_LINKCHANGE (4)
158#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
159#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
160#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
161#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
162#define MCDI_EVENT_LINKCHANGE_SPEED_100M 1
163#define MCDI_EVENT_LINKCHANGE_SPEED_1G 2
164#define MCDI_EVENT_LINKCHANGE_SPEED_10G 3
165#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
166#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
167#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
168#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
169#define MCDI_EVENT_CODE_SENSOREVT (5)
170#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
171#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
172#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
173#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
174#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
175#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
176#define MCDI_EVENT_CODE_SCHEDERR (6)
177#define MCDI_EVENT_CODE_REBOOT (7)
178#define MCDI_EVENT_CODE_MAC_STATS_DMA (8)
179#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
180#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
181 148
182/* Non-existent command target */ 149/* Non-existent command target */
183#define MC_CMD_ERR_ENOENT 2 150#define MC_CMD_ERR_ENOENT 2
@@ -198,121 +165,24 @@
198 165
199#define MC_CMD_ERR_CODE_OFST 0 166#define MC_CMD_ERR_CODE_OFST 0
200 167
168/* We define 8 "escape" commands to allow
169 for command number space extension */
170
171#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
172#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
173#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
174#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
175#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
176#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
177#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
178#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
179
180/* Vectors in the boot ROM */
181/* Point to the copycode entry point. */
182#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
183/* Points to the recovery mode entry point. */
184#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
201 185
202/* MC_CMD_READ32: (debug, variadic out)
203 * Read multiple 32byte words from MC memory
204 */
205#define MC_CMD_READ32 0x01
206#define MC_CMD_READ32_IN_LEN 8
207#define MC_CMD_READ32_IN_ADDR_OFST 0
208#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
209#define MC_CMD_READ32_OUT_LEN(_numwords) \
210 (4 * (_numwords))
211#define MC_CMD_READ32_OUT_BUFFER_OFST 0
212
213/* MC_CMD_WRITE32: (debug, variadic in)
214 * Write multiple 32byte words to MC memory
215 */
216#define MC_CMD_WRITE32 0x02
217#define MC_CMD_WRITE32_IN_LEN(_numwords) (((_numwords) * 4) + 4)
218#define MC_CMD_WRITE32_IN_ADDR_OFST 0
219#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
220#define MC_CMD_WRITE32_OUT_LEN 0
221
222/* MC_CMD_COPYCODE: (debug)
223 * Copy MC code between two locations and jump
224 */
225#define MC_CMD_COPYCODE 0x03
226#define MC_CMD_COPYCODE_IN_LEN 16
227#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
228#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
229#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
230#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
231/* Control should return to the caller rather than jumping */
232#define MC_CMD_COPYCODE_JUMP_NONE 1
233#define MC_CMD_COPYCODE_OUT_LEN 0
234
235/* MC_CMD_SET_FUNC: (debug)
236 * Select function for function-specific commands.
237 */
238#define MC_CMD_SET_FUNC 0x04
239#define MC_CMD_SET_FUNC_IN_LEN 4
240#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
241#define MC_CMD_SET_FUNC_OUT_LEN 0
242
243/* MC_CMD_GET_BOOT_STATUS:
244 * Get the instruction address from which the MC booted.
245 */
246#define MC_CMD_GET_BOOT_STATUS 0x05
247#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
248#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
249#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
250#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
251/* Reboot caused by watchdog */
252#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_LBN (0)
253#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_WIDTH (1)
254/* MC booted from primary flash partition */
255#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_LBN (1)
256#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_WIDTH (1)
257/* MC booted from backup flash partition */
258#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_LBN (2)
259#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_WIDTH (1)
260
261/* MC_CMD_GET_ASSERTS: (debug, variadic out)
262 * Get (and optionally clear) the current assertion status.
263 *
264 * Only OUT.GLOBAL_FLAGS is guaranteed to exist in the completion
265 * payload. The other fields will only be present if
266 * OUT.GLOBAL_FLAGS != NO_FAILS
267 */
268#define MC_CMD_GET_ASSERTS 0x06
269#define MC_CMD_GET_ASSERTS_IN_LEN 4
270#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
271#define MC_CMD_GET_ASSERTS_OUT_LEN 140
272/* Assertion status flag */
273#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
274/*! No assertions have failed. */
275#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 1
276/*! A system-level assertion has failed. */
277#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 2
278/*! A thread-level assertion has failed. */
279#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 3
280/*! The system was reset by the watchdog. */
281#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 4
282/* Failing PC value */
283#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
284/* Saved GP regs */
285#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
286#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_LEN 124
287/* Failing thread address */
288#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
289
290/* MC_CMD_LOG_CTRL:
291 * Determine the output stream for various events and messages
292 */
293#define MC_CMD_LOG_CTRL 0x07
294#define MC_CMD_LOG_CTRL_IN_LEN 8
295#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
296#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART (1)
297#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ (2)
298#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
299#define MC_CMD_LOG_CTRL_OUT_LEN 0
300
301/* MC_CMD_GET_VERSION:
302 * Get version information about the MC firmware
303 */
304#define MC_CMD_GET_VERSION 0x08
305#define MC_CMD_GET_VERSION_IN_LEN 0
306#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
307#define MC_CMD_GET_VERSION_V1_OUT_LEN 32
308#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
309/* Reserved version number to indicate "any" version. */
310#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
311/* The version response of a boot ROM awaiting rescue */
312#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000
313#define MC_CMD_GET_VERSION_V1_OUT_PCOL_OFST 4
314/* 128bit mask of functions supported by the current firmware */
315#define MC_CMD_GET_VERSION_V1_OUT_SUPPORTED_FUNCS_OFST 8
316/* The command set exported by the boot ROM (MCDI v0) */ 186/* The command set exported by the boot ROM (MCDI v0) */
317#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ 187#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
318 (1 << MC_CMD_READ32) | \ 188 (1 << MC_CMD_READ32) | \
@@ -320,1456 +190,2214 @@
320 (1 << MC_CMD_COPYCODE) | \ 190 (1 << MC_CMD_COPYCODE) | \
321 (1 << MC_CMD_GET_VERSION), \ 191 (1 << MC_CMD_GET_VERSION), \
322 0, 0, 0 } 192 0, 0, 0 }
323#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
324 193
325/* Vectors in the boot ROM */ 194#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
326/* Point to the copycode entry point. */ 195 (MC_CMD_SENSOR_ENTRY_OFST + (_x))
327#define MC_BOOTROM_COPYCODE_VEC (0x7f4) 196
328/* Points to the recovery mode entry point. */ 197#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
329#define MC_BOOTROM_NOFLASH_VEC (0x7f8) 198 (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
199 MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
200 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
201
202#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
203 (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
204 MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
205 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
206
207#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
208 (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
209 MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
210 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
211
212
213/* MCDI_EVENT structuredef */
214#define MCDI_EVENT_LEN 8
215#define MCDI_EVENT_CONT_LBN 32
216#define MCDI_EVENT_CONT_WIDTH 1
217#define MCDI_EVENT_LEVEL_LBN 33
218#define MCDI_EVENT_LEVEL_WIDTH 3
219#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */
220#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */
221#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */
222#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */
223#define MCDI_EVENT_DATA_OFST 0
224#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
225#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
226#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
227#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
228#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
229#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
230#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
231#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
232#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
233#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
234#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */
235#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */
236#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */
237#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
238#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
239#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
240#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
241#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
242#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
243#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
244#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
245#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
246#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
247#define MCDI_EVENT_FWALERT_DATA_LBN 8
248#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
249#define MCDI_EVENT_FWALERT_REASON_LBN 0
250#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
251#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */
252#define MCDI_EVENT_FLR_VF_LBN 0
253#define MCDI_EVENT_FLR_VF_WIDTH 8
254#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
255#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
256#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
257#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
258#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */
259#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */
260#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */
261#define MCDI_EVENT_TX_ERR_INFO_LBN 16
262#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
263#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
264#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
265#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
266#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
267#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */
268#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */
269#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */
270#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */
271#define MCDI_EVENT_DATA_LBN 0
272#define MCDI_EVENT_DATA_WIDTH 32
273#define MCDI_EVENT_SRC_LBN 36
274#define MCDI_EVENT_SRC_WIDTH 8
275#define MCDI_EVENT_EV_CODE_LBN 60
276#define MCDI_EVENT_EV_CODE_WIDTH 4
277#define MCDI_EVENT_CODE_LBN 44
278#define MCDI_EVENT_CODE_WIDTH 8
279#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */
280#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */
281#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */
282#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */
283#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */
284#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */
285#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */
286#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */
287#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */
288#define MCDI_EVENT_CODE_FLR 0xa /* enum */
289#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */
290#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
291#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
292#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
293#define MCDI_EVENT_CMDDONE_DATA_OFST 0
294#define MCDI_EVENT_CMDDONE_DATA_LBN 0
295#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
296#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
297#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
298#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
299#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
300#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
301#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
302#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
303#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
304#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
305#define MCDI_EVENT_TX_ERR_DATA_OFST 0
306#define MCDI_EVENT_TX_ERR_DATA_LBN 0
307#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
308#define MCDI_EVENT_PTP_SECONDS_OFST 0
309#define MCDI_EVENT_PTP_SECONDS_LBN 0
310#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
311#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
312#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
313#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
314#define MCDI_EVENT_PTP_UUID_OFST 0
315#define MCDI_EVENT_PTP_UUID_LBN 0
316#define MCDI_EVENT_PTP_UUID_WIDTH 32
317
318
319/***********************************/
320/* MC_CMD_READ32
321 * Read multiple 32byte words from MC memory.
322 */
323#define MC_CMD_READ32 0x1
324
325/* MC_CMD_READ32_IN msgrequest */
326#define MC_CMD_READ32_IN_LEN 8
327#define MC_CMD_READ32_IN_ADDR_OFST 0
328#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
329
330/* MC_CMD_READ32_OUT msgresponse */
331#define MC_CMD_READ32_OUT_LENMIN 4
332#define MC_CMD_READ32_OUT_LENMAX 252
333#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
334#define MC_CMD_READ32_OUT_BUFFER_OFST 0
335#define MC_CMD_READ32_OUT_BUFFER_LEN 4
336#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
337#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
338
339
340/***********************************/
341/* MC_CMD_WRITE32
342 * Write multiple 32byte words to MC memory.
343 */
344#define MC_CMD_WRITE32 0x2
345
346/* MC_CMD_WRITE32_IN msgrequest */
347#define MC_CMD_WRITE32_IN_LENMIN 8
348#define MC_CMD_WRITE32_IN_LENMAX 252
349#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
350#define MC_CMD_WRITE32_IN_ADDR_OFST 0
351#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
352#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
353#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
354#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
355
356/* MC_CMD_WRITE32_OUT msgresponse */
357#define MC_CMD_WRITE32_OUT_LEN 0
358
359
360/***********************************/
361/* MC_CMD_COPYCODE
362 * Copy MC code between two locations and jump.
363 */
364#define MC_CMD_COPYCODE 0x3
365
366/* MC_CMD_COPYCODE_IN msgrequest */
367#define MC_CMD_COPYCODE_IN_LEN 16
368#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
369#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
370#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
371#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
372#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */
373
374/* MC_CMD_COPYCODE_OUT msgresponse */
375#define MC_CMD_COPYCODE_OUT_LEN 0
376
377
378/***********************************/
379/* MC_CMD_SET_FUNC
380 */
381#define MC_CMD_SET_FUNC 0x4
382
383/* MC_CMD_SET_FUNC_IN msgrequest */
384#define MC_CMD_SET_FUNC_IN_LEN 4
385#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
386
387/* MC_CMD_SET_FUNC_OUT msgresponse */
388#define MC_CMD_SET_FUNC_OUT_LEN 0
389
390
391/***********************************/
392/* MC_CMD_GET_BOOT_STATUS
393 */
394#define MC_CMD_GET_BOOT_STATUS 0x5
395
396/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
397#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
398
399/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
400#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
401#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
402#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
403#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
404#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
405#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
406#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
407#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
408#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
330 409
331/* Test execution limits */
332#define MC_TESTEXEC_VARIANT_COUNT 16
333#define MC_TESTEXEC_RESULT_COUNT 7
334 410
335/* MC_CMD_SET_TESTVARS: (debug, variadic in) 411/***********************************/
336 * Write variant words for test. 412/* MC_CMD_GET_ASSERTS
337 * 413 * Get and clear any assertion status.
338 * The user supplies a bitmap of the variants they wish to set.
339 * They must ensure that IN.LEN >= 4 + 4 * ffs(BITMAP)
340 */
341#define MC_CMD_SET_TESTVARS 0x09
342#define MC_CMD_SET_TESTVARS_IN_LEN(_numwords) \
343 (4 + 4*(_numwords))
344#define MC_CMD_SET_TESTVARS_IN_ARGS_BITMAP_OFST 0
345/* Up to MC_TESTEXEC_VARIANT_COUNT of 32byte words start here */
346#define MC_CMD_SET_TESTVARS_IN_ARGS_BUFFER_OFST 4
347#define MC_CMD_SET_TESTVARS_OUT_LEN 0
348
349/* MC_CMD_GET_TESTRCS: (debug, variadic out)
350 * Return result words from test.
351 */
352#define MC_CMD_GET_TESTRCS 0x0a
353#define MC_CMD_GET_TESTRCS_IN_LEN 4
354#define MC_CMD_GET_TESTRCS_IN_NUMWORDS_OFST 0
355#define MC_CMD_GET_TESTRCS_OUT_LEN(_numwords) \
356 (4 * (_numwords))
357#define MC_CMD_GET_TESTRCS_OUT_BUFFER_OFST 0
358
359/* MC_CMD_RUN_TEST: (debug)
360 * Run the test exported by this firmware image
361 */
362#define MC_CMD_RUN_TEST 0x0b
363#define MC_CMD_RUN_TEST_IN_LEN 0
364#define MC_CMD_RUN_TEST_OUT_LEN 0
365
366/* MC_CMD_CSR_READ32: (debug, variadic out)
367 * Read 32bit words from the indirect memory map
368 */
369#define MC_CMD_CSR_READ32 0x0c
370#define MC_CMD_CSR_READ32_IN_LEN 12
371#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
372#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
373#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
374#define MC_CMD_CSR_READ32_OUT_LEN(_numwords) \
375 (((_numwords) * 4) + 4)
376/* IN.NUMWORDS of 32bit words start here */
377#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
378#define MC_CMD_CSR_READ32_OUT_IREG_STATUS_OFST(_numwords) \
379 ((_numwords) * 4)
380
381/* MC_CMD_CSR_WRITE32: (debug, variadic in)
382 * Write 32bit dwords to the indirect memory map
383 */
384#define MC_CMD_CSR_WRITE32 0x0d
385#define MC_CMD_CSR_WRITE32_IN_LEN(_numwords) \
386 (((_numwords) * 4) + 8)
387#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
388#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
389/* Multiple 32bit words of data to write start here */
390#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
391#define MC_CMD_CSR_WRITE32_OUT_LEN 4
392#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
393
394/* MC_CMD_JTAG_WORK: (debug, fpga only)
395 * Process JTAG work buffer for RBF acceleration.
396 *
397 * Host: bit count, (up to) 32 words of data to clock out to JTAG
398 * (bits 1,0=TMS,TDO for first bit; bits 3,2=TMS,TDO for second bit, etc.)
399 * MC: bit count, (up to) 32 words of data clocked in from JTAG
400 * (bit 0=TDI for first bit, bit 1=TDI for second bit, etc.; [31:16] unused)
401 */ 414 */
402#define MC_CMD_JTAG_WORK 0x0e 415#define MC_CMD_GET_ASSERTS 0x6
403 416
404/* MC_CMD_STACKINFO: (debug, variadic out) 417/* MC_CMD_GET_ASSERTS_IN msgrequest */
405 * Get stack information 418#define MC_CMD_GET_ASSERTS_IN_LEN 4
406 * 419#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
407 * Host: nothing 420
408 * MC: (thread ptr, stack size, free space) for each thread in system 421/* MC_CMD_GET_ASSERTS_OUT msgresponse */
409 */ 422#define MC_CMD_GET_ASSERTS_OUT_LEN 140
410#define MC_CMD_STACKINFO 0x0f 423#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
424#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */
425#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */
426#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */
427#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */
428#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
429#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
430#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
431#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
432#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
433#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
411 434
412/* MC_CMD_MDIO_READ: 435
413 * MDIO register read 436/***********************************/
437/* MC_CMD_LOG_CTRL
438 * Configure the output stream for various events and messages.
439 */
440#define MC_CMD_LOG_CTRL 0x7
441
442/* MC_CMD_LOG_CTRL_IN msgrequest */
443#define MC_CMD_LOG_CTRL_IN_LEN 8
444#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
445#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */
446#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */
447#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
448
449/* MC_CMD_LOG_CTRL_OUT msgresponse */
450#define MC_CMD_LOG_CTRL_OUT_LEN 0
451
452
453/***********************************/
454/* MC_CMD_GET_VERSION
455 * Get version information about the MC firmware.
456 */
457#define MC_CMD_GET_VERSION 0x8
458
459/* MC_CMD_GET_VERSION_IN msgrequest */
460#define MC_CMD_GET_VERSION_IN_LEN 0
461
462/* MC_CMD_GET_VERSION_V0_OUT msgresponse */
463#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
464#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
465#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */
466#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */
467
468/* MC_CMD_GET_VERSION_OUT msgresponse */
469#define MC_CMD_GET_VERSION_OUT_LEN 32
470/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
471/* Enum values, see field(s): */
472/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
473#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
474#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
475#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
476#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
477#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
478#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
479#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
480
481
482/***********************************/
483/* MC_CMD_GET_FPGAREG
484 * Read multiple bytes from PTP FPGA.
485 */
486#define MC_CMD_GET_FPGAREG 0x9
487
488/* MC_CMD_GET_FPGAREG_IN msgrequest */
489#define MC_CMD_GET_FPGAREG_IN_LEN 8
490#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0
491#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4
492
493/* MC_CMD_GET_FPGAREG_OUT msgresponse */
494#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
495#define MC_CMD_GET_FPGAREG_OUT_LENMAX 255
496#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
497#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
498#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
499#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
500#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255
501
502
503/***********************************/
504/* MC_CMD_PUT_FPGAREG
505 * Write multiple bytes to PTP FPGA.
506 */
507#define MC_CMD_PUT_FPGAREG 0xa
508
509/* MC_CMD_PUT_FPGAREG_IN msgrequest */
510#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
511#define MC_CMD_PUT_FPGAREG_IN_LENMAX 255
512#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
513#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
514#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
515#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
516#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
517#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251
518
519/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
520#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
521
522
523/***********************************/
524/* MC_CMD_PTP
525 * Perform PTP operation
526 */
527#define MC_CMD_PTP 0xb
528
529/* MC_CMD_PTP_IN msgrequest */
530#define MC_CMD_PTP_IN_LEN 1
531#define MC_CMD_PTP_IN_OP_OFST 0
532#define MC_CMD_PTP_IN_OP_LEN 1
533#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */
534#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */
535#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */
536#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */
537#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */
538#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */
539#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */
540#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */
541#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */
542#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */
543#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */
544#define MC_CMD_PTP_OP_MAX 0xc /* enum */
545
546/* MC_CMD_PTP_IN_ENABLE msgrequest */
547#define MC_CMD_PTP_IN_ENABLE_LEN 16
548#define MC_CMD_PTP_IN_CMD_OFST 0
549#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
550#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
551#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
552#define MC_CMD_PTP_MODE_V1 0x0 /* enum */
553#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
554#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
555#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
556
557/* MC_CMD_PTP_IN_DISABLE msgrequest */
558#define MC_CMD_PTP_IN_DISABLE_LEN 8
559/* MC_CMD_PTP_IN_CMD_OFST 0 */
560/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
561
562/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
563#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
564#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 255
565#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
566/* MC_CMD_PTP_IN_CMD_OFST 0 */
567/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
568#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
569#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
570#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
571#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
572#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243
573
574/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
575#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
576/* MC_CMD_PTP_IN_CMD_OFST 0 */
577/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
578
579/* MC_CMD_PTP_IN_STATUS msgrequest */
580#define MC_CMD_PTP_IN_STATUS_LEN 8
581/* MC_CMD_PTP_IN_CMD_OFST 0 */
582/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
583
584/* MC_CMD_PTP_IN_ADJUST msgrequest */
585#define MC_CMD_PTP_IN_ADJUST_LEN 24
586/* MC_CMD_PTP_IN_CMD_OFST 0 */
587/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
588#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
589#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
590#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
591#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
592#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */
593#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
594#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
595
596/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
597#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
598/* MC_CMD_PTP_IN_CMD_OFST 0 */
599/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
600#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
601#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
602#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
603#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
604#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
605
606/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
607#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
608/* MC_CMD_PTP_IN_CMD_OFST 0 */
609/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
610
611/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
612#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
613/* MC_CMD_PTP_IN_CMD_OFST 0 */
614/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
615#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
616
617/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
618#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
619/* MC_CMD_PTP_IN_CMD_OFST 0 */
620/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
621
622/* MC_CMD_PTP_IN_DEBUG msgrequest */
623#define MC_CMD_PTP_IN_DEBUG_LEN 12
624/* MC_CMD_PTP_IN_CMD_OFST 0 */
625/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
626#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
627
628/* MC_CMD_PTP_OUT msgresponse */
629#define MC_CMD_PTP_OUT_LEN 0
630
631/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
632#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
633#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
634#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
635
636/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
637#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
638#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
639#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
640
641/* MC_CMD_PTP_OUT_STATUS msgresponse */
642#define MC_CMD_PTP_OUT_STATUS_LEN 64
643#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
644#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
645#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
646#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
647#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
648#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
649#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
650#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
651#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
652#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
653#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
654#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
655#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
656#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
657#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
658#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
659
660/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
661#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
662#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
663#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
664#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
665#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
666#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
667#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
668#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
669#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
670#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
671#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
672#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
673
674/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
675#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
676#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
677#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */
678#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */
679#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */
680#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */
681#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */
682#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */
683#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */
684#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */
685#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */
686#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */
687#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
688
689/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
690#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
691#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
692#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
693#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
694
695
696/***********************************/
697/* MC_CMD_CSR_READ32
698 * Read 32bit words from the indirect memory map.
699 */
700#define MC_CMD_CSR_READ32 0xc
701
702/* MC_CMD_CSR_READ32_IN msgrequest */
703#define MC_CMD_CSR_READ32_IN_LEN 12
704#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
705#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
706#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
707
708/* MC_CMD_CSR_READ32_OUT msgresponse */
709#define MC_CMD_CSR_READ32_OUT_LENMIN 4
710#define MC_CMD_CSR_READ32_OUT_LENMAX 252
711#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
712#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
713#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
714#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
715#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
716
717
718/***********************************/
719/* MC_CMD_CSR_WRITE32
720 * Write 32bit dwords to the indirect memory map.
721 */
722#define MC_CMD_CSR_WRITE32 0xd
723
724/* MC_CMD_CSR_WRITE32_IN msgrequest */
725#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
726#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
727#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
728#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
729#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
730#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
731#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
732#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
733#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
734
735/* MC_CMD_CSR_WRITE32_OUT msgresponse */
736#define MC_CMD_CSR_WRITE32_OUT_LEN 4
737#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
738
739
740/***********************************/
741/* MC_CMD_STACKINFO
742 * Get stack information.
743 */
744#define MC_CMD_STACKINFO 0xf
745
746/* MC_CMD_STACKINFO_IN msgrequest */
747#define MC_CMD_STACKINFO_IN_LEN 0
748
749/* MC_CMD_STACKINFO_OUT msgresponse */
750#define MC_CMD_STACKINFO_OUT_LENMIN 12
751#define MC_CMD_STACKINFO_OUT_LENMAX 252
752#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
753#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
754#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
755#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
756#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
757
758
759/***********************************/
760/* MC_CMD_MDIO_READ
761 * MDIO register read.
414 */ 762 */
415#define MC_CMD_MDIO_READ 0x10 763#define MC_CMD_MDIO_READ 0x10
416#define MC_CMD_MDIO_READ_IN_LEN 16
417#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
418#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
419#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
420#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
421#define MC_CMD_MDIO_READ_OUT_LEN 8
422#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
423#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
424
425/* MC_CMD_MDIO_WRITE:
426 * MDIO register write
427 */
428#define MC_CMD_MDIO_WRITE 0x11
429#define MC_CMD_MDIO_WRITE_IN_LEN 20
430#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
431#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
432#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
433#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
434#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
435#define MC_CMD_MDIO_WRITE_OUT_LEN 4
436#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
437 764
438/* By default all the MCDI MDIO operations perform clause45 mode. 765/* MC_CMD_MDIO_READ_IN msgrequest */
439 * If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. 766#define MC_CMD_MDIO_READ_IN_LEN 16
440 */ 767#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
441#define MC_CMD_MDIO_CLAUSE22 32 768#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
769#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
770#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
771#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
772#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
773#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
442 774
443/* There are two MDIO buses: one for the internal PHY, and one for external 775/* MC_CMD_MDIO_READ_OUT msgresponse */
444 * devices. 776#define MC_CMD_MDIO_READ_OUT_LEN 8
445 */ 777#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
446#define MC_CMD_MDIO_BUS_INTERNAL 0 778#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
447#define MC_CMD_MDIO_BUS_EXTERNAL 1 779#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
448 780
449/* The MDIO commands return the raw status bits from the MDIO block. A "good" 781
450 * transaction should have the DONE bit set and all other bits clear. 782/***********************************/
783/* MC_CMD_MDIO_WRITE
784 * MDIO register write.
451 */ 785 */
452#define MC_CMD_MDIO_STATUS_GOOD 0x08 786#define MC_CMD_MDIO_WRITE 0x11
453 787
788/* MC_CMD_MDIO_WRITE_IN msgrequest */
789#define MC_CMD_MDIO_WRITE_IN_LEN 20
790#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
791/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
792/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
793#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
794#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
795/* MC_CMD_MDIO_CLAUSE22 0x20 */
796#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
797#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
454 798
455/* MC_CMD_DBI_WRITE: (debug) 799/* MC_CMD_MDIO_WRITE_OUT msgresponse */
456 * Write DBI register(s) 800#define MC_CMD_MDIO_WRITE_OUT_LEN 4
457 * 801#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
458 * Host: address, byte-enables (and VF selection, and cs2 flag), 802/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
459 * value [,address ...] 803
460 * MC: nothing 804
805/***********************************/
806/* MC_CMD_DBI_WRITE
807 * Write DBI register(s).
461 */ 808 */
462#define MC_CMD_DBI_WRITE 0x12 809#define MC_CMD_DBI_WRITE 0x12
463#define MC_CMD_DBI_WRITE_IN_LEN(_numwords) \ 810
464 (12 * (_numwords)) 811/* MC_CMD_DBI_WRITE_IN msgrequest */
465#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(_word) \ 812#define MC_CMD_DBI_WRITE_IN_LENMIN 12
466 (((_word) * 12) + 0) 813#define MC_CMD_DBI_WRITE_IN_LENMAX 252
467#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(_word) \ 814#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
468 (((_word) * 12) + 4) 815#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
469#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(_word) \ 816#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
470 (((_word) * 12) + 8) 817#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
471#define MC_CMD_DBI_WRITE_OUT_LEN 0 818#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
472 819
473/* MC_CMD_DBI_READ: (debug) 820/* MC_CMD_DBI_WRITE_OUT msgresponse */
474 * Read DBI register(s) 821#define MC_CMD_DBI_WRITE_OUT_LEN 0
475 * 822
476 * Host: address, [,address ...] 823/* MC_CMD_DBIWROP_TYPEDEF structuredef */
477 * MC: value [,value ...] 824#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
478 * (note: this does not support reading from VFs, but is retained for backwards 825#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
479 * compatibility; see MC_CMD_DBI_READX below) 826#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
480 */ 827#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
481#define MC_CMD_DBI_READ 0x13 828#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
482#define MC_CMD_DBI_READ_IN_LEN(_numwords) \ 829#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
483 (4 * (_numwords)) 830#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
484#define MC_CMD_DBI_READ_OUT_LEN(_numwords) \ 831#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
485 (4 * (_numwords)) 832#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
486 833#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
487/* MC_CMD_PORT_READ32: (debug) 834
835
836/***********************************/
837/* MC_CMD_PORT_READ32
488 * Read a 32-bit register from the indirect port register map. 838 * Read a 32-bit register from the indirect port register map.
489 *
490 * The port to access is implied by the Shared memory channel used.
491 */ 839 */
492#define MC_CMD_PORT_READ32 0x14 840#define MC_CMD_PORT_READ32 0x14
493#define MC_CMD_PORT_READ32_IN_LEN 4
494#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
495#define MC_CMD_PORT_READ32_OUT_LEN 8
496#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
497#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
498 841
499/* MC_CMD_PORT_WRITE32: (debug) 842/* MC_CMD_PORT_READ32_IN msgrequest */
843#define MC_CMD_PORT_READ32_IN_LEN 4
844#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
845
846/* MC_CMD_PORT_READ32_OUT msgresponse */
847#define MC_CMD_PORT_READ32_OUT_LEN 8
848#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
849#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
850
851
852/***********************************/
853/* MC_CMD_PORT_WRITE32
500 * Write a 32-bit register to the indirect port register map. 854 * Write a 32-bit register to the indirect port register map.
501 *
502 * The port to access is implied by the Shared memory channel used.
503 */ 855 */
504#define MC_CMD_PORT_WRITE32 0x15 856#define MC_CMD_PORT_WRITE32 0x15
505#define MC_CMD_PORT_WRITE32_IN_LEN 8 857
506#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 858/* MC_CMD_PORT_WRITE32_IN msgrequest */
507#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 859#define MC_CMD_PORT_WRITE32_IN_LEN 8
508#define MC_CMD_PORT_WRITE32_OUT_LEN 4 860#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
509#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 861#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
510 862
511/* MC_CMD_PORT_READ128: (debug) 863/* MC_CMD_PORT_WRITE32_OUT msgresponse */
512 * Read a 128-bit register from indirect port register map 864#define MC_CMD_PORT_WRITE32_OUT_LEN 4
513 * 865#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
514 * The port to access is implied by the Shared memory channel used. 866
867
868/***********************************/
869/* MC_CMD_PORT_READ128
870 * Read a 128-bit register from the indirect port register map.
515 */ 871 */
516#define MC_CMD_PORT_READ128 0x16 872#define MC_CMD_PORT_READ128 0x16
517#define MC_CMD_PORT_READ128_IN_LEN 4 873
518#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 874/* MC_CMD_PORT_READ128_IN msgrequest */
519#define MC_CMD_PORT_READ128_OUT_LEN 20 875#define MC_CMD_PORT_READ128_IN_LEN 4
520#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 876#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
521#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 877
522 878/* MC_CMD_PORT_READ128_OUT msgresponse */
523/* MC_CMD_PORT_WRITE128: (debug) 879#define MC_CMD_PORT_READ128_OUT_LEN 20
524 * Write a 128-bit register to indirect port register map. 880#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
525 * 881#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
526 * The port to access is implied by the Shared memory channel used. 882#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
883
884
885/***********************************/
886/* MC_CMD_PORT_WRITE128
887 * Write a 128-bit register to the indirect port register map.
527 */ 888 */
528#define MC_CMD_PORT_WRITE128 0x17 889#define MC_CMD_PORT_WRITE128 0x17
529#define MC_CMD_PORT_WRITE128_IN_LEN 20 890
530#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 891/* MC_CMD_PORT_WRITE128_IN msgrequest */
531#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 892#define MC_CMD_PORT_WRITE128_IN_LEN 20
532#define MC_CMD_PORT_WRITE128_OUT_LEN 4 893#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
533#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 894#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
534 895#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
535/* MC_CMD_GET_BOARD_CFG: 896
536 * Returns the MC firmware configuration structure 897/* MC_CMD_PORT_WRITE128_OUT msgresponse */
537 * 898#define MC_CMD_PORT_WRITE128_OUT_LEN 4
538 * The FW_SUBTYPE_LIST contains a 16-bit value for each of the 12 types of 899#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
539 * NVRAM area. The values are defined in the firmware/mc/platform/<xxx>.c file 900
540 * for a specific board type, but otherwise have no meaning to the MC; they 901
541 * are used by the driver to manage selection of appropriate firmware updates. 902/***********************************/
903/* MC_CMD_GET_BOARD_CFG
904 * Returns the MC firmware configuration structure.
542 */ 905 */
543#define MC_CMD_GET_BOARD_CFG 0x18 906#define MC_CMD_GET_BOARD_CFG 0x18
544#define MC_CMD_GET_BOARD_CFG_IN_LEN 0 907
545#define MC_CMD_GET_BOARD_CFG_OUT_LEN 96 908/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
546#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 909#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
547#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 910
548#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 911/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
549#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 912#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
550#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 913#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
551#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 914#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
552#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 915#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
553#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 916#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
554#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 917#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
555#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 918#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
556#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 919#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
557#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 920#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
558#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 921#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
559#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 922#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
560#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 24 923#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
561 924#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
562/* MC_CMD_DBI_READX: (debug) 925#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
563 * Read DBI register(s) -- extended functionality 926#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
564 * 927#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
565 * Host: vf selection, address, [,vf selection ...] 928/* Enum values, see field(s): */
566 * MC: value [,value ...] 929/* CAPABILITIES_PORT0 */
930#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
931#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
932#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
933#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
934#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
935#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
936#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
937#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
938#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
939#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
940#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
941#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
942
943
944/***********************************/
945/* MC_CMD_DBI_READX
946 * Read DBI register(s).
567 */ 947 */
568#define MC_CMD_DBI_READX 0x19 948#define MC_CMD_DBI_READX 0x19
569#define MC_CMD_DBI_READX_IN_LEN(_numwords) \
570 (8*(_numwords))
571#define MC_CMD_DBI_READX_OUT_LEN(_numwords) \
572 (4*(_numwords))
573 949
574/* MC_CMD_SET_RAND_SEED: 950/* MC_CMD_DBI_READX_IN msgrequest */
575 * Set the 16byte seed for the MC pseudo-random generator 951#define MC_CMD_DBI_READX_IN_LENMIN 8
952#define MC_CMD_DBI_READX_IN_LENMAX 248
953#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
954#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
955#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
956#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
957#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
958#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
959#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
960
961/* MC_CMD_DBI_READX_OUT msgresponse */
962#define MC_CMD_DBI_READX_OUT_LENMIN 4
963#define MC_CMD_DBI_READX_OUT_LENMAX 252
964#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
965#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
966#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
967#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
968#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
969
970
971/***********************************/
972/* MC_CMD_SET_RAND_SEED
973 * Set the 16byte seed for the MC pseudo-random generator.
576 */ 974 */
577#define MC_CMD_SET_RAND_SEED 0x1a 975#define MC_CMD_SET_RAND_SEED 0x1a
578#define MC_CMD_SET_RAND_SEED_IN_LEN 16
579#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
580#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
581 976
582/* MC_CMD_LTSSM_HIST: (debug) 977/* MC_CMD_SET_RAND_SEED_IN msgrequest */
583 * Retrieve the history of the LTSSM, if the build supports it. 978#define MC_CMD_SET_RAND_SEED_IN_LEN 16
584 * 979#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
585 * Host: nothing 980#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
586 * MC: variable number of LTSSM values, as bytes 981
587 * The history is read-to-clear. 982/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
983#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
984
985
986/***********************************/
987/* MC_CMD_LTSSM_HIST
988 * Retrieve the history of the PCIE LTSSM.
588 */ 989 */
589#define MC_CMD_LTSSM_HIST 0x1b 990#define MC_CMD_LTSSM_HIST 0x1b
590 991
591/* MC_CMD_DRV_ATTACH: 992/* MC_CMD_LTSSM_HIST_IN msgrequest */
592 * Inform MCPU that this port is managed on the host (i.e. driver active) 993#define MC_CMD_LTSSM_HIST_IN_LEN 0
994
995/* MC_CMD_LTSSM_HIST_OUT msgresponse */
996#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
997#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
998#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
999#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
1000#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
1001#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
1002#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
1003
1004
1005/***********************************/
1006/* MC_CMD_DRV_ATTACH
1007 * Inform MCPU that this port is managed on the host.
593 */ 1008 */
594#define MC_CMD_DRV_ATTACH 0x1c 1009#define MC_CMD_DRV_ATTACH 0x1c
595#define MC_CMD_DRV_ATTACH_IN_LEN 8
596#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
597#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
598#define MC_CMD_DRV_ATTACH_OUT_LEN 4
599#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
600 1010
601/* MC_CMD_NCSI_PROD: (debug) 1011/* MC_CMD_DRV_ATTACH_IN msgrequest */
602 * Trigger an NC-SI event (and possibly an AEN in response) 1012#define MC_CMD_DRV_ATTACH_IN_LEN 8
1013#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
1014#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
1015
1016/* MC_CMD_DRV_ATTACH_OUT msgresponse */
1017#define MC_CMD_DRV_ATTACH_OUT_LEN 4
1018#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
1019
1020
1021/***********************************/
1022/* MC_CMD_NCSI_PROD
1023 * Trigger an NC-SI event.
603 */ 1024 */
604#define MC_CMD_NCSI_PROD 0x1d 1025#define MC_CMD_NCSI_PROD 0x1d
605#define MC_CMD_NCSI_PROD_IN_LEN 4 1026
606#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0 1027/* MC_CMD_NCSI_PROD_IN msgrequest */
607#define MC_CMD_NCSI_PROD_LINKCHANGE_LBN 0 1028#define MC_CMD_NCSI_PROD_IN_LEN 4
608#define MC_CMD_NCSI_PROD_LINKCHANGE_WIDTH 1 1029#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
609#define MC_CMD_NCSI_PROD_RESET_LBN 1 1030#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
610#define MC_CMD_NCSI_PROD_RESET_WIDTH 1 1031#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
611#define MC_CMD_NCSI_PROD_DRVATTACH_LBN 2 1032#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
612#define MC_CMD_NCSI_PROD_DRVATTACH_WIDTH 1 1033#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
613#define MC_CMD_NCSI_PROD_OUT_LEN 0 1034#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
614 1035#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
615/* Enumeration */ 1036#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
616#define MC_CMD_NCSI_PROD_LINKCHANGE 0 1037#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
617#define MC_CMD_NCSI_PROD_RESET 1 1038#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
618#define MC_CMD_NCSI_PROD_DRVATTACH 2 1039
619 1040/* MC_CMD_NCSI_PROD_OUT msgresponse */
620/* MC_CMD_DEVEL: (debug) 1041#define MC_CMD_NCSI_PROD_OUT_LEN 0
621 * Reserved for development 1042
622 */ 1043
623#define MC_CMD_DEVEL 0x1e 1044/***********************************/
624 1045/* MC_CMD_SHMUART
625/* MC_CMD_SHMUART: (debug)
626 * Route UART output to circular buffer in shared memory instead. 1046 * Route UART output to circular buffer in shared memory instead.
627 */ 1047 */
628#define MC_CMD_SHMUART 0x1f 1048#define MC_CMD_SHMUART 0x1f
629#define MC_CMD_SHMUART_IN_FLAG_OFST 0
630#define MC_CMD_SHMUART_IN_LEN 4
631#define MC_CMD_SHMUART_OUT_LEN 0
632 1049
633/* MC_CMD_PORT_RESET: 1050/* MC_CMD_SHMUART_IN msgrequest */
634 * Generic per-port reset. There is no equivalent for per-board reset. 1051#define MC_CMD_SHMUART_IN_LEN 4
635 * 1052#define MC_CMD_SHMUART_IN_FLAG_OFST 0
636 * Locks required: None 1053
637 * Return code: 0, ETIME 1054/* MC_CMD_SHMUART_OUT msgresponse */
638 */ 1055#define MC_CMD_SHMUART_OUT_LEN 0
639#define MC_CMD_PORT_RESET 0x20 1056
640#define MC_CMD_PORT_RESET_IN_LEN 0 1057
641#define MC_CMD_PORT_RESET_OUT_LEN 0 1058/***********************************/
642 1059/* MC_CMD_ENTITY_RESET
643/* MC_CMD_RESOURCE_LOCK: 1060 * Generic per-port reset.
644 * Generic resource lock/unlock interface. 1061 */
645 * 1062#define MC_CMD_ENTITY_RESET 0x20
646 * Locks required: None 1063
647 * Return code: 0, 1064/* MC_CMD_ENTITY_RESET_IN msgrequest */
648 * EBUSY (if trylock is contended by other port), 1065#define MC_CMD_ENTITY_RESET_IN_LEN 4
649 * EDEADLK (if trylock is already acquired by this port) 1066#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
650 * EINVAL (if unlock doesn't own the lock) 1067#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
651 */ 1068#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
652#define MC_CMD_RESOURCE_LOCK 0x21 1069
653#define MC_CMD_RESOURCE_LOCK_IN_LEN 8 1070/* MC_CMD_ENTITY_RESET_OUT msgresponse */
654#define MC_CMD_RESOURCE_LOCK_IN_ACTION_OFST 0 1071#define MC_CMD_ENTITY_RESET_OUT_LEN 0
655#define MC_CMD_RESOURCE_LOCK_ACTION_TRYLOCK 1 1072
656#define MC_CMD_RESOURCE_LOCK_ACTION_UNLOCK 0 1073
657#define MC_CMD_RESOURCE_LOCK_IN_RESOURCE_OFST 4 1074/***********************************/
658#define MC_CMD_RESOURCE_LOCK_I2C 2 1075/* MC_CMD_PCIE_CREDITS
659#define MC_CMD_RESOURCE_LOCK_PHY 3 1076 * Read instantaneous and minimum flow control thresholds.
660#define MC_CMD_RESOURCE_LOCK_OUT_LEN 0 1077 */
661 1078#define MC_CMD_PCIE_CREDITS 0x21
662/* MC_CMD_SPI_COMMAND: (variadic in, variadic out) 1079
663 * Read/Write to/from the SPI device. 1080/* MC_CMD_PCIE_CREDITS_IN msgrequest */
664 * 1081#define MC_CMD_PCIE_CREDITS_IN_LEN 8
665 * Locks required: SPI_LOCK 1082#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
666 * Return code: 0, ETIME, EINVAL, EACCES (if SPI_LOCK is not held) 1083#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
667 */ 1084
668#define MC_CMD_SPI_COMMAND 0x22 1085/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
669#define MC_CMD_SPI_COMMAND_IN_LEN(_write_bytes) (12 + (_write_bytes)) 1086#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
670#define MC_CMD_SPI_COMMAND_IN_ARGS_OFST 0 1087#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
671#define MC_CMD_SPI_COMMAND_IN_ARGS_ADDRESS_OFST 0 1088#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
672#define MC_CMD_SPI_COMMAND_IN_ARGS_READ_BYTES_OFST 4 1089#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
673#define MC_CMD_SPI_COMMAND_IN_ARGS_CHIP_SELECT_OFST 8 1090#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
674/* Data to write here */ 1091#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
675#define MC_CMD_SPI_COMMAND_IN_WRITE_BUFFER_OFST 12 1092#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
676#define MC_CMD_SPI_COMMAND_OUT_LEN(_read_bytes) (_read_bytes) 1093#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
677/* Data read here */ 1094#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
678#define MC_CMD_SPI_COMMAND_OUT_READ_BUFFER_OFST 0 1095#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
679 1096#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
680/* MC_CMD_I2C_READ_WRITE: (variadic in, variadic out) 1097#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
681 * Read/Write to/from the I2C bus. 1098#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
682 * 1099#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
683 * Locks required: I2C_LOCK 1100#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
684 * Return code: 0, ETIME, EINVAL, EACCES (if I2C_LOCK is not held) 1101#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
685 */ 1102#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
686#define MC_CMD_I2C_RW 0x23 1103
687#define MC_CMD_I2C_RW_IN_LEN(_write_bytes) (8 + (_write_bytes)) 1104
688#define MC_CMD_I2C_RW_IN_ARGS_OFST 0 1105/***********************************/
689#define MC_CMD_I2C_RW_IN_ARGS_ADDR_OFST 0 1106/* MC_CMD_RXD_MONITOR
690#define MC_CMD_I2C_RW_IN_ARGS_READ_BYTES_OFST 4 1107 * Get histogram of RX queue fill level.
691/* Data to write here */ 1108 */
692#define MC_CMD_I2C_RW_IN_WRITE_BUFFER_OFSET 8 1109#define MC_CMD_RXD_MONITOR 0x22
693#define MC_CMD_I2C_RW_OUT_LEN(_read_bytes) (_read_bytes) 1110
694/* Data read here */ 1111/* MC_CMD_RXD_MONITOR_IN msgrequest */
695#define MC_CMD_I2C_RW_OUT_READ_BUFFER_OFST 0 1112#define MC_CMD_RXD_MONITOR_IN_LEN 12
696 1113#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
697/* Generic phy capability bitmask */ 1114#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
698#define MC_CMD_PHY_CAP_10HDX_LBN 1 1115#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
699#define MC_CMD_PHY_CAP_10HDX_WIDTH 1 1116
700#define MC_CMD_PHY_CAP_10FDX_LBN 2 1117/* MC_CMD_RXD_MONITOR_OUT msgresponse */
701#define MC_CMD_PHY_CAP_10FDX_WIDTH 1 1118#define MC_CMD_RXD_MONITOR_OUT_LEN 80
702#define MC_CMD_PHY_CAP_100HDX_LBN 3 1119#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
703#define MC_CMD_PHY_CAP_100HDX_WIDTH 1 1120#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
704#define MC_CMD_PHY_CAP_100FDX_LBN 4 1121#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
705#define MC_CMD_PHY_CAP_100FDX_WIDTH 1 1122#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
706#define MC_CMD_PHY_CAP_1000HDX_LBN 5 1123#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
707#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 1124#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
708#define MC_CMD_PHY_CAP_1000FDX_LBN 6 1125#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
709#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 1126#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
710#define MC_CMD_PHY_CAP_10000FDX_LBN 7 1127#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
711#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 1128#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
712#define MC_CMD_PHY_CAP_PAUSE_LBN 8 1129#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
713#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 1130#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
714#define MC_CMD_PHY_CAP_ASYM_LBN 9 1131#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
715#define MC_CMD_PHY_CAP_ASYM_WIDTH 1 1132#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
716#define MC_CMD_PHY_CAP_AN_LBN 10 1133#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
717#define MC_CMD_PHY_CAP_AN_WIDTH 1 1134#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
718 1135#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
719/* Generic loopback enumeration */ 1136#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
720#define MC_CMD_LOOPBACK_NONE 0 1137#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
721#define MC_CMD_LOOPBACK_DATA 1 1138#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
722#define MC_CMD_LOOPBACK_GMAC 2 1139
723#define MC_CMD_LOOPBACK_XGMII 3 1140
724#define MC_CMD_LOOPBACK_XGXS 4 1141/***********************************/
725#define MC_CMD_LOOPBACK_XAUI 5 1142/* MC_CMD_PUTS
726#define MC_CMD_LOOPBACK_GMII 6 1143 * puts(3) implementation over MCDI
727#define MC_CMD_LOOPBACK_SGMII 7 1144 */
728#define MC_CMD_LOOPBACK_XGBR 8 1145#define MC_CMD_PUTS 0x23
729#define MC_CMD_LOOPBACK_XFI 9 1146
730#define MC_CMD_LOOPBACK_XAUI_FAR 10 1147/* MC_CMD_PUTS_IN msgrequest */
731#define MC_CMD_LOOPBACK_GMII_FAR 11 1148#define MC_CMD_PUTS_IN_LENMIN 13
732#define MC_CMD_LOOPBACK_SGMII_FAR 12 1149#define MC_CMD_PUTS_IN_LENMAX 255
733#define MC_CMD_LOOPBACK_XFI_FAR 13 1150#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
734#define MC_CMD_LOOPBACK_GPHY 14 1151#define MC_CMD_PUTS_IN_DEST_OFST 0
735#define MC_CMD_LOOPBACK_PHYXS 15 1152#define MC_CMD_PUTS_IN_UART_LBN 0
736#define MC_CMD_LOOPBACK_PCS 16 1153#define MC_CMD_PUTS_IN_UART_WIDTH 1
737#define MC_CMD_LOOPBACK_PMAPMD 17 1154#define MC_CMD_PUTS_IN_PORT_LBN 1
738#define MC_CMD_LOOPBACK_XPORT 18 1155#define MC_CMD_PUTS_IN_PORT_WIDTH 1
739#define MC_CMD_LOOPBACK_XGMII_WS 19 1156#define MC_CMD_PUTS_IN_DHOST_OFST 4
740#define MC_CMD_LOOPBACK_XAUI_WS 20 1157#define MC_CMD_PUTS_IN_DHOST_LEN 6
741#define MC_CMD_LOOPBACK_XAUI_WS_FAR 21 1158#define MC_CMD_PUTS_IN_STRING_OFST 12
742#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 22 1159#define MC_CMD_PUTS_IN_STRING_LEN 1
743#define MC_CMD_LOOPBACK_GMII_WS 23 1160#define MC_CMD_PUTS_IN_STRING_MINNUM 1
744#define MC_CMD_LOOPBACK_XFI_WS 24 1161#define MC_CMD_PUTS_IN_STRING_MAXNUM 243
745#define MC_CMD_LOOPBACK_XFI_WS_FAR 25 1162
746#define MC_CMD_LOOPBACK_PHYXS_WS 26 1163/* MC_CMD_PUTS_OUT msgresponse */
747 1164#define MC_CMD_PUTS_OUT_LEN 0
748/* Generic PHY statistics enumeration */ 1165
749#define MC_CMD_OUI 0 1166
750#define MC_CMD_PMA_PMD_LINK_UP 1 1167/***********************************/
751#define MC_CMD_PMA_PMD_RX_FAULT 2 1168/* MC_CMD_GET_PHY_CFG
752#define MC_CMD_PMA_PMD_TX_FAULT 3 1169 * Report PHY configuration.
753#define MC_CMD_PMA_PMD_SIGNAL 4
754#define MC_CMD_PMA_PMD_SNR_A 5
755#define MC_CMD_PMA_PMD_SNR_B 6
756#define MC_CMD_PMA_PMD_SNR_C 7
757#define MC_CMD_PMA_PMD_SNR_D 8
758#define MC_CMD_PCS_LINK_UP 9
759#define MC_CMD_PCS_RX_FAULT 10
760#define MC_CMD_PCS_TX_FAULT 11
761#define MC_CMD_PCS_BER 12
762#define MC_CMD_PCS_BLOCK_ERRORS 13
763#define MC_CMD_PHYXS_LINK_UP 14
764#define MC_CMD_PHYXS_RX_FAULT 15
765#define MC_CMD_PHYXS_TX_FAULT 16
766#define MC_CMD_PHYXS_ALIGN 17
767#define MC_CMD_PHYXS_SYNC 18
768#define MC_CMD_AN_LINK_UP 19
769#define MC_CMD_AN_COMPLETE 20
770#define MC_CMD_AN_10GBT_STATUS 21
771#define MC_CMD_CL22_LINK_UP 22
772#define MC_CMD_PHY_NSTATS 23
773
774/* MC_CMD_GET_PHY_CFG:
775 * Report PHY configuration. This guarantees to succeed even if the PHY is in
776 * a "zombie" state.
777 *
778 * Locks required: None
779 * Return code: 0
780 */ 1170 */
781#define MC_CMD_GET_PHY_CFG 0x24 1171#define MC_CMD_GET_PHY_CFG 0x24
782 1172
783#define MC_CMD_GET_PHY_CFG_IN_LEN 0 1173/* MC_CMD_GET_PHY_CFG_IN msgrequest */
784#define MC_CMD_GET_PHY_CFG_OUT_LEN 72 1174#define MC_CMD_GET_PHY_CFG_IN_LEN 0
785 1175
786#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 1176/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
787#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0 1177#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
788#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1 1178#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
789#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1 1179#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
790#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1 1180#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
791#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2 1181#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
792#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1 1182#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
793#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3 1183#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
794#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1 1184#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
795#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4 1185#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
796#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1 1186#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
797#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5 1187#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
798#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1 1188#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
799#define MC_CMD_GET_PHY_CFG_BIST_LBN 6 1189#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
800#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1 1190#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
801#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 1191#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
802/* Bitmask of supported capabilities */ 1192#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
803#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 1193#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
804#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 1194#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
805#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 1195#define MC_CMD_PHY_CAP_10HDX_LBN 1
806/* PHY statistics bitmap */ 1196#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
807#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 1197#define MC_CMD_PHY_CAP_10FDX_LBN 2
808/* PHY type/name string */ 1198#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
809#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 1199#define MC_CMD_PHY_CAP_100HDX_LBN 3
810#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 1200#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
811#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 1201#define MC_CMD_PHY_CAP_100FDX_LBN 4
812#define MC_CMD_MEDIA_XAUI 1 1202#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
813#define MC_CMD_MEDIA_CX4 2 1203#define MC_CMD_PHY_CAP_1000HDX_LBN 5
814#define MC_CMD_MEDIA_KX4 3 1204#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
815#define MC_CMD_MEDIA_XFP 4 1205#define MC_CMD_PHY_CAP_1000FDX_LBN 6
816#define MC_CMD_MEDIA_SFP_PLUS 5 1206#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
817#define MC_CMD_MEDIA_BASE_T 6 1207#define MC_CMD_PHY_CAP_10000FDX_LBN 7
818/* MDIO "MMDS" supported */ 1208#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
819#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 1209#define MC_CMD_PHY_CAP_PAUSE_LBN 8
820/* Native clause 22 */ 1210#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
821#define MC_CMD_MMD_CLAUSE22 0 1211#define MC_CMD_PHY_CAP_ASYM_LBN 9
822#define MC_CMD_MMD_CLAUSE45_PMAPMD 1 1212#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
823#define MC_CMD_MMD_CLAUSE45_WIS 2 1213#define MC_CMD_PHY_CAP_AN_LBN 10
824#define MC_CMD_MMD_CLAUSE45_PCS 3 1214#define MC_CMD_PHY_CAP_AN_WIDTH 1
825#define MC_CMD_MMD_CLAUSE45_PHYXS 4 1215#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
826#define MC_CMD_MMD_CLAUSE45_DTEXS 5 1216#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
827#define MC_CMD_MMD_CLAUSE45_TC 6 1217#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
828#define MC_CMD_MMD_CLAUSE45_AN 7 1218#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
829/* Clause22 proxied over clause45 by PHY */ 1219#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
830#define MC_CMD_MMD_CLAUSE45_C22EXT 29 1220#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
831#define MC_CMD_MMD_CLAUSE45_VEND1 30 1221#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
832#define MC_CMD_MMD_CLAUSE45_VEND2 31 1222#define MC_CMD_MEDIA_CX4 0x2 /* enum */
833/* PHY stepping version */ 1223#define MC_CMD_MEDIA_KX4 0x3 /* enum */
834#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 1224#define MC_CMD_MEDIA_XFP 0x4 /* enum */
835#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 1225#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
836 1226#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
837/* MC_CMD_START_BIST: 1227#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
1228#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
1229#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
1230#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
1231#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
1232#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
1233#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
1234#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
1235#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
1236#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
1237#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
1238#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
1239#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
1240#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
1241
1242
1243/***********************************/
1244/* MC_CMD_START_BIST
838 * Start a BIST test on the PHY. 1245 * Start a BIST test on the PHY.
839 *
840 * Locks required: PHY_LOCK if doing a PHY BIST
841 * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
842 */ 1246 */
843#define MC_CMD_START_BIST 0x25 1247#define MC_CMD_START_BIST 0x25
844#define MC_CMD_START_BIST_IN_LEN 4 1248
845#define MC_CMD_START_BIST_IN_TYPE_OFST 0 1249/* MC_CMD_START_BIST_IN msgrequest */
846#define MC_CMD_START_BIST_OUT_LEN 0 1250#define MC_CMD_START_BIST_IN_LEN 4
847 1251#define MC_CMD_START_BIST_IN_TYPE_OFST 0
848/* Run the PHY's short cable BIST */ 1252#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
849#define MC_CMD_PHY_BIST_CABLE_SHORT 1 1253#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
850/* Run the PHY's long cable BIST */ 1254#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
851#define MC_CMD_PHY_BIST_CABLE_LONG 2 1255#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
852/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */ 1256#define MC_CMD_PHY_BIST 0x5 /* enum */
853#define MC_CMD_BPX_SERDES_BIST 3 1257
854/* Run the MC loopback tests */ 1258/* MC_CMD_START_BIST_OUT msgresponse */
855#define MC_CMD_MC_LOOPBACK_BIST 4 1259#define MC_CMD_START_BIST_OUT_LEN 0
856/* Run the PHY's standard BIST */ 1260
857#define MC_CMD_PHY_BIST 5 1261
858 1262/***********************************/
859/* MC_CMD_POLL_PHY_BIST: (variadic output) 1263/* MC_CMD_POLL_BIST
860 * Poll for BIST completion 1264 * Poll for BIST completion.
861 *
862 * Returns a single status code, and optionally some PHY specific
863 * bist output. The driver should only consume the BIST output
864 * after validating OUTLEN and PHY_CFG.PHY_TYPE.
865 *
866 * If a driver can't successfully parse the BIST output, it should
867 * still respect the pass/Fail in OUT.RESULT
868 *
869 * Locks required: PHY_LOCK if doing a PHY BIST
870 * Return code: 0, EACCES (if PHY_LOCK is not held)
871 */ 1265 */
872#define MC_CMD_POLL_BIST 0x26 1266#define MC_CMD_POLL_BIST 0x26
873#define MC_CMD_POLL_BIST_IN_LEN 0 1267
874#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN 1268/* MC_CMD_POLL_BIST_IN msgrequest */
875#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 1269#define MC_CMD_POLL_BIST_IN_LEN 0
876#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 1270
877#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 1271/* MC_CMD_POLL_BIST_OUT msgresponse */
878#define MC_CMD_POLL_BIST_RUNNING 1 1272#define MC_CMD_POLL_BIST_OUT_LEN 8
879#define MC_CMD_POLL_BIST_PASSED 2 1273#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
880#define MC_CMD_POLL_BIST_FAILED 3 1274#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
881#define MC_CMD_POLL_BIST_TIMEOUT 4 1275#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
882/* Generic: */ 1276#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
883#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 1277#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
884/* SFT9001-specific: */ 1278#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
885#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 1279
886#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 1280/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
887#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 1281#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
888#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 1282/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
889#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 1283/* Enum values, see field(s): */
890#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 1284/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
891#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 1285#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
892#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 1286#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
893#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1 1287#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
894#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2 1288#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
895#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3 1289#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
896#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4 1290#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
897#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9 1291#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
898/* mrsfp "PHY" driver: */ 1292#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
899#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 1293#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
900#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0 1294#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
901#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1 1295#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
902#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2 1296/* Enum values, see field(s): */
903#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3 1297/* CABLE_STATUS_A */
904#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4 1298#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
905#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5 1299/* Enum values, see field(s): */
906#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6 1300/* CABLE_STATUS_A */
907#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7 1301#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
908#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8 1302/* Enum values, see field(s): */
909 1303/* CABLE_STATUS_A */
910/* MC_CMD_PHY_SPI: (variadic in, variadic out) 1304
911 * Read/Write/Erase the PHY SPI device 1305/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
912 * 1306#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
913 * Locks required: PHY_LOCK 1307/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
914 * Return code: 0, ETIME, EINVAL, EACCES (if PHY_LOCK is not held) 1308/* Enum values, see field(s): */
915 */ 1309/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
916#define MC_CMD_PHY_SPI 0x27 1310#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
917#define MC_CMD_PHY_SPI_IN_LEN(_write_bytes) (12 + (_write_bytes)) 1311#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
918#define MC_CMD_PHY_SPI_IN_ARGS_OFST 0 1312#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
919#define MC_CMD_PHY_SPI_IN_ARGS_ADDR_OFST 0 1313#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
920#define MC_CMD_PHY_SPI_IN_ARGS_READ_BYTES_OFST 4 1314#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
921#define MC_CMD_PHY_SPI_IN_ARGS_ERASE_ALL_OFST 8 1315#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
922/* Data to write here */ 1316#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
923#define MC_CMD_PHY_SPI_IN_WRITE_BUFFER_OFSET 12 1317#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
924#define MC_CMD_PHY_SPI_OUT_LEN(_read_bytes) (_read_bytes) 1318#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
925/* Data read here */ 1319#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
926#define MC_CMD_PHY_SPI_OUT_READ_BUFFER_OFST 0 1320
927 1321
928 1322/***********************************/
929/* MC_CMD_GET_LOOPBACK_MODES: 1323/* MC_CMD_FLUSH_RX_QUEUES
930 * Returns a bitmask of loopback modes evailable at each speed. 1324 * Flush receive queue(s).
931 * 1325 */
932 * Locks required: None 1326#define MC_CMD_FLUSH_RX_QUEUES 0x27
933 * Return code: 0 1327
1328/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
1329#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
1330#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
1331#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
1332#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
1333#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
1334#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
1335#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
1336
1337/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
1338#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
1339
1340
1341/***********************************/
1342/* MC_CMD_GET_LOOPBACK_MODES
1343 * Get port's loopback modes.
934 */ 1344 */
935#define MC_CMD_GET_LOOPBACK_MODES 0x28 1345#define MC_CMD_GET_LOOPBACK_MODES 0x28
936#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 1346
937#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32 1347/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
938#define MC_CMD_GET_LOOPBACK_MODES_100M_OFST 0 1348#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
939#define MC_CMD_GET_LOOPBACK_MODES_1G_OFST 8 1349
940#define MC_CMD_GET_LOOPBACK_MODES_10G_OFST 16 1350/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
941#define MC_CMD_GET_LOOPBACK_MODES_SUGGESTED_OFST 24 1351#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
942 1352#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
943/* Flow control enumeration */ 1353#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
944#define MC_CMD_FCNTL_OFF 0 1354#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
945#define MC_CMD_FCNTL_RESPOND 1 1355#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
946#define MC_CMD_FCNTL_BIDIR 2 1356#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
947/* Auto - Use what the link has autonegotiated 1357#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
948 * - The driver should modify the advertised capabilities via SET_LINK.CAP 1358#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
949 * to control the negotiated flow control mode. 1359#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
950 * - Can only be set if the PHY supports PAUSE+ASYM capabilities 1360#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
951 * - Never returned by GET_LINK as the value programmed into the MAC 1361#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
952 */ 1362#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
953#define MC_CMD_FCNTL_AUTO 3 1363#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
954 1364#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
955/* Generic mac fault bitmask */ 1365#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
956#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 1366#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
957#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 1367#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
958#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 1368#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
959#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 1369#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
960#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 1370#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
961#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 1371#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
962 1372#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
963/* MC_CMD_GET_LINK: 1373#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
964 * Read the unified MAC/PHY link state 1374#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
965 * 1375#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
966 * Locks required: None 1376#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
967 * Return code: 0, ETIME 1377#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
1378#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
1379#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
1380#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
1381#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
1382#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
1383#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
1384#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
1385#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
1386#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
1387/* Enum values, see field(s): */
1388/* 100M */
1389#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
1390#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
1391#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
1392#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
1393/* Enum values, see field(s): */
1394/* 100M */
1395#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
1396#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
1397#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
1398#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
1399/* Enum values, see field(s): */
1400/* 100M */
1401
1402
1403/***********************************/
1404/* MC_CMD_GET_LINK
1405 * Read the unified MAC/PHY link state.
968 */ 1406 */
969#define MC_CMD_GET_LINK 0x29 1407#define MC_CMD_GET_LINK 0x29
970#define MC_CMD_GET_LINK_IN_LEN 0 1408
971#define MC_CMD_GET_LINK_OUT_LEN 28 1409/* MC_CMD_GET_LINK_IN msgrequest */
972/* near-side and link-partner advertised capabilities */ 1410#define MC_CMD_GET_LINK_IN_LEN 0
973#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 1411
974#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 1412/* MC_CMD_GET_LINK_OUT msgresponse */
975/* Autonegotiated speed in mbit/s. The link may still be down 1413#define MC_CMD_GET_LINK_OUT_LEN 28
976 * even if this reads non-zero */ 1414#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
977#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 1415#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
978#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 1416#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
979#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 1417#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
980/* Whether we have overall link up */ 1418/* Enum values, see field(s): */
981#define MC_CMD_GET_LINK_LINK_UP_LBN 0 1419/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
982#define MC_CMD_GET_LINK_LINK_UP_WIDTH 1 1420#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
983#define MC_CMD_GET_LINK_FULL_DUPLEX_LBN 1 1421#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
984#define MC_CMD_GET_LINK_FULL_DUPLEX_WIDTH 1 1422#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
985/* Whether we have link at the layers provided by the BPX */ 1423#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
986#define MC_CMD_GET_LINK_BPX_LINK_LBN 2 1424#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
987#define MC_CMD_GET_LINK_BPX_LINK_WIDTH 1 1425#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
988/* Whether the PHY has external link */ 1426#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
989#define MC_CMD_GET_LINK_PHY_LINK_LBN 3 1427#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
990#define MC_CMD_GET_LINK_PHY_LINK_WIDTH 1 1428#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
991#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 1429#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
992#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 1430#define MC_CMD_FCNTL_OFF 0x0 /* enum */
993 1431#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
994/* MC_CMD_SET_LINK: 1432#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
995 * Write the unified MAC/PHY link configuration 1433#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
996 * 1434#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
997 * A loopback speed of "0" is supported, and means 1435#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
998 * (choose any available speed) 1436#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
999 * 1437#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
1000 * Locks required: None 1438#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
1001 * Return code: 0, EINVAL, ETIME 1439#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
1440#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
1441#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
1442
1443
1444/***********************************/
1445/* MC_CMD_SET_LINK
1446 * Write the unified MAC/PHY link configuration.
1002 */ 1447 */
1003#define MC_CMD_SET_LINK 0x2a 1448#define MC_CMD_SET_LINK 0x2a
1004#define MC_CMD_SET_LINK_IN_LEN 16 1449
1005#define MC_CMD_SET_LINK_IN_CAP_OFST 0 1450/* MC_CMD_SET_LINK_IN msgrequest */
1006#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 1451#define MC_CMD_SET_LINK_IN_LEN 16
1007#define MC_CMD_SET_LINK_LOWPOWER_LBN 0 1452#define MC_CMD_SET_LINK_IN_CAP_OFST 0
1008#define MC_CMD_SET_LINK_LOWPOWER_WIDTH 1 1453#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
1009#define MC_CMD_SET_LINK_POWEROFF_LBN 1 1454#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
1010#define MC_CMD_SET_LINK_POWEROFF_WIDTH 1 1455#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
1011#define MC_CMD_SET_LINK_TXDIS_LBN 2 1456#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
1012#define MC_CMD_SET_LINK_TXDIS_WIDTH 1 1457#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
1013#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 1458#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
1014#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 1459#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
1015#define MC_CMD_SET_LINK_OUT_LEN 0 1460#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
1016 1461/* Enum values, see field(s): */
1017/* MC_CMD_SET_ID_LED: 1462/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
1018 * Set indentification LED state 1463#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
1019 * 1464
1020 * Locks required: None 1465/* MC_CMD_SET_LINK_OUT msgresponse */
1021 * Return code: 0, EINVAL 1466#define MC_CMD_SET_LINK_OUT_LEN 0
1467
1468
1469/***********************************/
1470/* MC_CMD_SET_ID_LED
1471 * Set indentification LED state.
1022 */ 1472 */
1023#define MC_CMD_SET_ID_LED 0x2b 1473#define MC_CMD_SET_ID_LED 0x2b
1024#define MC_CMD_SET_ID_LED_IN_LEN 4 1474
1025#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 1475/* MC_CMD_SET_ID_LED_IN msgrequest */
1026#define MC_CMD_LED_OFF 0 1476#define MC_CMD_SET_ID_LED_IN_LEN 4
1027#define MC_CMD_LED_ON 1 1477#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
1028#define MC_CMD_LED_DEFAULT 2 1478#define MC_CMD_LED_OFF 0x0 /* enum */
1029#define MC_CMD_SET_ID_LED_OUT_LEN 0 1479#define MC_CMD_LED_ON 0x1 /* enum */
1030 1480#define MC_CMD_LED_DEFAULT 0x2 /* enum */
1031/* MC_CMD_SET_MAC: 1481
1032 * Set MAC configuration 1482/* MC_CMD_SET_ID_LED_OUT msgresponse */
1033 * 1483#define MC_CMD_SET_ID_LED_OUT_LEN 0
1034 * The MTU is the MTU programmed directly into the XMAC/GMAC 1484
1035 * (inclusive of EtherII, VLAN, bug16011 padding) 1485
1036 * 1486/***********************************/
1037 * Locks required: None 1487/* MC_CMD_SET_MAC
1038 * Return code: 0, EINVAL 1488 * Set MAC configuration.
1039 */ 1489 */
1040#define MC_CMD_SET_MAC 0x2c 1490#define MC_CMD_SET_MAC 0x2c
1041#define MC_CMD_SET_MAC_IN_LEN 24 1491
1042#define MC_CMD_SET_MAC_IN_MTU_OFST 0 1492/* MC_CMD_SET_MAC_IN msgrequest */
1043#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 1493#define MC_CMD_SET_MAC_IN_LEN 24
1044#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 1494#define MC_CMD_SET_MAC_IN_MTU_OFST 0
1045#define MC_CMD_SET_MAC_IN_REJECT_OFST 16 1495#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
1046#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 1496#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
1047#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 1497#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
1048#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 1498#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
1049#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 1499#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
1050#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 1500#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
1051#define MC_CMD_SET_MAC_OUT_LEN 0 1501#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
1052 1502#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
1053/* MC_CMD_PHY_STATS: 1503#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
1054 * Get generic PHY statistics 1504#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
1055 * 1505#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
1056 * This call returns the statistics for a generic PHY in a sparse 1506/* MC_CMD_FCNTL_OFF 0x0 */
1057 * array (indexed by the enumerate). Each value is represented by 1507/* MC_CMD_FCNTL_RESPOND 0x1 */
1058 * a 32bit number. 1508/* MC_CMD_FCNTL_BIDIR 0x2 */
1059 * 1509#define MC_CMD_FCNTL_AUTO 0x3 /* enum */
1060 * If the DMA_ADDR is 0, then no DMA is performed, and the statistics 1510
1061 * may be read directly out of shared memory. If DMA_ADDR != 0, then 1511/* MC_CMD_SET_MAC_OUT msgresponse */
1062 * the statistics are dmad to that (page-aligned location) 1512#define MC_CMD_SET_MAC_OUT_LEN 0
1063 * 1513
1064 * Locks required: None 1514
1065 * Returns: 0, ETIME 1515/***********************************/
1066 * Response methods: shared memory, event 1516/* MC_CMD_PHY_STATS
1517 * Get generic PHY statistics.
1067 */ 1518 */
1068#define MC_CMD_PHY_STATS 0x2d 1519#define MC_CMD_PHY_STATS 0x2d
1069#define MC_CMD_PHY_STATS_IN_LEN 8
1070#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
1071#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
1072#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
1073#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
1074
1075/* Unified MAC statistics enumeration */
1076#define MC_CMD_MAC_GENERATION_START 0
1077#define MC_CMD_MAC_TX_PKTS 1
1078#define MC_CMD_MAC_TX_PAUSE_PKTS 2
1079#define MC_CMD_MAC_TX_CONTROL_PKTS 3
1080#define MC_CMD_MAC_TX_UNICAST_PKTS 4
1081#define MC_CMD_MAC_TX_MULTICAST_PKTS 5
1082#define MC_CMD_MAC_TX_BROADCAST_PKTS 6
1083#define MC_CMD_MAC_TX_BYTES 7
1084#define MC_CMD_MAC_TX_BAD_BYTES 8
1085#define MC_CMD_MAC_TX_LT64_PKTS 9
1086#define MC_CMD_MAC_TX_64_PKTS 10
1087#define MC_CMD_MAC_TX_65_TO_127_PKTS 11
1088#define MC_CMD_MAC_TX_128_TO_255_PKTS 12
1089#define MC_CMD_MAC_TX_256_TO_511_PKTS 13
1090#define MC_CMD_MAC_TX_512_TO_1023_PKTS 14
1091#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 15
1092#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 16
1093#define MC_CMD_MAC_TX_GTJUMBO_PKTS 17
1094#define MC_CMD_MAC_TX_BAD_FCS_PKTS 18
1095#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 19
1096#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 20
1097#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 21
1098#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 22
1099#define MC_CMD_MAC_TX_DEFERRED_PKTS 23
1100#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 24
1101#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 25
1102#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 26
1103#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 27
1104#define MC_CMD_MAC_RX_PKTS 28
1105#define MC_CMD_MAC_RX_PAUSE_PKTS 29
1106#define MC_CMD_MAC_RX_GOOD_PKTS 30
1107#define MC_CMD_MAC_RX_CONTROL_PKTS 31
1108#define MC_CMD_MAC_RX_UNICAST_PKTS 32
1109#define MC_CMD_MAC_RX_MULTICAST_PKTS 33
1110#define MC_CMD_MAC_RX_BROADCAST_PKTS 34
1111#define MC_CMD_MAC_RX_BYTES 35
1112#define MC_CMD_MAC_RX_BAD_BYTES 36
1113#define MC_CMD_MAC_RX_64_PKTS 37
1114#define MC_CMD_MAC_RX_65_TO_127_PKTS 38
1115#define MC_CMD_MAC_RX_128_TO_255_PKTS 39
1116#define MC_CMD_MAC_RX_256_TO_511_PKTS 40
1117#define MC_CMD_MAC_RX_512_TO_1023_PKTS 41
1118#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 42
1119#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 43
1120#define MC_CMD_MAC_RX_GTJUMBO_PKTS 44
1121#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 45
1122#define MC_CMD_MAC_RX_BAD_FCS_PKTS 46
1123#define MC_CMD_MAC_RX_OVERFLOW_PKTS 47
1124#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 48
1125#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 49
1126#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 50
1127#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 51
1128#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 52
1129#define MC_CMD_MAC_RX_JABBER_PKTS 53
1130#define MC_CMD_MAC_RX_NODESC_DROPS 54
1131#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 55
1132#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 56
1133#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
1134#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
1135#define MC_CMD_MAC_RX_MATCH_FAULT 59
1136#define MC_CMD_GMAC_DMABUF_START 64
1137#define MC_CMD_GMAC_DMABUF_END 95
1138/* Insert new members here. */
1139#define MC_CMD_MAC_GENERATION_END 96
1140#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
1141
1142/* MC_CMD_MAC_STATS:
1143 * Get unified GMAC/XMAC statistics
1144 *
1145 * This call returns unified statistics maintained by the MC as it
1146 * switches between the GMAC and XMAC. The MC will write out all
1147 * supported stats. The driver should zero initialise the buffer to
1148 * guarantee consistent results.
1149 *
1150 * Locks required: None
1151 * Returns: 0
1152 * Response methods: shared memory, event
1153 */
1154#define MC_CMD_MAC_STATS 0x2e
1155#define MC_CMD_MAC_STATS_IN_LEN 16
1156#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
1157#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
1158#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
1159#define MC_CMD_MAC_STATS_CMD_DMA_LBN 0
1160#define MC_CMD_MAC_STATS_CMD_DMA_WIDTH 1
1161#define MC_CMD_MAC_STATS_CMD_CLEAR_LBN 1
1162#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
1163#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
1164#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
1165/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
1166#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
1167#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
1168#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
1169#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
1170#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
1171#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
1172#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
1173#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
1174#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
1175
1176#define MC_CMD_MAC_STATS_OUT_LEN 0
1177
1178/* Callisto flags */
1179#define MC_CMD_SFT9001_ROBUST_LBN 0
1180#define MC_CMD_SFT9001_ROBUST_WIDTH 1
1181#define MC_CMD_SFT9001_SHORT_REACH_LBN 1
1182#define MC_CMD_SFT9001_SHORT_REACH_WIDTH 1
1183
1184/* MC_CMD_SFT9001_GET:
1185 * Read current callisto specific setting
1186 *
1187 * Locks required: None
1188 * Returns: 0, ETIME
1189 */
1190#define MC_CMD_SFT9001_GET 0x30
1191#define MC_CMD_SFT9001_GET_IN_LEN 0
1192#define MC_CMD_SFT9001_GET_OUT_LEN 4
1193#define MC_CMD_SFT9001_GET_OUT_FLAGS_OFST 0
1194 1520
1195/* MC_CMD_SFT9001_SET: 1521/* MC_CMD_PHY_STATS_IN msgrequest */
1196 * Write current callisto specific setting 1522#define MC_CMD_PHY_STATS_IN_LEN 8
1197 * 1523#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
1198 * Locks required: None 1524#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
1199 * Returns: 0, ETIME, EINVAL 1525#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
1526#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
1527
1528/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
1529#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
1530
1531/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
1532#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
1533#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
1534#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
1535#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
1536#define MC_CMD_OUI 0x0 /* enum */
1537#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */
1538#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */
1539#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */
1540#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */
1541#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */
1542#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */
1543#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */
1544#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */
1545#define MC_CMD_PCS_LINK_UP 0x9 /* enum */
1546#define MC_CMD_PCS_RX_FAULT 0xa /* enum */
1547#define MC_CMD_PCS_TX_FAULT 0xb /* enum */
1548#define MC_CMD_PCS_BER 0xc /* enum */
1549#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */
1550#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */
1551#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */
1552#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */
1553#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */
1554#define MC_CMD_PHYXS_SYNC 0x12 /* enum */
1555#define MC_CMD_AN_LINK_UP 0x13 /* enum */
1556#define MC_CMD_AN_COMPLETE 0x14 /* enum */
1557#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */
1558#define MC_CMD_CL22_LINK_UP 0x16 /* enum */
1559#define MC_CMD_PHY_NSTATS 0x17 /* enum */
1560
1561
1562/***********************************/
1563/* MC_CMD_MAC_STATS
1564 * Get generic MAC statistics.
1200 */ 1565 */
1201#define MC_CMD_SFT9001_SET 0x31 1566#define MC_CMD_MAC_STATS 0x2e
1202#define MC_CMD_SFT9001_SET_IN_LEN 4
1203#define MC_CMD_SFT9001_SET_IN_FLAGS_OFST 0
1204#define MC_CMD_SFT9001_SET_OUT_LEN 0
1205
1206 1567
1207/* MC_CMD_WOL_FILTER_SET: 1568/* MC_CMD_MAC_STATS_IN msgrequest */
1208 * Set a WoL filter 1569#define MC_CMD_MAC_STATS_IN_LEN 16
1209 * 1570#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
1210 * Locks required: None 1571#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
1211 * Returns: 0, EBUSY, EINVAL, ENOSYS 1572#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
1573#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
1574#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
1575#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
1576#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
1577#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
1578#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
1579#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
1580#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
1581#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
1582#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
1583#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
1584#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
1585#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
1586#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
1587#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
1588#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
1589#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
1590
1591/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
1592#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
1593
1594/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
1595#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
1596#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
1597#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
1598#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
1599#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
1600#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
1601#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
1602#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
1603#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
1604#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
1605#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
1606#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
1607#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
1608#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
1609#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
1610#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
1611#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
1612#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
1613#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
1614#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
1615#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
1616#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
1617#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
1618#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
1619#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
1620#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
1621#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
1622#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
1623#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
1624#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
1625#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
1626#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
1627#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
1628#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
1629#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
1630#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
1631#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
1632#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
1633#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
1634#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
1635#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
1636#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
1637#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
1638#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
1639#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
1640#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
1641#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
1642#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
1643#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
1644#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
1645#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
1646#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
1647#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
1648#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
1649#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
1650#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
1651#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
1652#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
1653#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
1654#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
1655#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
1656#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
1657#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
1658#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
1659#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
1660#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
1661#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */
1662#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */
1663#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
1664#define MC_CMD_MAC_NSTATS 0x61 /* enum */
1665
1666
1667/***********************************/
1668/* MC_CMD_SRIOV
1669 * to be documented
1670 */
1671#define MC_CMD_SRIOV 0x30
1672
1673/* MC_CMD_SRIOV_IN msgrequest */
1674#define MC_CMD_SRIOV_IN_LEN 12
1675#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
1676#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
1677#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
1678
1679/* MC_CMD_SRIOV_OUT msgresponse */
1680#define MC_CMD_SRIOV_OUT_LEN 8
1681#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
1682#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
1683
1684/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
1685#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
1686#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
1687#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
1688#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
1689#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
1690#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
1691#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
1692#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
1693#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
1694#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
1695#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
1696#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
1697#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
1698#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
1699#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
1700#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
1701#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
1702#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
1703#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
1704#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
1705#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
1706#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
1707#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
1708#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
1709#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
1710#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
1711
1712
1713/***********************************/
1714/* MC_CMD_MEMCPY
1715 * Perform memory copy operation.
1716 */
1717#define MC_CMD_MEMCPY 0x31
1718
1719/* MC_CMD_MEMCPY_IN msgrequest */
1720#define MC_CMD_MEMCPY_IN_LENMIN 32
1721#define MC_CMD_MEMCPY_IN_LENMAX 224
1722#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
1723#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
1724#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
1725#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
1726#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
1727
1728/* MC_CMD_MEMCPY_OUT msgresponse */
1729#define MC_CMD_MEMCPY_OUT_LEN 0
1730
1731
1732/***********************************/
1733/* MC_CMD_WOL_FILTER_SET
1734 * Set a WoL filter.
1212 */ 1735 */
1213#define MC_CMD_WOL_FILTER_SET 0x32 1736#define MC_CMD_WOL_FILTER_SET 0x32
1214#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 /* 190 rounded up to a word */ 1737
1215#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 1738/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
1216#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 1739#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
1217 1740#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
1218/* There is a union at offset 8, following defines overlap due to 1741#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
1219 * this */ 1742#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
1220#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 1743#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
1221 1744#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */
1222#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST \ 1745#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */
1223 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1746#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */
1224 1747#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */
1225#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST \ 1748#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */
1226 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1749#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */
1227#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST \ 1750#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */
1228 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 4) 1751#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
1229#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST \ 1752#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
1230 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 8) 1753#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
1231#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST \ 1754
1232 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 10) 1755/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
1233 1756#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
1234#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST \ 1757/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1235 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1758/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1236#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST \ 1759#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
1237 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 16) 1760#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
1238#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST \ 1761#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
1239 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 32) 1762#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
1240#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST \ 1763
1241 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 34) 1764/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
1242 1765#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
1243#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST \ 1766/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1244 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1767/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1245#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_OFST \ 1768#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
1246 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 48) 1769#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
1247#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST \ 1770#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
1248 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 176) 1771#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
1249#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST \ 1772#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
1250 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 177) 1773#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
1251#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \ 1774
1252 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178) 1775/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
1253 1776#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
1254#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \ 1777/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1255 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 1778/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1256#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 1779#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
1257#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 1780#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
1258#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 1781#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
1259#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 1782#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
1260 1783#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
1261#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 1784#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
1262#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 1785#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
1263 1786#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
1264/* WOL Filter types enumeration */ 1787
1265#define MC_CMD_WOL_TYPE_MAGIC 0x0 1788/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
1266 /* unused 0x1 */ 1789#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
1267#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 1790/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1268#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 1791/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1269#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 1792#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
1270#define MC_CMD_WOL_TYPE_BITMAP 0x5 1793#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
1271#define MC_CMD_WOL_TYPE_LINK 0x6 1794#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
1272#define MC_CMD_WOL_TYPE_MAX 0x7 1795#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
1273 1796#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
1274#define MC_CMD_FILTER_MODE_SIMPLE 0x0 1797#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
1275#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff 1798#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
1276 1799#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
1277/* MC_CMD_WOL_FILTER_REMOVE: 1800#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
1278 * Remove a WoL filter 1801#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
1279 * 1802
1280 * Locks required: None 1803/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
1281 * Returns: 0, EINVAL, ENOSYS 1804#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
1805/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
1806/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
1807#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
1808#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
1809#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
1810#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
1811#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
1812
1813/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
1814#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
1815#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
1816
1817
1818/***********************************/
1819/* MC_CMD_WOL_FILTER_REMOVE
1820 * Remove a WoL filter.
1282 */ 1821 */
1283#define MC_CMD_WOL_FILTER_REMOVE 0x33 1822#define MC_CMD_WOL_FILTER_REMOVE 0x33
1284#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
1285#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
1286#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
1287 1823
1824/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
1825#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
1826#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
1288 1827
1289/* MC_CMD_WOL_FILTER_RESET: 1828/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
1290 * Reset (i.e. remove all) WoL filters 1829#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
1291 * 1830
1292 * Locks required: None 1831
1293 * Returns: 0, ENOSYS 1832/***********************************/
1833/* MC_CMD_WOL_FILTER_RESET
1834 * Reset (i.e. remove all) WoL filters.
1294 */ 1835 */
1295#define MC_CMD_WOL_FILTER_RESET 0x34 1836#define MC_CMD_WOL_FILTER_RESET 0x34
1296#define MC_CMD_WOL_FILTER_RESET_IN_LEN 0
1297#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
1298 1837
1299/* MC_CMD_SET_MCAST_HASH: 1838/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
1300 * Set the MCASH hash value without otherwise 1839#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
1301 * reconfiguring the MAC 1840#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
1841#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
1842#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
1843
1844/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
1845#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
1846
1847
1848/***********************************/
1849/* MC_CMD_SET_MCAST_HASH
1850 * Set the MCASH hash value.
1302 */ 1851 */
1303#define MC_CMD_SET_MCAST_HASH 0x35 1852#define MC_CMD_SET_MCAST_HASH 0x35
1304#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
1305#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
1306#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
1307#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
1308 1853
1309/* MC_CMD_NVRAM_TYPES: 1854/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
1310 * Return bitfield indicating available types of virtual NVRAM partitions 1855#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
1311 * 1856#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
1312 * Locks required: none 1857#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
1313 * Returns: 0 1858#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
1859#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
1860
1861/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
1862#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
1863
1864
1865/***********************************/
1866/* MC_CMD_NVRAM_TYPES
1867 * Get virtual NVRAM partitions information.
1314 */ 1868 */
1315#define MC_CMD_NVRAM_TYPES 0x36 1869#define MC_CMD_NVRAM_TYPES 0x36
1316#define MC_CMD_NVRAM_TYPES_IN_LEN 0 1870
1317#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 1871/* MC_CMD_NVRAM_TYPES_IN msgrequest */
1318#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 1872#define MC_CMD_NVRAM_TYPES_IN_LEN 0
1319 1873
1320/* Supported NVRAM types */ 1874/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
1321#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0 1875#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
1322#define MC_CMD_NVRAM_TYPE_MC_FW 1 1876#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
1323#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 2 1877#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */
1324#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 3 1878#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */
1325#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 4 1879#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */
1326#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 5 1880#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */
1327#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 6 1881#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */
1328#define MC_CMD_NVRAM_TYPE_EXP_ROM 7 1882#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */
1329#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 8 1883#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */
1330#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 9 1884#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */
1331#define MC_CMD_NVRAM_TYPE_PHY_PORT0 10 1885#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */
1332#define MC_CMD_NVRAM_TYPE_PHY_PORT1 11 1886#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */
1333#define MC_CMD_NVRAM_TYPE_LOG 12 1887#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */
1334 1888#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */
1335/* MC_CMD_NVRAM_INFO: 1889#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */
1336 * Read info about a virtual NVRAM partition 1890#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */
1337 * 1891
1338 * Locks required: none 1892
1339 * Returns: 0, EINVAL (bad type) 1893/***********************************/
1894/* MC_CMD_NVRAM_INFO
1895 * Read info about a virtual NVRAM partition.
1340 */ 1896 */
1341#define MC_CMD_NVRAM_INFO 0x37 1897#define MC_CMD_NVRAM_INFO 0x37
1342#define MC_CMD_NVRAM_INFO_IN_LEN 4 1898
1343#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 1899/* MC_CMD_NVRAM_INFO_IN msgrequest */
1344#define MC_CMD_NVRAM_INFO_OUT_LEN 24 1900#define MC_CMD_NVRAM_INFO_IN_LEN 4
1345#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 1901#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
1346#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 1902/* Enum values, see field(s): */
1347#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 1903/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1348#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 1904
1349#define MC_CMD_NVRAM_PROTECTED_LBN 0 1905/* MC_CMD_NVRAM_INFO_OUT msgresponse */
1350#define MC_CMD_NVRAM_PROTECTED_WIDTH 1 1906#define MC_CMD_NVRAM_INFO_OUT_LEN 24
1351#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 1907#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
1352#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 1908/* Enum values, see field(s): */
1353 1909/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1354/* MC_CMD_NVRAM_UPDATE_START: 1910#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
1355 * Start a group of update operations on a virtual NVRAM partition 1911#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
1356 * 1912#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
1357 * Locks required: PHY_LOCK if type==*PHY* 1913#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
1358 * Returns: 0, EINVAL (bad type), EACCES (if PHY_LOCK required and not held) 1914#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
1915#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
1916#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
1917
1918
1919/***********************************/
1920/* MC_CMD_NVRAM_UPDATE_START
1921 * Start a group of update operations on a virtual NVRAM partition.
1359 */ 1922 */
1360#define MC_CMD_NVRAM_UPDATE_START 0x38 1923#define MC_CMD_NVRAM_UPDATE_START 0x38
1361#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
1362#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
1363#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
1364 1924
1365/* MC_CMD_NVRAM_READ: 1925/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
1366 * Read data from a virtual NVRAM partition 1926#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
1367 * 1927#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
1368 * Locks required: PHY_LOCK if type==*PHY* 1928/* Enum values, see field(s): */
1369 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1929/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1930
1931/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
1932#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
1933
1934
1935/***********************************/
1936/* MC_CMD_NVRAM_READ
1937 * Read data from a virtual NVRAM partition.
1370 */ 1938 */
1371#define MC_CMD_NVRAM_READ 0x39 1939#define MC_CMD_NVRAM_READ 0x39
1372#define MC_CMD_NVRAM_READ_IN_LEN 12 1940
1373#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 1941/* MC_CMD_NVRAM_READ_IN msgrequest */
1374#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 1942#define MC_CMD_NVRAM_READ_IN_LEN 12
1375#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 1943#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
1376#define MC_CMD_NVRAM_READ_OUT_LEN(_read_bytes) (_read_bytes) 1944/* Enum values, see field(s): */
1377#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 1945/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1378 1946#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
1379/* MC_CMD_NVRAM_WRITE: 1947#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
1380 * Write data to a virtual NVRAM partition 1948
1381 * 1949/* MC_CMD_NVRAM_READ_OUT msgresponse */
1382 * Locks required: PHY_LOCK if type==*PHY* 1950#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
1383 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1951#define MC_CMD_NVRAM_READ_OUT_LENMAX 255
1952#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
1953#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
1954#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
1955#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
1956#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255
1957
1958
1959/***********************************/
1960/* MC_CMD_NVRAM_WRITE
1961 * Write data to a virtual NVRAM partition.
1384 */ 1962 */
1385#define MC_CMD_NVRAM_WRITE 0x3a 1963#define MC_CMD_NVRAM_WRITE 0x3a
1386#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 1964
1387#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 1965/* MC_CMD_NVRAM_WRITE_IN msgrequest */
1388#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 1966#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
1389#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 1967#define MC_CMD_NVRAM_WRITE_IN_LENMAX 255
1390#define MC_CMD_NVRAM_WRITE_IN_LEN(_write_bytes) (12 + _write_bytes) 1968#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
1391#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 1969#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
1392 1970/* Enum values, see field(s): */
1393/* MC_CMD_NVRAM_ERASE: 1971/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1394 * Erase sector(s) from a virtual NVRAM partition 1972#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
1395 * 1973#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
1396 * Locks required: PHY_LOCK if type==*PHY* 1974#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
1397 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1975#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
1976#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
1977#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243
1978
1979/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
1980#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
1981
1982
1983/***********************************/
1984/* MC_CMD_NVRAM_ERASE
1985 * Erase sector(s) from a virtual NVRAM partition.
1398 */ 1986 */
1399#define MC_CMD_NVRAM_ERASE 0x3b 1987#define MC_CMD_NVRAM_ERASE 0x3b
1400#define MC_CMD_NVRAM_ERASE_IN_LEN 12 1988
1401#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 1989/* MC_CMD_NVRAM_ERASE_IN msgrequest */
1402#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 1990#define MC_CMD_NVRAM_ERASE_IN_LEN 12
1403#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 1991#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
1404#define MC_CMD_NVRAM_ERASE_OUT_LEN 0 1992/* Enum values, see field(s): */
1405 1993/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1406/* MC_CMD_NVRAM_UPDATE_FINISH: 1994#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
1407 * Finish a group of update operations on a virtual NVRAM partition 1995#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
1408 * 1996
1409 * Locks required: PHY_LOCK if type==*PHY* 1997/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
1410 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1998#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
1999
2000
2001/***********************************/
2002/* MC_CMD_NVRAM_UPDATE_FINISH
2003 * Finish a group of update operations on a virtual NVRAM partition.
1411 */ 2004 */
1412#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c 2005#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
1413#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
1414#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
1415#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
1416#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
1417 2006
1418/* MC_CMD_REBOOT: 2007/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
2008#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
2009#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
2010/* Enum values, see field(s): */
2011/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
2012#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
2013
2014/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse */
2015#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
2016
2017
2018/***********************************/
2019/* MC_CMD_REBOOT
1419 * Reboot the MC. 2020 * Reboot the MC.
1420 *
1421 * The AFTER_ASSERTION flag is intended to be used when the driver notices
1422 * an assertion failure (at which point it is expected to perform a complete
1423 * tear down and reinitialise), to allow both ports to reset the MC once
1424 * in an atomic fashion.
1425 *
1426 * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
1427 * which means that they will automatically reboot out of the assertion
1428 * handler, so this is in practise an optional operation. It is still
1429 * recommended that drivers execute this to support custom firmwares
1430 * with REBOOT_ON_ASSERT=0.
1431 *
1432 * Locks required: NONE
1433 * Returns: Nothing. You get back a response with ERR=1, DATALEN=0
1434 */ 2021 */
1435#define MC_CMD_REBOOT 0x3d 2022#define MC_CMD_REBOOT 0x3d
1436#define MC_CMD_REBOOT_IN_LEN 4
1437#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
1438#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 1
1439#define MC_CMD_REBOOT_OUT_LEN 0
1440 2023
1441/* MC_CMD_SCHEDINFO: 2024/* MC_CMD_REBOOT_IN msgrequest */
1442 * Request scheduler info. from the MC. 2025#define MC_CMD_REBOOT_IN_LEN 4
1443 * 2026#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
1444 * Locks required: NONE 2027#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
1445 * Returns: An array of (timeslice,maximum overrun), one for each thread, 2028
1446 * in ascending order of thread address.s 2029/* MC_CMD_REBOOT_OUT msgresponse */
2030#define MC_CMD_REBOOT_OUT_LEN 0
2031
2032
2033/***********************************/
2034/* MC_CMD_SCHEDINFO
2035 * Request scheduler info.
1447 */ 2036 */
1448#define MC_CMD_SCHEDINFO 0x3e 2037#define MC_CMD_SCHEDINFO 0x3e
1449#define MC_CMD_SCHEDINFO_IN_LEN 0
1450 2038
2039/* MC_CMD_SCHEDINFO_IN msgrequest */
2040#define MC_CMD_SCHEDINFO_IN_LEN 0
1451 2041
1452/* MC_CMD_SET_REBOOT_MODE: (debug) 2042/* MC_CMD_SCHEDINFO_OUT msgresponse */
1453 * Set the mode for the next MC reboot. 2043#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
1454 * 2044#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
1455 * Locks required: NONE 2045#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
1456 * 2046#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
1457 * Sets the reboot mode to the specified value. Returns the old mode. 2047#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
2048#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
2049#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
2050
2051
2052/***********************************/
2053/* MC_CMD_REBOOT_MODE
1458 */ 2054 */
1459#define MC_CMD_REBOOT_MODE 0x3f 2055#define MC_CMD_REBOOT_MODE 0x3f
1460#define MC_CMD_REBOOT_MODE_IN_LEN 4 2056
1461#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 2057/* MC_CMD_REBOOT_MODE_IN msgrequest */
1462#define MC_CMD_REBOOT_MODE_OUT_LEN 4 2058#define MC_CMD_REBOOT_MODE_IN_LEN 4
1463#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 2059#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
1464#define MC_CMD_REBOOT_MODE_NORMAL 0 2060#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */
1465#define MC_CMD_REBOOT_MODE_SNAPPER 3 2061#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */
1466 2062
1467/* MC_CMD_DEBUG_LOG: 2063/* MC_CMD_REBOOT_MODE_OUT msgresponse */
1468 * Null request/response command (debug) 2064#define MC_CMD_REBOOT_MODE_OUT_LEN 4
1469 * - sequence number is always zero 2065#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
1470 * - only supported on the UART interface 2066
1471 * (the same set of bytes is delivered as an 2067
1472 * event over PCI) 2068/***********************************/
1473 */ 2069/* MC_CMD_SENSOR_INFO
1474#define MC_CMD_DEBUG_LOG 0x40
1475#define MC_CMD_DEBUG_LOG_IN_LEN 0
1476#define MC_CMD_DEBUG_LOG_OUT_LEN 0
1477
1478/* Generic sensor enumeration. Note that a dual port NIC
1479 * will EITHER expose PHY_COMMON_TEMP OR PHY0_TEMP and
1480 * PHY1_TEMP depending on whether there is a single sensor
1481 * in the vicinity of the two port, or one per port.
1482 */
1483#define MC_CMD_SENSOR_CONTROLLER_TEMP 0 /* degC */
1484#define MC_CMD_SENSOR_PHY_COMMON_TEMP 1 /* degC */
1485#define MC_CMD_SENSOR_CONTROLLER_COOLING 2 /* bool */
1486#define MC_CMD_SENSOR_PHY0_TEMP 3 /* degC */
1487#define MC_CMD_SENSOR_PHY0_COOLING 4 /* bool */
1488#define MC_CMD_SENSOR_PHY1_TEMP 5 /* degC */
1489#define MC_CMD_SENSOR_PHY1_COOLING 6 /* bool */
1490#define MC_CMD_SENSOR_IN_1V0 7 /* mV */
1491#define MC_CMD_SENSOR_IN_1V2 8 /* mV */
1492#define MC_CMD_SENSOR_IN_1V8 9 /* mV */
1493#define MC_CMD_SENSOR_IN_2V5 10 /* mV */
1494#define MC_CMD_SENSOR_IN_3V3 11 /* mV */
1495#define MC_CMD_SENSOR_IN_12V0 12 /* mV */
1496
1497
1498/* Sensor state */
1499#define MC_CMD_SENSOR_STATE_OK 0
1500#define MC_CMD_SENSOR_STATE_WARNING 1
1501#define MC_CMD_SENSOR_STATE_FATAL 2
1502#define MC_CMD_SENSOR_STATE_BROKEN 3
1503
1504/* MC_CMD_SENSOR_INFO:
1505 * Returns information about every available sensor. 2070 * Returns information about every available sensor.
1506 *
1507 * Each sensor has a single (16bit) value, and a corresponding state.
1508 * The mapping between value and sensor is nominally determined by the
1509 * MC, but in practise is implemented as zero (BROKEN), one (TEMPERATURE),
1510 * or two (VOLTAGE) ranges per sensor per state.
1511 *
1512 * This call returns a mask (32bit) of the sensors that are supported
1513 * by this platform, then an array (indexed by MC_CMD_SENSOR) of byte
1514 * offsets to the per-sensor arrays. Each sensor array has four 16bit
1515 * numbers, min1, max1, min2, max2.
1516 *
1517 * Locks required: None
1518 * Returns: 0
1519 */ 2071 */
1520#define MC_CMD_SENSOR_INFO 0x41 2072#define MC_CMD_SENSOR_INFO 0x41
1521#define MC_CMD_SENSOR_INFO_IN_LEN 0
1522#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
1523#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
1524 (4 + (_x))
1525#define MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(_ofst) \
1526 ((_ofst) + 0)
1527#define MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(_ofst) \
1528 ((_ofst) + 2)
1529#define MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(_ofst) \
1530 ((_ofst) + 4)
1531#define MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(_ofst) \
1532 ((_ofst) + 6)
1533 2073
2074/* MC_CMD_SENSOR_INFO_IN msgrequest */
2075#define MC_CMD_SENSOR_INFO_IN_LEN 0
2076
2077/* MC_CMD_SENSOR_INFO_OUT msgresponse */
2078#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
2079#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
2080#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
2081#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
2082#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */
2083#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */
2084#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */
2085#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */
2086#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */
2087#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */
2088#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */
2089#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */
2090#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */
2091#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */
2092#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */
2093#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */
2094#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */
2095#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */
2096#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */
2097#define MC_CMD_SENSOR_ENTRY_OFST 4
2098#define MC_CMD_SENSOR_ENTRY_LEN 8
2099#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
2100#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
2101#define MC_CMD_SENSOR_ENTRY_MINNUM 1
2102#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
2103
2104/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
2105#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
2106#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
2107#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
2108#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
2109#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
2110#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
2111#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
2112#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
2113#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
2114#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
2115#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
2116#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
2117#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
2118#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
2119#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
2120#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
2121#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
2122
2123
2124/***********************************/
1534/* MC_CMD_READ_SENSORS 2125/* MC_CMD_READ_SENSORS
1535 * Returns the current reading from each sensor 2126 * Returns the current reading from each sensor.
1536 *
1537 * Returns a sparse array of sensor readings (indexed by the sensor
1538 * type) into host memory. Each array element is a dword.
1539 *
1540 * The MC will send a SENSOREVT event every time any sensor changes state. The
1541 * driver is responsible for ensuring that it doesn't miss any events. The board
1542 * will function normally if all sensors are in STATE_OK or state_WARNING.
1543 * Otherwise the board should not be expected to function.
1544 */ 2127 */
1545#define MC_CMD_READ_SENSORS 0x42 2128#define MC_CMD_READ_SENSORS 0x42
1546#define MC_CMD_READ_SENSORS_IN_LEN 8
1547#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
1548#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
1549#define MC_CMD_READ_SENSORS_OUT_LEN 0
1550 2129
1551/* Sensor reading fields */ 2130/* MC_CMD_READ_SENSORS_IN msgrequest */
1552#define MC_CMD_READ_SENSOR_VALUE_LBN 0 2131#define MC_CMD_READ_SENSORS_IN_LEN 8
1553#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16 2132#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
1554#define MC_CMD_READ_SENSOR_STATE_LBN 16 2133#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
1555#define MC_CMD_READ_SENSOR_STATE_WIDTH 8 2134#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
1556 2135#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
1557 2136
1558/* MC_CMD_GET_PHY_STATE: 2137/* MC_CMD_READ_SENSORS_OUT msgresponse */
1559 * Report current state of PHY. A "zombie" PHY is a PHY that has failed to 2138#define MC_CMD_READ_SENSORS_OUT_LEN 0
1560 * boot (e.g. due to missing or corrupted firmware). 2139
1561 * 2140/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
1562 * Locks required: None 2141#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3
1563 * Return code: 0 2142#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
2143#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
2144#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
2145#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
2146#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
2147#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
2148#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */
2149#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */
2150#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */
2151#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */
2152#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
2153#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
2154
2155
2156/***********************************/
2157/* MC_CMD_GET_PHY_STATE
2158 * Report current state of PHY.
1564 */ 2159 */
1565#define MC_CMD_GET_PHY_STATE 0x43 2160#define MC_CMD_GET_PHY_STATE 0x43
1566 2161
1567#define MC_CMD_GET_PHY_STATE_IN_LEN 0 2162/* MC_CMD_GET_PHY_STATE_IN msgrequest */
1568#define MC_CMD_GET_PHY_STATE_OUT_LEN 4 2163#define MC_CMD_GET_PHY_STATE_IN_LEN 0
1569#define MC_CMD_GET_PHY_STATE_STATE_OFST 0
1570/* PHY state enumeration: */
1571#define MC_CMD_PHY_STATE_OK 1
1572#define MC_CMD_PHY_STATE_ZOMBIE 2
1573 2164
2165/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
2166#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
2167#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
2168#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
2169#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
1574 2170
1575/* 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to 2171
1576 * disable 802.Qbb for a given priority. */ 2172/***********************************/
2173/* MC_CMD_SETUP_8021QBB
2174 * 802.1Qbb control.
2175 */
1577#define MC_CMD_SETUP_8021QBB 0x44 2176#define MC_CMD_SETUP_8021QBB 0x44
1578#define MC_CMD_SETUP_8021QBB_IN_LEN 32
1579#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
1580#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFFST 0
1581 2177
2178/* MC_CMD_SETUP_8021QBB_IN msgrequest */
2179#define MC_CMD_SETUP_8021QBB_IN_LEN 32
2180#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
2181#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
1582 2182
1583/* MC_CMD_WOL_FILTER_GET: 2183/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
1584 * Retrieve ID of any WoL filters 2184#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
1585 *
1586 * Locks required: None
1587 * Returns: 0, ENOSYS
1588 */
1589#define MC_CMD_WOL_FILTER_GET 0x45
1590#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
1591#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
1592#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
1593 2185
1594 2186
1595/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD: 2187/***********************************/
1596 * Offload a protocol to NIC for lights-out state 2188/* MC_CMD_WOL_FILTER_GET
1597 * 2189 * Retrieve ID of any WoL filters.
1598 * Locks required: None
1599 * Returns: 0, ENOSYS
1600 */ 2190 */
1601#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 2191#define MC_CMD_WOL_FILTER_GET 0x45
1602 2192
1603#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN 16 2193/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
1604#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 2194#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
1605 2195
1606/* There is a union at offset 4, following defines overlap due to 2196/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
1607 * this */ 2197#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
1608#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 2198#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
1609#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST 4
1610#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST 10
1611#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSMAC_OFST 4
1612#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSSNIPV6_OFST 10
1613#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSIPV6_OFST 26
1614 2199
1615#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
1616#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
1617 2200
2201/***********************************/
2202/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
2203 * Add a protocol offload to NIC for lights-out state.
2204 */
2205#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
1618 2206
1619/* MC_CMD_REMOVE_LIGHTSOUT_PROTOCOL_OFFLOAD: 2207/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
1620 * Offload a protocol to NIC for lights-out state 2208#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
1621 * 2209#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
1622 * Locks required: None 2210#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
1623 * Returns: 0, ENOSYS 2211#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
2212#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
2213#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
2214#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
2215#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
2216#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
2217#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
2218
2219/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
2220#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
2221/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
2222#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
2223#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
2224#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
2225
2226/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
2227#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
2228/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
2229#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
2230#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
2231#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
2232#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
2233#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
2234#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
2235
2236/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
2237#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
2238#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
2239
2240
2241/***********************************/
2242/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
2243 * Remove a protocol offload from NIC for lights-out state.
1624 */ 2244 */
1625#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 2245#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
1626#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
1627#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
1628 2246
1629#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 2247/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
1630#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 2248#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
2249#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
2250#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
1631 2251
1632/* Lights-out offload protocols enumeration */ 2252/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
1633#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 2253#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
1634#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2
1635 2254
1636 2255
1637/* MC_CMD_MAC_RESET_RESTORE: 2256/***********************************/
1638 * Restore MAC after block reset 2257/* MC_CMD_MAC_RESET_RESTORE
1639 * 2258 * Restore MAC after block reset.
1640 * Locks required: None
1641 * Returns: 0
1642 */ 2259 */
1643
1644#define MC_CMD_MAC_RESET_RESTORE 0x48 2260#define MC_CMD_MAC_RESET_RESTORE 0x48
1645#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
1646#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
1647 2261
2262/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
2263#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
2264
2265/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
2266#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
1648 2267
1649/* MC_CMD_TEST_ASSERT:
1650 * Deliberately trigger an assert-detonation in the firmware for testing
1651 * purposes (i.e. to allow tests that the driver copes gracefully).
1652 *
1653 * Locks required: None
1654 * Returns: 0
1655 */
1656 2268
2269/***********************************/
2270/* MC_CMD_TESTASSERT
2271 */
1657#define MC_CMD_TESTASSERT 0x49 2272#define MC_CMD_TESTASSERT 0x49
1658#define MC_CMD_TESTASSERT_IN_LEN 0
1659#define MC_CMD_TESTASSERT_OUT_LEN 0
1660 2273
1661/* MC_CMD_WORKAROUND 0x4a 2274/* MC_CMD_TESTASSERT_IN msgrequest */
1662 * 2275#define MC_CMD_TESTASSERT_IN_LEN 0
1663 * Enable/Disable a given workaround. The mcfw will return EINVAL if it 2276
1664 * doesn't understand the given workaround number - which should not 2277/* MC_CMD_TESTASSERT_OUT msgresponse */
1665 * be treated as a hard error by client code. 2278#define MC_CMD_TESTASSERT_OUT_LEN 0
1666 * 2279
1667 * This op does not imply any semantics about each workaround, that's between 2280
1668 * the driver and the mcfw on a per-workaround basis. 2281/***********************************/
1669 * 2282/* MC_CMD_WORKAROUND
1670 * Locks required: None 2283 * Enable/Disable a given workaround.
1671 * Returns: 0, EINVAL
1672 */ 2284 */
1673#define MC_CMD_WORKAROUND 0x4a 2285#define MC_CMD_WORKAROUND 0x4a
1674#define MC_CMD_WORKAROUND_IN_LEN 8 2286
1675#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 2287/* MC_CMD_WORKAROUND_IN msgrequest */
1676#define MC_CMD_WORKAROUND_BUG17230 1 2288#define MC_CMD_WORKAROUND_IN_LEN 8
1677#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 2289#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
1678#define MC_CMD_WORKAROUND_OUT_LEN 0 2290#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
1679 2291#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
1680/* MC_CMD_GET_PHY_MEDIA_INFO: 2292
1681 * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for 2293/* MC_CMD_WORKAROUND_OUT msgresponse */
1682 * SFP+ PHYs). 2294#define MC_CMD_WORKAROUND_OUT_LEN 0
1683 * 2295
1684 * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE); 2296
1685 * the valid "page number" input values, and the output data, are interpreted 2297/***********************************/
1686 * on a per-type basis. 2298/* MC_CMD_GET_PHY_MEDIA_INFO
1687 * 2299 * Read media-specific data from PHY.
1688 * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
1689 * 0xA0 offset 0 or 0x80.
1690 * Anything else: currently undefined.
1691 *
1692 * Locks required: None
1693 * Return code: 0
1694 */ 2300 */
1695#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b 2301#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
1696#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 2302
1697#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 2303/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
1698#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes)) 2304#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
1699#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 2305#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
1700#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 2306
1701 2307/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
1702/* MC_CMD_NVRAM_TEST: 2308#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
1703 * Test a particular NVRAM partition for valid contents (where "valid" 2309#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255
1704 * depends on the type of partition). 2310#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
1705 * 2311#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
1706 * Locks required: None 2312#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
1707 * Return code: 0 2313#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
2314#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
2315#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251
2316
2317
2318/***********************************/
2319/* MC_CMD_NVRAM_TEST
2320 * Test a particular NVRAM partition.
1708 */ 2321 */
1709#define MC_CMD_NVRAM_TEST 0x4c 2322#define MC_CMD_NVRAM_TEST 0x4c
1710#define MC_CMD_NVRAM_TEST_IN_LEN 4 2323
1711#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 2324/* MC_CMD_NVRAM_TEST_IN msgrequest */
1712#define MC_CMD_NVRAM_TEST_OUT_LEN 4 2325#define MC_CMD_NVRAM_TEST_IN_LEN 4
1713#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 2326#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
1714#define MC_CMD_NVRAM_TEST_PASS 0 2327/* Enum values, see field(s): */
1715#define MC_CMD_NVRAM_TEST_FAIL 1 2328/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
1716#define MC_CMD_NVRAM_TEST_NOTSUPP 2 2329
1717 2330/* MC_CMD_NVRAM_TEST_OUT msgresponse */
1718/* MC_CMD_MRSFP_TWEAK: (debug) 2331#define MC_CMD_NVRAM_TEST_OUT_LEN 4
1719 * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds. 2332#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
1720 * I2C I/O expander bits are always read; if equaliser parameters are supplied, 2333#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
1721 * they are configured first. 2334#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
1722 * 2335#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
1723 * Locks required: None 2336
1724 * Return code: 0, EINVAL 2337
2338/***********************************/
2339/* MC_CMD_MRSFP_TWEAK
2340 * Read status and/or set parameters for the 'mrsfp' driver.
1725 */ 2341 */
1726#define MC_CMD_MRSFP_TWEAK 0x4d 2342#define MC_CMD_MRSFP_TWEAK 0x4d
1727#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0 2343
1728#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16 2344/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
1729#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */ 2345#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
1730#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */ 2346#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
1731#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */ 2347#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
1732#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */ 2348#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
1733#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 2349#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
1734#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */ 2350
1735#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */ 2351/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
1736#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */ 2352#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
1737 2353
1738/* MC_CMD_TEST_HACK: (debug (unsurprisingly)) 2354/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
1739 * Change bits of network port state for test purposes in ways that would never be 2355#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
1740 * useful in normal operation and so need a special command to change. */ 2356#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
1741#define MC_CMD_TEST_HACK 0x2f 2357#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
1742#define MC_CMD_TEST_HACK_IN_LEN 8 2358#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
1743#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0 2359#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
1744#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */ 2360#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
1745#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */ 2361
1746#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force on */ 2362
1747#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */ 2363/***********************************/
1748#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */ 2364/* MC_CMD_SENSOR_SET_LIMS
1749#define MC_CMD_TEST_HACK_OUT_LEN 0 2365 * Adjusts the sensor limits.
1750
1751/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
1752 * is a warranty-voiding operation.
1753 *
1754 * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP
1755 * followed by 4 32-bit values: min(warning) max(warning), min(fatal), max(fatal). Which
1756 * of these limits are meaningful and what their interpretation is is sensor-specific.
1757 *
1758 * OUT: nothing
1759 *
1760 * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
1761 * out of range.
1762 */ 2366 */
1763#define MC_CMD_SENSOR_SET_LIMS 0x4e 2367#define MC_CMD_SENSOR_SET_LIMS 0x4e
1764#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 2368
1765#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 2369/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
1766#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 2370#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
1767#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 2371#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
1768#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 2372/* Enum values, see field(s): */
1769#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 2373/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
1770 2374#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
1771/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be 2375#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
1772 * used for post-3.0 extensions. If you run out of space, look for gaps or 2376#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
1773 * commands that are unused in the existing range. */ 2377#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
2378
2379/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
2380#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
2381
2382
2383/***********************************/
2384/* MC_CMD_GET_RESOURCE_LIMITS
2385 */
2386#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
2387
2388/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
2389#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
2390
2391/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
2392#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
2393#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
2394#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
2395#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
2396#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
2397
2398/* MC_CMD_RESOURCE_SPECIFIER enum */
2399#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
2400#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
2401
1774 2402
1775#endif /* MCDI_PCOL_H */ 2403#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c
index 6c63ab0710af..7bcad899a936 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_phy.c
@@ -116,7 +116,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
116 goto fail; 116 goto fail;
117 } 117 }
118 118
119 *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED); 119 *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
120 120
121 return 0; 121 return 0;
122 122
@@ -264,22 +264,22 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
264 264
265 /* TODO: Advertise the capabilities supported by this PHY */ 265 /* TODO: Advertise the capabilities supported by this PHY */
266 supported = 0; 266 supported = 0;
267 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN)) 267 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
268 supported |= PHY_MODE_TX_DISABLED; 268 supported |= PHY_MODE_TX_DISABLED;
269 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN)) 269 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
270 supported |= PHY_MODE_LOW_POWER; 270 supported |= PHY_MODE_LOW_POWER;
271 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN)) 271 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
272 supported |= PHY_MODE_OFF; 272 supported |= PHY_MODE_OFF;
273 273
274 mode = efx->phy_mode & supported; 274 mode = efx->phy_mode & supported;
275 275
276 flags = 0; 276 flags = 0;
277 if (mode & PHY_MODE_TX_DISABLED) 277 if (mode & PHY_MODE_TX_DISABLED)
278 flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN); 278 flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
279 if (mode & PHY_MODE_LOW_POWER) 279 if (mode & PHY_MODE_LOW_POWER)
280 flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN); 280 flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
281 if (mode & PHY_MODE_OFF) 281 if (mode & PHY_MODE_OFF)
282 flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN); 282 flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
283 283
284 return flags; 284 return flags;
285} 285}
@@ -436,8 +436,8 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
436 break; 436 break;
437 } 437 }
438 438
439 link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN)); 439 link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
440 link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN)); 440 link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
441 link_state->speed = speed; 441 link_state->speed = speed;
442} 442}
443 443
@@ -592,7 +592,7 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
592 592
593 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) 593 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
594 return -EIO; 594 return -EIO;
595 if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK) 595 if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
596 return -EINVAL; 596 return -EINVAL;
597 597
598 return 0; 598 return 0;
@@ -680,7 +680,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
680 u32 mode; 680 u32 mode;
681 int rc; 681 int rc;
682 682
683 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { 683 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
684 rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); 684 rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
685 if (rc < 0) 685 if (rc < 0)
686 return rc; 686 return rc;
@@ -691,15 +691,15 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
691 /* If we support both LONG and SHORT, then run each in response to 691 /* If we support both LONG and SHORT, then run each in response to
692 * break or not. Otherwise, run the one we support */ 692 * break or not. Otherwise, run the one we support */
693 mode = 0; 693 mode = 0;
694 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) { 694 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
695 if ((flags & ETH_TEST_FL_OFFLINE) && 695 if ((flags & ETH_TEST_FL_OFFLINE) &&
696 (phy_cfg->flags & 696 (phy_cfg->flags &
697 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) 697 (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
698 mode = MC_CMD_PHY_BIST_CABLE_LONG; 698 mode = MC_CMD_PHY_BIST_CABLE_LONG;
699 else 699 else
700 mode = MC_CMD_PHY_BIST_CABLE_SHORT; 700 mode = MC_CMD_PHY_BIST_CABLE_SHORT;
701 } else if (phy_cfg->flags & 701 } else if (phy_cfg->flags &
702 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)) 702 (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
703 mode = MC_CMD_PHY_BIST_CABLE_LONG; 703 mode = MC_CMD_PHY_BIST_CABLE_LONG;
704 704
705 if (mode != 0) { 705 if (mode != 0) {
@@ -717,14 +717,14 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
717{ 717{
718 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 718 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
719 719
720 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { 720 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
721 if (index == 0) 721 if (index == 0)
722 return "bist"; 722 return "bist";
723 --index; 723 --index;
724 } 724 }
725 725
726 if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) | 726 if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
727 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) { 727 (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
728 if (index == 0) 728 if (index == 0)
729 return "cable"; 729 return "cable";
730 --index; 730 --index;
@@ -741,7 +741,7 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
741 741
742const struct efx_phy_operations efx_mcdi_phy_ops = { 742const struct efx_phy_operations efx_mcdi_phy_ops = {
743 .probe = efx_mcdi_phy_probe, 743 .probe = efx_mcdi_phy_probe,
744 .init = efx_port_dummy_op_int, 744 .init = efx_port_dummy_op_int,
745 .reconfigure = efx_mcdi_phy_reconfigure, 745 .reconfigure = efx_mcdi_phy_reconfigure,
746 .poll = efx_mcdi_phy_poll, 746 .poll = efx_mcdi_phy_poll,
747 .fini = efx_port_dummy_op_void, 747 .fini = efx_port_dummy_op_void,
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 7ab385c8136d..9acfd6696ffb 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -228,7 +228,7 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
228/** 228/**
229 * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO. 229 * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
230 * @efx: Efx NIC 230 * @efx: Efx NIC
231 * @ecmd: New settings 231 * @ecmd: New settings
232 */ 232 */
233int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 233int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
234{ 234{
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index bc9dcd6b30d7..79c192272047 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -382,7 +382,7 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
382 return rc; 382 return rc;
383} 383}
384 384
385static struct efx_mtd_ops falcon_mtd_ops = { 385static const struct efx_mtd_ops falcon_mtd_ops = {
386 .read = falcon_mtd_read, 386 .read = falcon_mtd_read,
387 .erase = falcon_mtd_erase, 387 .erase = falcon_mtd_erase,
388 .write = falcon_mtd_write, 388 .write = falcon_mtd_write,
@@ -560,7 +560,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
560 return rc; 560 return rc;
561} 561}
562 562
563static struct efx_mtd_ops siena_mtd_ops = { 563static const struct efx_mtd_ops siena_mtd_ops = {
564 .read = siena_mtd_read, 564 .read = siena_mtd_read,
565 .erase = siena_mtd_erase, 565 .erase = siena_mtd_erase,
566 .write = siena_mtd_write, 566 .write = siena_mtd_write,
@@ -572,7 +572,7 @@ struct siena_nvram_type_info {
572 const char *name; 572 const char *name;
573}; 573};
574 574
575static struct siena_nvram_type_info siena_nvram_types[] = { 575static const struct siena_nvram_type_info siena_nvram_types[] = {
576 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, 576 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
577 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, 577 [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
578 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, 578 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
@@ -593,7 +593,7 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
593 unsigned int type) 593 unsigned int type)
594{ 594{
595 struct efx_mtd_partition *part = &efx_mtd->part[part_id]; 595 struct efx_mtd_partition *part = &efx_mtd->part[part_id];
596 struct siena_nvram_type_info *info; 596 const struct siena_nvram_type_info *info;
597 size_t size, erase_size; 597 size_t size, erase_size;
598 bool protected; 598 bool protected;
599 int rc; 599 int rc;
@@ -627,11 +627,10 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
627 struct efx_mtd *efx_mtd) 627 struct efx_mtd *efx_mtd)
628{ 628{
629 struct efx_mtd_partition *part; 629 struct efx_mtd_partition *part;
630 uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN / 630 uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM];
631 sizeof(uint16_t)];
632 int rc; 631 int rc;
633 632
634 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list); 633 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
635 if (rc) 634 if (rc)
636 return rc; 635 return rc;
637 636
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c49502bab6a3..53864014c2b4 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -13,10 +13,6 @@
13#ifndef EFX_NET_DRIVER_H 13#ifndef EFX_NET_DRIVER_H
14#define EFX_NET_DRIVER_H 14#define EFX_NET_DRIVER_H
15 15
16#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
17#define DEBUG
18#endif
19
20#include <linux/netdevice.h> 16#include <linux/netdevice.h>
21#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
22#include <linux/ethtool.h> 18#include <linux/ethtool.h>
@@ -42,7 +38,7 @@
42 38
43#define EFX_DRIVER_VERSION "3.1" 39#define EFX_DRIVER_VERSION "3.1"
44 40
45#ifdef EFX_ENABLE_DEBUG 41#ifdef DEBUG
46#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 42#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
47#define EFX_WARN_ON_PARANOID(x) WARN_ON(x) 43#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
48#else 44#else
@@ -209,12 +205,12 @@ struct efx_tx_queue {
209/** 205/**
210 * struct efx_rx_buffer - An Efx RX data buffer 206 * struct efx_rx_buffer - An Efx RX data buffer
211 * @dma_addr: DMA base address of the buffer 207 * @dma_addr: DMA base address of the buffer
212 * @skb: The associated socket buffer, if any. 208 * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
213 * If both this and page are %NULL, the buffer slot is currently free. 209 * Will be %NULL if the buffer slot is currently free.
214 * @page: The associated page buffer, if any. 210 * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
215 * If both this and skb are %NULL, the buffer slot is currently free. 211 * Will be %NULL if the buffer slot is currently free.
216 * @len: Buffer length, in bytes. 212 * @len: Buffer length, in bytes.
217 * @is_page: Indicates if @page is valid. If false, @skb is valid. 213 * @flags: Flags for buffer and packet state.
218 */ 214 */
219struct efx_rx_buffer { 215struct efx_rx_buffer {
220 dma_addr_t dma_addr; 216 dma_addr_t dma_addr;
@@ -223,8 +219,11 @@ struct efx_rx_buffer {
223 struct page *page; 219 struct page *page;
224 } u; 220 } u;
225 unsigned int len; 221 unsigned int len;
226 bool is_page; 222 u16 flags;
227}; 223};
224#define EFX_RX_BUF_PAGE 0x0001
225#define EFX_RX_PKT_CSUMMED 0x0002
226#define EFX_RX_PKT_DISCARD 0x0004
228 227
229/** 228/**
230 * struct efx_rx_page_state - Page-based rx buffer state 229 * struct efx_rx_page_state - Page-based rx buffer state
@@ -329,6 +328,7 @@ enum efx_rx_alloc_method {
329 * @eventq_mask: Event queue pointer mask 328 * @eventq_mask: Event queue pointer mask
330 * @eventq_read_ptr: Event queue read pointer 329 * @eventq_read_ptr: Event queue read pointer
331 * @last_eventq_read_ptr: Last event queue read pointer value. 330 * @last_eventq_read_ptr: Last event queue read pointer value.
331 * @last_irq_cpu: Last CPU to handle interrupt for this channel
332 * @irq_count: Number of IRQs since last adaptive moderation decision 332 * @irq_count: Number of IRQs since last adaptive moderation decision
333 * @irq_mod_score: IRQ moderation score 333 * @irq_mod_score: IRQ moderation score
334 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 334 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -359,6 +359,7 @@ struct efx_channel {
359 unsigned int eventq_read_ptr; 359 unsigned int eventq_read_ptr;
360 unsigned int last_eventq_read_ptr; 360 unsigned int last_eventq_read_ptr;
361 361
362 int last_irq_cpu;
362 unsigned int irq_count; 363 unsigned int irq_count;
363 unsigned int irq_mod_score; 364 unsigned int irq_mod_score;
364#ifdef CONFIG_RFS_ACCEL 365#ifdef CONFIG_RFS_ACCEL
@@ -380,7 +381,6 @@ struct efx_channel {
380 * access with prefetches. 381 * access with prefetches.
381 */ 382 */
382 struct efx_rx_buffer *rx_pkt; 383 struct efx_rx_buffer *rx_pkt;
383 bool rx_pkt_csummed;
384 384
385 struct efx_rx_queue rx_queue; 385 struct efx_rx_queue rx_queue;
386 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; 386 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
@@ -395,12 +395,12 @@ enum efx_led_mode {
395#define STRING_TABLE_LOOKUP(val, member) \ 395#define STRING_TABLE_LOOKUP(val, member) \
396 ((val) < member ## _max) ? member ## _names[val] : "(invalid)" 396 ((val) < member ## _max) ? member ## _names[val] : "(invalid)"
397 397
398extern const char *efx_loopback_mode_names[]; 398extern const char *const efx_loopback_mode_names[];
399extern const unsigned int efx_loopback_mode_max; 399extern const unsigned int efx_loopback_mode_max;
400#define LOOPBACK_MODE(efx) \ 400#define LOOPBACK_MODE(efx) \
401 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) 401 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
402 402
403extern const char *efx_reset_type_names[]; 403extern const char *const efx_reset_type_names[];
404extern const unsigned int efx_reset_type_max; 404extern const unsigned int efx_reset_type_max;
405#define RESET_TYPE(type) \ 405#define RESET_TYPE(type) \
406 STRING_TABLE_LOOKUP(type, efx_reset_type) 406 STRING_TABLE_LOOKUP(type, efx_reset_type)
@@ -474,18 +474,6 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
474} 474}
475 475
476/** 476/**
477 * struct efx_mac_operations - Efx MAC operations table
478 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
479 * @update_stats: Update statistics
480 * @check_fault: Check fault state. True if fault present.
481 */
482struct efx_mac_operations {
483 int (*reconfigure) (struct efx_nic *efx);
484 void (*update_stats) (struct efx_nic *efx);
485 bool (*check_fault)(struct efx_nic *efx);
486};
487
488/**
489 * struct efx_phy_operations - Efx PHY operations table 477 * struct efx_phy_operations - Efx PHY operations table
490 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, 478 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
491 * efx->loopback_modes. 479 * efx->loopback_modes.
@@ -552,64 +540,64 @@ struct efx_mac_stats {
552 u64 tx_bytes; 540 u64 tx_bytes;
553 u64 tx_good_bytes; 541 u64 tx_good_bytes;
554 u64 tx_bad_bytes; 542 u64 tx_bad_bytes;
555 unsigned long tx_packets; 543 u64 tx_packets;
556 unsigned long tx_bad; 544 u64 tx_bad;
557 unsigned long tx_pause; 545 u64 tx_pause;
558 unsigned long tx_control; 546 u64 tx_control;
559 unsigned long tx_unicast; 547 u64 tx_unicast;
560 unsigned long tx_multicast; 548 u64 tx_multicast;
561 unsigned long tx_broadcast; 549 u64 tx_broadcast;
562 unsigned long tx_lt64; 550 u64 tx_lt64;
563 unsigned long tx_64; 551 u64 tx_64;
564 unsigned long tx_65_to_127; 552 u64 tx_65_to_127;
565 unsigned long tx_128_to_255; 553 u64 tx_128_to_255;
566 unsigned long tx_256_to_511; 554 u64 tx_256_to_511;
567 unsigned long tx_512_to_1023; 555 u64 tx_512_to_1023;
568 unsigned long tx_1024_to_15xx; 556 u64 tx_1024_to_15xx;
569 unsigned long tx_15xx_to_jumbo; 557 u64 tx_15xx_to_jumbo;
570 unsigned long tx_gtjumbo; 558 u64 tx_gtjumbo;
571 unsigned long tx_collision; 559 u64 tx_collision;
572 unsigned long tx_single_collision; 560 u64 tx_single_collision;
573 unsigned long tx_multiple_collision; 561 u64 tx_multiple_collision;
574 unsigned long tx_excessive_collision; 562 u64 tx_excessive_collision;
575 unsigned long tx_deferred; 563 u64 tx_deferred;
576 unsigned long tx_late_collision; 564 u64 tx_late_collision;
577 unsigned long tx_excessive_deferred; 565 u64 tx_excessive_deferred;
578 unsigned long tx_non_tcpudp; 566 u64 tx_non_tcpudp;
579 unsigned long tx_mac_src_error; 567 u64 tx_mac_src_error;
580 unsigned long tx_ip_src_error; 568 u64 tx_ip_src_error;
581 u64 rx_bytes; 569 u64 rx_bytes;
582 u64 rx_good_bytes; 570 u64 rx_good_bytes;
583 u64 rx_bad_bytes; 571 u64 rx_bad_bytes;
584 unsigned long rx_packets; 572 u64 rx_packets;
585 unsigned long rx_good; 573 u64 rx_good;
586 unsigned long rx_bad; 574 u64 rx_bad;
587 unsigned long rx_pause; 575 u64 rx_pause;
588 unsigned long rx_control; 576 u64 rx_control;
589 unsigned long rx_unicast; 577 u64 rx_unicast;
590 unsigned long rx_multicast; 578 u64 rx_multicast;
591 unsigned long rx_broadcast; 579 u64 rx_broadcast;
592 unsigned long rx_lt64; 580 u64 rx_lt64;
593 unsigned long rx_64; 581 u64 rx_64;
594 unsigned long rx_65_to_127; 582 u64 rx_65_to_127;
595 unsigned long rx_128_to_255; 583 u64 rx_128_to_255;
596 unsigned long rx_256_to_511; 584 u64 rx_256_to_511;
597 unsigned long rx_512_to_1023; 585 u64 rx_512_to_1023;
598 unsigned long rx_1024_to_15xx; 586 u64 rx_1024_to_15xx;
599 unsigned long rx_15xx_to_jumbo; 587 u64 rx_15xx_to_jumbo;
600 unsigned long rx_gtjumbo; 588 u64 rx_gtjumbo;
601 unsigned long rx_bad_lt64; 589 u64 rx_bad_lt64;
602 unsigned long rx_bad_64_to_15xx; 590 u64 rx_bad_64_to_15xx;
603 unsigned long rx_bad_15xx_to_jumbo; 591 u64 rx_bad_15xx_to_jumbo;
604 unsigned long rx_bad_gtjumbo; 592 u64 rx_bad_gtjumbo;
605 unsigned long rx_overflow; 593 u64 rx_overflow;
606 unsigned long rx_missed; 594 u64 rx_missed;
607 unsigned long rx_false_carrier; 595 u64 rx_false_carrier;
608 unsigned long rx_symbol_error; 596 u64 rx_symbol_error;
609 unsigned long rx_align_error; 597 u64 rx_align_error;
610 unsigned long rx_length_error; 598 u64 rx_length_error;
611 unsigned long rx_internal_error; 599 u64 rx_internal_error;
612 unsigned long rx_good_lt64; 600 u64 rx_good_lt64;
613}; 601};
614 602
615/* Number of bits used in a multicast filter hash address */ 603/* Number of bits used in a multicast filter hash address */
@@ -640,6 +628,7 @@ struct efx_filter_state;
640 * @membase_phys: Memory BAR value as physical address 628 * @membase_phys: Memory BAR value as physical address
641 * @membase: Memory BAR value 629 * @membase: Memory BAR value
642 * @interrupt_mode: Interrupt mode 630 * @interrupt_mode: Interrupt mode
631 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
643 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 632 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
644 * @irq_rx_moderation: IRQ moderation time for RX event queues 633 * @irq_rx_moderation: IRQ moderation time for RX event queues
645 * @msg_enable: Log message enable flags 634 * @msg_enable: Log message enable flags
@@ -663,7 +652,7 @@ struct efx_filter_state;
663 * @int_error_expire: Time at which error count will be expired 652 * @int_error_expire: Time at which error count will be expired
664 * @irq_status: Interrupt status buffer 653 * @irq_status: Interrupt status buffer
665 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 654 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
666 * @fatal_irq_level: IRQ level (bit number) used for serious errors 655 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
667 * @mtd_list: List of MTDs attached to the NIC 656 * @mtd_list: List of MTDs attached to the NIC
668 * @nic_data: Hardware dependent state 657 * @nic_data: Hardware dependent state
669 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 658 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
@@ -676,7 +665,6 @@ struct efx_filter_state;
676 * @port_initialized: Port initialized? 665 * @port_initialized: Port initialized?
677 * @net_dev: Operating system network device. Consider holding the rtnl lock 666 * @net_dev: Operating system network device. Consider holding the rtnl lock
678 * @stats_buffer: DMA buffer for statistics 667 * @stats_buffer: DMA buffer for statistics
679 * @mac_op: MAC interface
680 * @phy_type: PHY type 668 * @phy_type: PHY type
681 * @phy_op: PHY interface 669 * @phy_op: PHY interface
682 * @phy_data: PHY private data (including PHY-specific stats) 670 * @phy_data: PHY private data (including PHY-specific stats)
@@ -695,15 +683,15 @@ struct efx_filter_state;
695 * @loopback_selftest: Offline self-test private state 683 * @loopback_selftest: Offline self-test private state
696 * @monitor_work: Hardware monitor workitem 684 * @monitor_work: Hardware monitor workitem
697 * @biu_lock: BIU (bus interface unit) lock 685 * @biu_lock: BIU (bus interface unit) lock
698 * @last_irq_cpu: Last CPU to handle interrupt. 686 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
699 * This register is written with the SMP processor ID whenever an 687 * field is used by efx_test_interrupts() to verify that an
700 * interrupt is handled. It is used by efx_nic_test_interrupt() 688 * interrupt has occurred.
701 * to verify that an interrupt has occurred.
702 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 689 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
703 * @mac_stats: MAC statistics. These include all statistics the MACs 690 * @mac_stats: MAC statistics. These include all statistics the MACs
704 * can provide. Generic code converts these into a standard 691 * can provide. Generic code converts these into a standard
705 * &struct net_device_stats. 692 * &struct net_device_stats.
706 * @stats_lock: Statistics update lock. Serialises statistics fetches 693 * @stats_lock: Statistics update lock. Serialises statistics fetches
694 * and access to @mac_stats.
707 * 695 *
708 * This is stored in the private area of the &struct net_device. 696 * This is stored in the private area of the &struct net_device.
709 */ 697 */
@@ -722,6 +710,7 @@ struct efx_nic {
722 void __iomem *membase; 710 void __iomem *membase;
723 711
724 enum efx_int_mode interrupt_mode; 712 enum efx_int_mode interrupt_mode;
713 unsigned int timer_quantum_ns;
725 bool irq_rx_adaptive; 714 bool irq_rx_adaptive;
726 unsigned int irq_rx_moderation; 715 unsigned int irq_rx_moderation;
727 u32 msg_enable; 716 u32 msg_enable;
@@ -749,7 +738,7 @@ struct efx_nic {
749 738
750 struct efx_buffer irq_status; 739 struct efx_buffer irq_status;
751 unsigned irq_zero_count; 740 unsigned irq_zero_count;
752 unsigned fatal_irq_level; 741 unsigned irq_level;
753 742
754#ifdef CONFIG_SFC_MTD 743#ifdef CONFIG_SFC_MTD
755 struct list_head mtd_list; 744 struct list_head mtd_list;
@@ -766,8 +755,6 @@ struct efx_nic {
766 755
767 struct efx_buffer stats_buffer; 756 struct efx_buffer stats_buffer;
768 757
769 const struct efx_mac_operations *mac_op;
770
771 unsigned int phy_type; 758 unsigned int phy_type;
772 const struct efx_phy_operations *phy_op; 759 const struct efx_phy_operations *phy_op;
773 void *phy_data; 760 void *phy_data;
@@ -795,7 +782,7 @@ struct efx_nic {
795 782
796 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 783 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
797 spinlock_t biu_lock; 784 spinlock_t biu_lock;
798 volatile signed int last_irq_cpu; 785 int last_irq_cpu;
799 unsigned n_rx_nodesc_drop_cnt; 786 unsigned n_rx_nodesc_drop_cnt;
800 struct efx_mac_stats mac_stats; 787 struct efx_mac_stats mac_stats;
801 spinlock_t stats_lock; 788 spinlock_t stats_lock;
@@ -806,15 +793,6 @@ static inline int efx_dev_registered(struct efx_nic *efx)
806 return efx->net_dev->reg_state == NETREG_REGISTERED; 793 return efx->net_dev->reg_state == NETREG_REGISTERED;
807} 794}
808 795
809/* Net device name, for inclusion in log messages if it has been registered.
810 * Use efx->name not efx->net_dev->name so that races with (un)registration
811 * are harmless.
812 */
813static inline const char *efx_dev_name(struct efx_nic *efx)
814{
815 return efx_dev_registered(efx) ? efx->name : "";
816}
817
818static inline unsigned int efx_port_num(struct efx_nic *efx) 796static inline unsigned int efx_port_num(struct efx_nic *efx)
819{ 797{
820 return efx->net_dev->dev_id; 798 return efx->net_dev->dev_id;
@@ -840,14 +818,15 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
840 * @stop_stats: Stop the regular fetching of statistics 818 * @stop_stats: Stop the regular fetching of statistics
841 * @set_id_led: Set state of identifying LED or revert to automatic function 819 * @set_id_led: Set state of identifying LED or revert to automatic function
842 * @push_irq_moderation: Apply interrupt moderation value 820 * @push_irq_moderation: Apply interrupt moderation value
843 * @push_multicast_hash: Apply multicast hash table
844 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY 821 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
822 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
823 * to the hardware. Serialised by the mac_lock.
824 * @check_mac_fault: Check MAC fault state. True if fault present.
845 * @get_wol: Get WoL configuration from driver state 825 * @get_wol: Get WoL configuration from driver state
846 * @set_wol: Push WoL configuration to the NIC 826 * @set_wol: Push WoL configuration to the NIC
847 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) 827 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
848 * @test_registers: Test read/write functionality of control registers 828 * @test_registers: Test read/write functionality of control registers
849 * @test_nvram: Test validity of NVRAM contents 829 * @test_nvram: Test validity of NVRAM contents
850 * @default_mac_ops: efx_mac_operations to set at startup
851 * @revision: Hardware architecture revision 830 * @revision: Hardware architecture revision
852 * @mem_map_size: Memory BAR mapped size 831 * @mem_map_size: Memory BAR mapped size
853 * @txd_ptr_tbl_base: TX descriptor ring base address 832 * @txd_ptr_tbl_base: TX descriptor ring base address
@@ -862,6 +841,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
862 * from &enum efx_init_mode. 841 * from &enum efx_init_mode.
863 * @phys_addr_channels: Number of channels with physically addressed 842 * @phys_addr_channels: Number of channels with physically addressed
864 * descriptors 843 * descriptors
844 * @timer_period_max: Maximum period of interrupt timer (in ticks)
865 * @tx_dc_base: Base address in SRAM of TX queue descriptor caches 845 * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
866 * @rx_dc_base: Base address in SRAM of RX queue descriptor caches 846 * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
867 * @offload_features: net_device feature flags for protocol offload 847 * @offload_features: net_device feature flags for protocol offload
@@ -885,14 +865,14 @@ struct efx_nic_type {
885 void (*stop_stats)(struct efx_nic *efx); 865 void (*stop_stats)(struct efx_nic *efx);
886 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); 866 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
887 void (*push_irq_moderation)(struct efx_channel *channel); 867 void (*push_irq_moderation)(struct efx_channel *channel);
888 void (*push_multicast_hash)(struct efx_nic *efx);
889 int (*reconfigure_port)(struct efx_nic *efx); 868 int (*reconfigure_port)(struct efx_nic *efx);
869 int (*reconfigure_mac)(struct efx_nic *efx);
870 bool (*check_mac_fault)(struct efx_nic *efx);
890 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); 871 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
891 int (*set_wol)(struct efx_nic *efx, u32 type); 872 int (*set_wol)(struct efx_nic *efx, u32 type);
892 void (*resume_wol)(struct efx_nic *efx); 873 void (*resume_wol)(struct efx_nic *efx);
893 int (*test_registers)(struct efx_nic *efx); 874 int (*test_registers)(struct efx_nic *efx);
894 int (*test_nvram)(struct efx_nic *efx); 875 int (*test_nvram)(struct efx_nic *efx);
895 const struct efx_mac_operations *default_mac_ops;
896 876
897 int revision; 877 int revision;
898 unsigned int mem_map_size; 878 unsigned int mem_map_size;
@@ -906,6 +886,7 @@ struct efx_nic_type {
906 unsigned int rx_buffer_padding; 886 unsigned int rx_buffer_padding;
907 unsigned int max_interrupt_mode; 887 unsigned int max_interrupt_mode;
908 unsigned int phys_addr_channels; 888 unsigned int phys_addr_channels;
889 unsigned int timer_period_max;
909 unsigned int tx_dc_base; 890 unsigned int tx_dc_base;
910 unsigned int rx_dc_base; 891 unsigned int rx_dc_base;
911 netdev_features_t offload_features; 892 netdev_features_t offload_features;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 3edfbaf5f022..a43d1ca270c0 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -726,11 +726,9 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
726 tx_queue = efx_channel_get_tx_queue( 726 tx_queue = efx_channel_get_tx_queue(
727 channel, tx_ev_q_label % EFX_TXQ_TYPES); 727 channel, tx_ev_q_label % EFX_TXQ_TYPES);
728 728
729 if (efx_dev_registered(efx)) 729 netif_tx_lock(efx->net_dev);
730 netif_tx_lock(efx->net_dev);
731 efx_notify_tx_desc(tx_queue); 730 efx_notify_tx_desc(tx_queue);
732 if (efx_dev_registered(efx)) 731 netif_tx_unlock(efx->net_dev);
733 netif_tx_unlock(efx->net_dev);
734 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && 732 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
735 EFX_WORKAROUND_10727(efx)) { 733 EFX_WORKAROUND_10727(efx)) {
736 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 734 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
@@ -745,10 +743,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
745} 743}
746 744
747/* Detect errors included in the rx_evt_pkt_ok bit. */ 745/* Detect errors included in the rx_evt_pkt_ok bit. */
748static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 746static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
749 const efx_qword_t *event, 747 const efx_qword_t *event)
750 bool *rx_ev_pkt_ok,
751 bool *discard)
752{ 748{
753 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 749 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
754 struct efx_nic *efx = rx_queue->efx; 750 struct efx_nic *efx = rx_queue->efx;
@@ -793,15 +789,11 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
793 ++channel->n_rx_tcp_udp_chksum_err; 789 ++channel->n_rx_tcp_udp_chksum_err;
794 } 790 }
795 791
796 /* The frame must be discarded if any of these are true. */
797 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
798 rx_ev_tobe_disc | rx_ev_pause_frm);
799
800 /* TOBE_DISC is expected on unicast mismatches; don't print out an 792 /* TOBE_DISC is expected on unicast mismatches; don't print out an
801 * error message. FRM_TRUNC indicates RXDP dropped the packet due 793 * error message. FRM_TRUNC indicates RXDP dropped the packet due
802 * to a FIFO overflow. 794 * to a FIFO overflow.
803 */ 795 */
804#ifdef EFX_ENABLE_DEBUG 796#ifdef DEBUG
805 if (rx_ev_other_err && net_ratelimit()) { 797 if (rx_ev_other_err && net_ratelimit()) {
806 netif_dbg(efx, rx_err, efx->net_dev, 798 netif_dbg(efx, rx_err, efx->net_dev,
807 " RX queue %d unexpected RX event " 799 " RX queue %d unexpected RX event "
@@ -819,6 +811,11 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
819 rx_ev_pause_frm ? " [PAUSE]" : ""); 811 rx_ev_pause_frm ? " [PAUSE]" : "");
820 } 812 }
821#endif 813#endif
814
815 /* The frame must be discarded if any of these are true. */
816 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
817 rx_ev_tobe_disc | rx_ev_pause_frm) ?
818 EFX_RX_PKT_DISCARD : 0;
822} 819}
823 820
824/* Handle receive events that are not in-order. */ 821/* Handle receive events that are not in-order. */
@@ -851,7 +848,8 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
851 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 848 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
852 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 849 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
853 unsigned expected_ptr; 850 unsigned expected_ptr;
854 bool rx_ev_pkt_ok, discard = false, checksummed; 851 bool rx_ev_pkt_ok;
852 u16 flags;
855 struct efx_rx_queue *rx_queue; 853 struct efx_rx_queue *rx_queue;
856 854
857 /* Basic packet information */ 855 /* Basic packet information */
@@ -874,12 +872,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
874 /* If packet is marked as OK and packet type is TCP/IP or 872 /* If packet is marked as OK and packet type is TCP/IP or
875 * UDP/IP, then we can rely on the hardware checksum. 873 * UDP/IP, then we can rely on the hardware checksum.
876 */ 874 */
877 checksummed = 875 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
878 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 876 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
879 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP; 877 EFX_RX_PKT_CSUMMED : 0;
880 } else { 878 } else {
881 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); 879 flags = efx_handle_rx_not_ok(rx_queue, event);
882 checksummed = false;
883 } 880 }
884 881
885 /* Detect multicast packets that didn't match the filter */ 882 /* Detect multicast packets that didn't match the filter */
@@ -890,15 +887,14 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
890 887
891 if (unlikely(!rx_ev_mcast_hash_match)) { 888 if (unlikely(!rx_ev_mcast_hash_match)) {
892 ++channel->n_rx_mcast_mismatch; 889 ++channel->n_rx_mcast_mismatch;
893 discard = true; 890 flags |= EFX_RX_PKT_DISCARD;
894 } 891 }
895 } 892 }
896 893
897 channel->irq_mod_score += 2; 894 channel->irq_mod_score += 2;
898 895
899 /* Handle received packet */ 896 /* Handle received packet */
900 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, 897 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
901 checksummed, discard);
902} 898}
903 899
904static void 900static void
@@ -1311,7 +1307,7 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
1311 efx_oword_t int_en_reg_ker; 1307 efx_oword_t int_en_reg_ker;
1312 1308
1313 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1309 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1314 FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level, 1310 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1315 FRF_AZ_KER_INT_KER, force, 1311 FRF_AZ_KER_INT_KER, force,
1316 FRF_AZ_DRV_INT_EN_KER, enabled); 1312 FRF_AZ_DRV_INT_EN_KER, enabled);
1317 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1313 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1427,11 +1423,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1427 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1423 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1428 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1424 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1429 1425
1430 /* Check to see if we have a serious error condition */ 1426 /* Handle non-event-queue sources */
1431 if (queues & (1U << efx->fatal_irq_level)) { 1427 if (queues & (1U << efx->irq_level)) {
1432 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1428 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1433 if (unlikely(syserr)) 1429 if (unlikely(syserr))
1434 return efx_nic_fatal_interrupt(efx); 1430 return efx_nic_fatal_interrupt(efx);
1431 efx->last_irq_cpu = raw_smp_processor_id();
1435 } 1432 }
1436 1433
1437 if (queues != 0) { 1434 if (queues != 0) {
@@ -1441,7 +1438,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1441 /* Schedule processing of any interrupting queues */ 1438 /* Schedule processing of any interrupting queues */
1442 efx_for_each_channel(channel, efx) { 1439 efx_for_each_channel(channel, efx) {
1443 if (queues & 1) 1440 if (queues & 1)
1444 efx_schedule_channel(channel); 1441 efx_schedule_channel_irq(channel);
1445 queues >>= 1; 1442 queues >>= 1;
1446 } 1443 }
1447 result = IRQ_HANDLED; 1444 result = IRQ_HANDLED;
@@ -1458,18 +1455,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1458 efx_for_each_channel(channel, efx) { 1455 efx_for_each_channel(channel, efx) {
1459 event = efx_event(channel, channel->eventq_read_ptr); 1456 event = efx_event(channel, channel->eventq_read_ptr);
1460 if (efx_event_present(event)) 1457 if (efx_event_present(event))
1461 efx_schedule_channel(channel); 1458 efx_schedule_channel_irq(channel);
1462 else 1459 else
1463 efx_nic_eventq_read_ack(channel); 1460 efx_nic_eventq_read_ack(channel);
1464 } 1461 }
1465 } 1462 }
1466 1463
1467 if (result == IRQ_HANDLED) { 1464 if (result == IRQ_HANDLED)
1468 efx->last_irq_cpu = raw_smp_processor_id();
1469 netif_vdbg(efx, intr, efx->net_dev, 1465 netif_vdbg(efx, intr, efx->net_dev,
1470 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1466 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1471 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1467 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1472 }
1473 1468
1474 return result; 1469 return result;
1475} 1470}
@@ -1488,20 +1483,20 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1488 efx_oword_t *int_ker = efx->irq_status.addr; 1483 efx_oword_t *int_ker = efx->irq_status.addr;
1489 int syserr; 1484 int syserr;
1490 1485
1491 efx->last_irq_cpu = raw_smp_processor_id();
1492 netif_vdbg(efx, intr, efx->net_dev, 1486 netif_vdbg(efx, intr, efx->net_dev,
1493 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1487 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1494 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1488 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1495 1489
1496 /* Check to see if we have a serious error condition */ 1490 /* Handle non-event-queue sources */
1497 if (channel->channel == efx->fatal_irq_level) { 1491 if (channel->channel == efx->irq_level) {
1498 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1492 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1499 if (unlikely(syserr)) 1493 if (unlikely(syserr))
1500 return efx_nic_fatal_interrupt(efx); 1494 return efx_nic_fatal_interrupt(efx);
1495 efx->last_irq_cpu = raw_smp_processor_id();
1501 } 1496 }
1502 1497
1503 /* Schedule processing of the channel */ 1498 /* Schedule processing of the channel */
1504 efx_schedule_channel(channel); 1499 efx_schedule_channel_irq(channel);
1505 1500
1506 return IRQ_HANDLED; 1501 return IRQ_HANDLED;
1507} 1502}
@@ -1640,10 +1635,10 @@ void efx_nic_init_common(struct efx_nic *efx)
1640 1635
1641 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1636 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1642 /* Use an interrupt level unused by event queues */ 1637 /* Use an interrupt level unused by event queues */
1643 efx->fatal_irq_level = 0x1f; 1638 efx->irq_level = 0x1f;
1644 else 1639 else
1645 /* Use a valid MSI-X vector */ 1640 /* Use a valid MSI-X vector */
1646 efx->fatal_irq_level = 0; 1641 efx->irq_level = 0;
1647 1642
1648 /* Enable all the genuinely fatal interrupts. (They are still 1643 /* Enable all the genuinely fatal interrupts. (They are still
1649 * masked by the overall interrupt mask, controlled by 1644 * masked by the overall interrupt mask, controlled by
@@ -1837,7 +1832,7 @@ struct efx_nic_reg_table {
1837 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ 1832 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1838 step, rows \ 1833 step, rows \
1839} 1834}
1840#define REGISTER_TABLE(name, min_rev, max_rev) \ 1835#define REGISTER_TABLE(name, min_rev, max_rev) \
1841 REGISTER_TABLE_DIMENSIONS( \ 1836 REGISTER_TABLE_DIMENSIONS( \
1842 name, FR_ ## min_rev ## max_rev ## _ ## name, \ 1837 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1843 min_rev, max_rev, \ 1838 min_rev, max_rev, \
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 5fb24d3aa3ca..905a1877d603 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -144,12 +144,26 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
144 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
145 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
146 * @wol_filter_id: Wake-on-LAN packet filter id 146 * @wol_filter_id: Wake-on-LAN packet filter id
147 * @hwmon: Hardware monitor state
147 */ 148 */
148struct siena_nic_data { 149struct siena_nic_data {
149 struct efx_mcdi_iface mcdi; 150 struct efx_mcdi_iface mcdi;
150 int wol_filter_id; 151 int wol_filter_id;
152#ifdef CONFIG_SFC_MCDI_MON
153 struct efx_mcdi_mon hwmon;
154#endif
151}; 155};
152 156
157#ifdef CONFIG_SFC_MCDI_MON
158static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
159{
160 struct siena_nic_data *nic_data;
161 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
162 nic_data = efx->nic_data;
163 return &nic_data->hwmon;
164}
165#endif
166
153extern const struct efx_nic_type falcon_a1_nic_type; 167extern const struct efx_nic_type falcon_a1_nic_type;
154extern const struct efx_nic_type falcon_b0_nic_type; 168extern const struct efx_nic_type falcon_b0_nic_type;
155extern const struct efx_nic_type siena_a0_nic_type; 169extern const struct efx_nic_type siena_a0_nic_type;
@@ -189,6 +203,9 @@ extern bool efx_nic_event_present(struct efx_channel *channel);
189/* MAC/PHY */ 203/* MAC/PHY */
190extern void falcon_drain_tx_fifo(struct efx_nic *efx); 204extern void falcon_drain_tx_fifo(struct efx_nic *efx);
191extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 205extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
206extern bool falcon_xmac_check_fault(struct efx_nic *efx);
207extern int falcon_reconfigure_xmac(struct efx_nic *efx);
208extern void falcon_update_stats_xmac(struct efx_nic *efx);
192 209
193/* Interrupts and test events */ 210/* Interrupts and test events */
194extern int efx_nic_init_interrupt(struct efx_nic *efx); 211extern int efx_nic_init_interrupt(struct efx_nic *efx);
@@ -202,9 +219,6 @@ extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
202extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); 219extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
203extern void falcon_irq_ack_a1(struct efx_nic *efx); 220extern void falcon_irq_ack_a1(struct efx_nic *efx);
204 221
205#define EFX_IRQ_MOD_RESOLUTION 5
206#define EFX_IRQ_MOD_MAX 0x1000
207
208/* Global Resources */ 222/* Global Resources */
209extern int efx_nic_flush_queues(struct efx_nic *efx); 223extern int efx_nic_flush_queues(struct efx_nic *efx);
210extern void falcon_start_nic_stats(struct efx_nic *efx); 224extern void falcon_start_nic_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 7ad97e397406..8a7caf88ffb6 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -47,7 +47,7 @@
47#define PMA_PMD_FTX_STATIC_LBN 13 47#define PMA_PMD_FTX_STATIC_LBN 13
48#define PMA_PMD_VEND1_REG 0xc001 48#define PMA_PMD_VEND1_REG 0xc001
49#define PMA_PMD_VEND1_LBTXD_LBN 15 49#define PMA_PMD_VEND1_LBTXD_LBN 15
50#define PCS_VEND1_REG 0xc000 50#define PCS_VEND1_REG 0xc000
51#define PCS_VEND1_LBTXD_LBN 5 51#define PCS_VEND1_LBTXD_LBN 5
52 52
53void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode) 53void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
@@ -453,9 +453,9 @@ const struct efx_phy_operations falcon_qt202x_phy_ops = {
453 .probe = qt202x_phy_probe, 453 .probe = qt202x_phy_probe,
454 .init = qt202x_phy_init, 454 .init = qt202x_phy_init,
455 .reconfigure = qt202x_phy_reconfigure, 455 .reconfigure = qt202x_phy_reconfigure,
456 .poll = qt202x_phy_poll, 456 .poll = qt202x_phy_poll,
457 .fini = efx_port_dummy_op_void, 457 .fini = efx_port_dummy_op_void,
458 .remove = qt202x_phy_remove, 458 .remove = qt202x_phy_remove,
459 .get_settings = qt202x_phy_get_settings, 459 .get_settings = qt202x_phy_get_settings,
460 .set_settings = efx_mdio_set_settings, 460 .set_settings = efx_mdio_set_settings,
461 .test_alive = efx_mdio_test_alive, 461 .test_alive = efx_mdio_test_alive,
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index aca349861767..1dfda5e27919 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -98,8 +98,8 @@ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
98 /* Offset is always within one page, so we don't need to consider 98 /* Offset is always within one page, so we don't need to consider
99 * the page order. 99 * the page order.
100 */ 100 */
101 return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) + 101 return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
102 efx->type->rx_buffer_hash_size); 102 efx->type->rx_buffer_hash_size;
103} 103}
104static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 104static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
105{ 105{
@@ -108,11 +108,10 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
108 108
109static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) 109static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
110{ 110{
111 if (buf->is_page) 111 if (buf->flags & EFX_RX_BUF_PAGE)
112 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); 112 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
113 else 113 else
114 return ((u8 *)buf->u.skb->data + 114 return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
115 efx->type->rx_buffer_hash_size);
116} 115}
117 116
118static inline u32 efx_rx_buf_hash(const u8 *eh) 117static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -122,10 +121,10 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
122 return __le32_to_cpup((const __le32 *)(eh - 4)); 121 return __le32_to_cpup((const __le32 *)(eh - 4));
123#else 122#else
124 const u8 *data = eh - 4; 123 const u8 *data = eh - 4;
125 return ((u32)data[0] | 124 return (u32)data[0] |
126 (u32)data[1] << 8 | 125 (u32)data[1] << 8 |
127 (u32)data[2] << 16 | 126 (u32)data[2] << 16 |
128 (u32)data[3] << 24); 127 (u32)data[3] << 24;
129#endif 128#endif
130} 129}
131 130
@@ -159,7 +158,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
159 /* Adjust the SKB for padding and checksum */ 158 /* Adjust the SKB for padding and checksum */
160 skb_reserve(skb, NET_IP_ALIGN); 159 skb_reserve(skb, NET_IP_ALIGN);
161 rx_buf->len = skb_len - NET_IP_ALIGN; 160 rx_buf->len = skb_len - NET_IP_ALIGN;
162 rx_buf->is_page = false; 161 rx_buf->flags = 0;
163 skb->ip_summed = CHECKSUM_UNNECESSARY; 162 skb->ip_summed = CHECKSUM_UNNECESSARY;
164 163
165 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 164 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
@@ -228,7 +227,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
228 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 227 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
229 rx_buf->u.page = page; 228 rx_buf->u.page = page;
230 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 229 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
231 rx_buf->is_page = true; 230 rx_buf->flags = EFX_RX_BUF_PAGE;
232 ++rx_queue->added_count; 231 ++rx_queue->added_count;
233 ++rx_queue->alloc_page_count; 232 ++rx_queue->alloc_page_count;
234 ++state->refcnt; 233 ++state->refcnt;
@@ -249,7 +248,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
249static void efx_unmap_rx_buffer(struct efx_nic *efx, 248static void efx_unmap_rx_buffer(struct efx_nic *efx,
250 struct efx_rx_buffer *rx_buf) 249 struct efx_rx_buffer *rx_buf)
251{ 250{
252 if (rx_buf->is_page && rx_buf->u.page) { 251 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
253 struct efx_rx_page_state *state; 252 struct efx_rx_page_state *state;
254 253
255 state = page_address(rx_buf->u.page); 254 state = page_address(rx_buf->u.page);
@@ -259,7 +258,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
259 efx_rx_buf_size(efx), 258 efx_rx_buf_size(efx),
260 PCI_DMA_FROMDEVICE); 259 PCI_DMA_FROMDEVICE);
261 } 260 }
262 } else if (!rx_buf->is_page && rx_buf->u.skb) { 261 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
263 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 262 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
264 rx_buf->len, PCI_DMA_FROMDEVICE); 263 rx_buf->len, PCI_DMA_FROMDEVICE);
265 } 264 }
@@ -268,10 +267,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
268static void efx_free_rx_buffer(struct efx_nic *efx, 267static void efx_free_rx_buffer(struct efx_nic *efx,
269 struct efx_rx_buffer *rx_buf) 268 struct efx_rx_buffer *rx_buf)
270{ 269{
271 if (rx_buf->is_page && rx_buf->u.page) { 270 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
272 __free_pages(rx_buf->u.page, efx->rx_buffer_order); 271 __free_pages(rx_buf->u.page, efx->rx_buffer_order);
273 rx_buf->u.page = NULL; 272 rx_buf->u.page = NULL;
274 } else if (!rx_buf->is_page && rx_buf->u.skb) { 273 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
275 dev_kfree_skb_any(rx_buf->u.skb); 274 dev_kfree_skb_any(rx_buf->u.skb);
276 rx_buf->u.skb = NULL; 275 rx_buf->u.skb = NULL;
277 } 276 }
@@ -311,7 +310,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
311 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 310 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
312 new_buf->u.page = rx_buf->u.page; 311 new_buf->u.page = rx_buf->u.page;
313 new_buf->len = rx_buf->len; 312 new_buf->len = rx_buf->len;
314 new_buf->is_page = true; 313 new_buf->flags = EFX_RX_BUF_PAGE;
315 ++rx_queue->added_count; 314 ++rx_queue->added_count;
316} 315}
317 316
@@ -325,7 +324,10 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
325 struct efx_rx_buffer *new_buf; 324 struct efx_rx_buffer *new_buf;
326 unsigned index; 325 unsigned index;
327 326
328 if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && 327 rx_buf->flags &= EFX_RX_BUF_PAGE;
328
329 if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
330 efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
329 page_count(rx_buf->u.page) == 1) 331 page_count(rx_buf->u.page) == 1)
330 efx_resurrect_rx_buffer(rx_queue, rx_buf); 332 efx_resurrect_rx_buffer(rx_queue, rx_buf);
331 333
@@ -412,8 +414,7 @@ void efx_rx_slow_fill(unsigned long context)
412 414
413static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 415static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
414 struct efx_rx_buffer *rx_buf, 416 struct efx_rx_buffer *rx_buf,
415 int len, bool *discard, 417 int len, bool *leak_packet)
416 bool *leak_packet)
417{ 418{
418 struct efx_nic *efx = rx_queue->efx; 419 struct efx_nic *efx = rx_queue->efx;
419 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; 420 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -424,7 +425,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
424 /* The packet must be discarded, but this is only a fatal error 425 /* The packet must be discarded, but this is only a fatal error
425 * if the caller indicated it was 426 * if the caller indicated it was
426 */ 427 */
427 *discard = true; 428 rx_buf->flags |= EFX_RX_PKT_DISCARD;
428 429
429 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { 430 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
430 if (net_ratelimit()) 431 if (net_ratelimit())
@@ -437,7 +438,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
437 * data at the end of the skb will be trashed. So 438 * data at the end of the skb will be trashed. So
438 * we have no choice but to leak the fragment. 439 * we have no choice but to leak the fragment.
439 */ 440 */
440 *leak_packet = !rx_buf->is_page; 441 *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
441 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 442 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
442 } else { 443 } else {
443 if (net_ratelimit()) 444 if (net_ratelimit())
@@ -457,13 +458,13 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
457 */ 458 */
458static void efx_rx_packet_gro(struct efx_channel *channel, 459static void efx_rx_packet_gro(struct efx_channel *channel,
459 struct efx_rx_buffer *rx_buf, 460 struct efx_rx_buffer *rx_buf,
460 const u8 *eh, bool checksummed) 461 const u8 *eh)
461{ 462{
462 struct napi_struct *napi = &channel->napi_str; 463 struct napi_struct *napi = &channel->napi_str;
463 gro_result_t gro_result; 464 gro_result_t gro_result;
464 465
465 /* Pass the skb/page into the GRO engine */ 466 /* Pass the skb/page into the GRO engine */
466 if (rx_buf->is_page) { 467 if (rx_buf->flags & EFX_RX_BUF_PAGE) {
467 struct efx_nic *efx = channel->efx; 468 struct efx_nic *efx = channel->efx;
468 struct page *page = rx_buf->u.page; 469 struct page *page = rx_buf->u.page;
469 struct sk_buff *skb; 470 struct sk_buff *skb;
@@ -485,8 +486,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
485 skb->len = rx_buf->len; 486 skb->len = rx_buf->len;
486 skb->data_len = rx_buf->len; 487 skb->data_len = rx_buf->len;
487 skb->truesize += rx_buf->len; 488 skb->truesize += rx_buf->len;
488 skb->ip_summed = 489 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
489 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 490 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
490 491
491 skb_record_rx_queue(skb, channel->channel); 492 skb_record_rx_queue(skb, channel->channel);
492 493
@@ -494,7 +495,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
494 } else { 495 } else {
495 struct sk_buff *skb = rx_buf->u.skb; 496 struct sk_buff *skb = rx_buf->u.skb;
496 497
497 EFX_BUG_ON_PARANOID(!checksummed); 498 EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
498 rx_buf->u.skb = NULL; 499 rx_buf->u.skb = NULL;
499 500
500 gro_result = napi_gro_receive(napi, skb); 501 gro_result = napi_gro_receive(napi, skb);
@@ -509,7 +510,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
509} 510}
510 511
511void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 512void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
512 unsigned int len, bool checksummed, bool discard) 513 unsigned int len, u16 flags)
513{ 514{
514 struct efx_nic *efx = rx_queue->efx; 515 struct efx_nic *efx = rx_queue->efx;
515 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 516 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
@@ -517,6 +518,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
517 bool leak_packet = false; 518 bool leak_packet = false;
518 519
519 rx_buf = efx_rx_buffer(rx_queue, index); 520 rx_buf = efx_rx_buffer(rx_queue, index);
521 rx_buf->flags |= flags;
520 522
521 /* This allows the refill path to post another buffer. 523 /* This allows the refill path to post another buffer.
522 * EFX_RXD_HEAD_ROOM ensures that the slot we are using 524 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -525,18 +527,17 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
525 rx_queue->removed_count++; 527 rx_queue->removed_count++;
526 528
527 /* Validate the length encoded in the event vs the descriptor pushed */ 529 /* Validate the length encoded in the event vs the descriptor pushed */
528 efx_rx_packet__check_len(rx_queue, rx_buf, len, 530 efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
529 &discard, &leak_packet);
530 531
531 netif_vdbg(efx, rx_status, efx->net_dev, 532 netif_vdbg(efx, rx_status, efx->net_dev,
532 "RX queue %d received id %x at %llx+%x %s%s\n", 533 "RX queue %d received id %x at %llx+%x %s%s\n",
533 efx_rx_queue_index(rx_queue), index, 534 efx_rx_queue_index(rx_queue), index,
534 (unsigned long long)rx_buf->dma_addr, len, 535 (unsigned long long)rx_buf->dma_addr, len,
535 (checksummed ? " [SUMMED]" : ""), 536 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
536 (discard ? " [DISCARD]" : "")); 537 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
537 538
538 /* Discard packet, if instructed to do so */ 539 /* Discard packet, if instructed to do so */
539 if (unlikely(discard)) { 540 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
540 if (unlikely(leak_packet)) 541 if (unlikely(leak_packet))
541 channel->n_skbuff_leaks++; 542 channel->n_skbuff_leaks++;
542 else 543 else
@@ -563,18 +564,33 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
563 rx_buf->len = len - efx->type->rx_buffer_hash_size; 564 rx_buf->len = len - efx->type->rx_buffer_hash_size;
564out: 565out:
565 if (channel->rx_pkt) 566 if (channel->rx_pkt)
566 __efx_rx_packet(channel, 567 __efx_rx_packet(channel, channel->rx_pkt);
567 channel->rx_pkt, channel->rx_pkt_csummed);
568 channel->rx_pkt = rx_buf; 568 channel->rx_pkt = rx_buf;
569 channel->rx_pkt_csummed = checksummed; 569}
570
571static void efx_rx_deliver(struct efx_channel *channel,
572 struct efx_rx_buffer *rx_buf)
573{
574 struct sk_buff *skb;
575
576 /* We now own the SKB */
577 skb = rx_buf->u.skb;
578 rx_buf->u.skb = NULL;
579
580 /* Set the SKB flags */
581 skb_checksum_none_assert(skb);
582
583 /* Pass the packet up */
584 netif_receive_skb(skb);
585
586 /* Update allocation strategy method */
587 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
570} 588}
571 589
572/* Handle a received packet. Second half: Touches packet payload. */ 590/* Handle a received packet. Second half: Touches packet payload. */
573void __efx_rx_packet(struct efx_channel *channel, 591void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
574 struct efx_rx_buffer *rx_buf, bool checksummed)
575{ 592{
576 struct efx_nic *efx = channel->efx; 593 struct efx_nic *efx = channel->efx;
577 struct sk_buff *skb;
578 u8 *eh = efx_rx_buf_eh(efx, rx_buf); 594 u8 *eh = efx_rx_buf_eh(efx, rx_buf);
579 595
580 /* If we're in loopback test, then pass the packet directly to the 596 /* If we're in loopback test, then pass the packet directly to the
@@ -586,8 +602,8 @@ void __efx_rx_packet(struct efx_channel *channel,
586 return; 602 return;
587 } 603 }
588 604
589 if (!rx_buf->is_page) { 605 if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
590 skb = rx_buf->u.skb; 606 struct sk_buff *skb = rx_buf->u.skb;
591 607
592 prefetch(skb_shinfo(skb)); 608 prefetch(skb_shinfo(skb));
593 609
@@ -605,25 +621,12 @@ void __efx_rx_packet(struct efx_channel *channel,
605 } 621 }
606 622
607 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 623 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
608 checksummed = false; 624 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
609
610 if (likely(checksummed || rx_buf->is_page)) {
611 efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
612 return;
613 }
614
615 /* We now own the SKB */
616 skb = rx_buf->u.skb;
617 rx_buf->u.skb = NULL;
618 625
619 /* Set the SKB flags */ 626 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
620 skb_checksum_none_assert(skb); 627 efx_rx_packet_gro(channel, rx_buf, eh);
621 628 else
622 /* Pass the packet up */ 629 efx_rx_deliver(channel, rx_buf);
623 netif_receive_skb(skb);
624
625 /* Update allocation strategy method */
626 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
627} 630}
628 631
629void efx_rx_strategy(struct efx_channel *channel) 632void efx_rx_strategy(struct efx_channel *channel)
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 52edd24fcde3..febe2a9e6211 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -19,7 +19,6 @@
19#include <linux/udp.h> 19#include <linux/udp.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <asm/io.h>
23#include "net_driver.h" 22#include "net_driver.h"
24#include "efx.h" 23#include "efx.h"
25#include "nic.h" 24#include "nic.h"
@@ -50,7 +49,7 @@ static const char payload_msg[] =
50 49
51/* Interrupt mode names */ 50/* Interrupt mode names */
52static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; 51static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
53static const char *efx_interrupt_mode_names[] = { 52static const char *const efx_interrupt_mode_names[] = {
54 [EFX_INT_MODE_MSIX] = "MSI-X", 53 [EFX_INT_MODE_MSIX] = "MSI-X",
55 [EFX_INT_MODE_MSI] = "MSI", 54 [EFX_INT_MODE_MSI] = "MSI",
56 [EFX_INT_MODE_LEGACY] = "legacy", 55 [EFX_INT_MODE_LEGACY] = "legacy",
@@ -131,6 +130,8 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
131static int efx_test_interrupts(struct efx_nic *efx, 130static int efx_test_interrupts(struct efx_nic *efx,
132 struct efx_self_tests *tests) 131 struct efx_self_tests *tests)
133{ 132{
133 int cpu;
134
134 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); 135 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
135 tests->interrupt = -1; 136 tests->interrupt = -1;
136 137
@@ -143,7 +144,8 @@ static int efx_test_interrupts(struct efx_nic *efx,
143 /* Wait for arrival of test interrupt. */ 144 /* Wait for arrival of test interrupt. */
144 netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); 145 netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
145 schedule_timeout_uninterruptible(HZ / 10); 146 schedule_timeout_uninterruptible(HZ / 10);
146 if (efx->last_irq_cpu >= 0) 147 cpu = ACCESS_ONCE(efx->last_irq_cpu);
148 if (cpu >= 0)
147 goto success; 149 goto success;
148 150
149 netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); 151 netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
@@ -151,8 +153,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
151 153
152 success: 154 success:
153 netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", 155 netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
154 INT_MODE(efx), 156 INT_MODE(efx), cpu);
155 efx->last_irq_cpu);
156 tests->interrupt = 1; 157 tests->interrupt = 1;
157 return 0; 158 return 0;
158} 159}
@@ -162,56 +163,57 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
162 struct efx_self_tests *tests) 163 struct efx_self_tests *tests)
163{ 164{
164 struct efx_nic *efx = channel->efx; 165 struct efx_nic *efx = channel->efx;
165 unsigned int read_ptr, count; 166 unsigned int read_ptr;
166 167 bool napi_ran, dma_seen, int_seen;
167 tests->eventq_dma[channel->channel] = -1;
168 tests->eventq_int[channel->channel] = -1;
169 tests->eventq_poll[channel->channel] = -1;
170 168
171 read_ptr = channel->eventq_read_ptr; 169 read_ptr = channel->eventq_read_ptr;
172 channel->efx->last_irq_cpu = -1; 170 channel->last_irq_cpu = -1;
173 smp_wmb(); 171 smp_wmb();
174 172
175 efx_nic_generate_test_event(channel); 173 efx_nic_generate_test_event(channel);
176 174
177 /* Wait for arrival of interrupt */ 175 /* Wait for arrival of interrupt. NAPI processing may or may
178 count = 0; 176 * not complete in time, but we can cope in any case.
179 do { 177 */
180 schedule_timeout_uninterruptible(HZ / 100); 178 msleep(10);
181 179 napi_disable(&channel->napi_str);
182 if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr) 180 if (channel->eventq_read_ptr != read_ptr) {
183 goto eventq_ok; 181 napi_ran = true;
184 } while (++count < 2); 182 dma_seen = true;
185 183 int_seen = true;
186 netif_err(efx, drv, efx->net_dev, 184 } else {
187 "channel %d timed out waiting for event queue\n", 185 napi_ran = false;
188 channel->channel); 186 dma_seen = efx_nic_event_present(channel);
189 187 int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;
190 /* See if interrupt arrived */
191 if (channel->efx->last_irq_cpu >= 0) {
192 netif_err(efx, drv, efx->net_dev,
193 "channel %d saw interrupt on CPU%d "
194 "during event queue test\n", channel->channel,
195 raw_smp_processor_id());
196 tests->eventq_int[channel->channel] = 1;
197 } 188 }
189 napi_enable(&channel->napi_str);
190 efx_nic_eventq_read_ack(channel);
191
192 tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
193 tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
198 194
199 /* Check to see if event was received even if interrupt wasn't */ 195 if (dma_seen && int_seen) {
200 if (efx_nic_event_present(channel)) { 196 netif_dbg(efx, drv, efx->net_dev,
197 "channel %d event queue passed (with%s NAPI)\n",
198 channel->channel, napi_ran ? "" : "out");
199 return 0;
200 } else {
201 /* Report failure and whether either interrupt or DMA worked */
201 netif_err(efx, drv, efx->net_dev, 202 netif_err(efx, drv, efx->net_dev,
202 "channel %d event was generated, but " 203 "channel %d timed out waiting for event queue\n",
203 "failed to trigger an interrupt\n", channel->channel); 204 channel->channel);
204 tests->eventq_dma[channel->channel] = 1; 205 if (int_seen)
206 netif_err(efx, drv, efx->net_dev,
207 "channel %d saw interrupt "
208 "during event queue test\n",
209 channel->channel);
210 if (dma_seen)
211 netif_err(efx, drv, efx->net_dev,
212 "channel %d event was generated, but "
213 "failed to trigger an interrupt\n",
214 channel->channel);
215 return -ETIMEDOUT;
205 } 216 }
206
207 return -ETIMEDOUT;
208 eventq_ok:
209 netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
210 channel->channel);
211 tests->eventq_dma[channel->channel] = 1;
212 tests->eventq_int[channel->channel] = 1;
213 tests->eventq_poll[channel->channel] = 1;
214 return 0;
215} 217}
216 218
217static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, 219static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
@@ -316,7 +318,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
316 return; 318 return;
317 319
318 err: 320 err:
319#ifdef EFX_ENABLE_DEBUG 321#ifdef DEBUG
320 if (atomic_read(&state->rx_bad) == 0) { 322 if (atomic_read(&state->rx_bad) == 0) {
321 netif_err(efx, drv, efx->net_dev, "received packet:\n"); 323 netif_err(efx, drv, efx->net_dev, "received packet:\n");
322 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 324 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
@@ -395,11 +397,9 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
395 * interrupt handler. */ 397 * interrupt handler. */
396 smp_wmb(); 398 smp_wmb();
397 399
398 if (efx_dev_registered(efx)) 400 netif_tx_lock_bh(efx->net_dev);
399 netif_tx_lock_bh(efx->net_dev);
400 rc = efx_enqueue_skb(tx_queue, skb); 401 rc = efx_enqueue_skb(tx_queue, skb);
401 if (efx_dev_registered(efx)) 402 netif_tx_unlock_bh(efx->net_dev);
402 netif_tx_unlock_bh(efx->net_dev);
403 403
404 if (rc != NETDEV_TX_OK) { 404 if (rc != NETDEV_TX_OK) {
405 netif_err(efx, drv, efx->net_dev, 405 netif_err(efx, drv, efx->net_dev,
@@ -440,20 +440,18 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
440 int tx_done = 0, rx_good, rx_bad; 440 int tx_done = 0, rx_good, rx_bad;
441 int i, rc = 0; 441 int i, rc = 0;
442 442
443 if (efx_dev_registered(efx)) 443 netif_tx_lock_bh(efx->net_dev);
444 netif_tx_lock_bh(efx->net_dev);
445 444
446 /* Count the number of tx completions, and decrement the refcnt. Any 445 /* Count the number of tx completions, and decrement the refcnt. Any
447 * skbs not already completed will be free'd when the queue is flushed */ 446 * skbs not already completed will be free'd when the queue is flushed */
448 for (i=0; i < state->packet_count; i++) { 447 for (i = 0; i < state->packet_count; i++) {
449 skb = state->skbs[i]; 448 skb = state->skbs[i];
450 if (skb && !skb_shared(skb)) 449 if (skb && !skb_shared(skb))
451 ++tx_done; 450 ++tx_done;
452 dev_kfree_skb_any(skb); 451 dev_kfree_skb_any(skb);
453 } 452 }
454 453
455 if (efx_dev_registered(efx)) 454 netif_tx_unlock_bh(efx->net_dev);
456 netif_tx_unlock_bh(efx->net_dev);
457 455
458 /* Check TX completion and received packet counts */ 456 /* Check TX completion and received packet counts */
459 rx_good = atomic_read(&state->rx_good); 457 rx_good = atomic_read(&state->rx_good);
@@ -570,7 +568,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
570 mutex_lock(&efx->mac_lock); 568 mutex_lock(&efx->mac_lock);
571 link_up = link_state->up; 569 link_up = link_state->up;
572 if (link_up) 570 if (link_up)
573 link_up = !efx->mac_op->check_fault(efx); 571 link_up = !efx->type->check_mac_fault(efx);
574 mutex_unlock(&efx->mac_lock); 572 mutex_unlock(&efx->mac_lock);
575 573
576 if (link_up) { 574 if (link_up) {
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index dba5456e70f3..87abe2a53846 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -37,7 +37,6 @@ struct efx_self_tests {
37 int interrupt; 37 int interrupt;
38 int eventq_dma[EFX_MAX_CHANNELS]; 38 int eventq_dma[EFX_MAX_CHANNELS];
39 int eventq_int[EFX_MAX_CHANNELS]; 39 int eventq_int[EFX_MAX_CHANNELS];
40 int eventq_poll[EFX_MAX_CHANNELS];
41 /* offline tests */ 40 /* offline tests */
42 int registers; 41 int registers;
43 int phy_ext[EFX_MAX_PHY_TESTS]; 42 int phy_ext[EFX_MAX_PHY_TESTS];
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 4d5d619feaa6..d3c4169e2a0b 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -18,7 +18,6 @@
18#include "bitfield.h" 18#include "bitfield.h"
19#include "efx.h" 19#include "efx.h"
20#include "nic.h" 20#include "nic.h"
21#include "mac.h"
22#include "spi.h" 21#include "spi.h"
23#include "regs.h" 22#include "regs.h"
24#include "io.h" 23#include "io.h"
@@ -36,8 +35,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
36{ 35{
37 efx_dword_t timer_cmd; 36 efx_dword_t timer_cmd;
38 37
39 BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_CZ_TC_TIMER_VAL_WIDTH));
40
41 if (channel->irq_moderation) 38 if (channel->irq_moderation)
42 EFX_POPULATE_DWORD_2(timer_cmd, 39 EFX_POPULATE_DWORD_2(timer_cmd,
43 FRF_CZ_TC_TIMER_MODE, 40 FRF_CZ_TC_TIMER_MODE,
@@ -53,15 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
53 channel->channel); 50 channel->channel);
54} 51}
55 52
56static void siena_push_multicast_hash(struct efx_nic *efx)
57{
58 WARN_ON(!mutex_is_locked(&efx->mac_lock));
59
60 efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
61 efx->multicast_hash.byte, sizeof(efx->multicast_hash),
62 NULL, 0, NULL);
63}
64
65static int siena_mdio_write(struct net_device *net_dev, 53static int siena_mdio_write(struct net_device *net_dev,
66 int prtad, int devad, u16 addr, u16 value) 54 int prtad, int devad, u16 addr, u16 value)
67{ 55{
@@ -226,7 +214,15 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
226 214
227static int siena_probe_nvconfig(struct efx_nic *efx) 215static int siena_probe_nvconfig(struct efx_nic *efx)
228{ 216{
229 return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL); 217 u32 caps = 0;
218 int rc;
219
220 rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);
221
222 efx->timer_quantum_ns =
223 (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
224 3072 : 6144; /* 768 cycles */
225 return rc;
230} 226}
231 227
232static int siena_probe_nic(struct efx_nic *efx) 228static int siena_probe_nic(struct efx_nic *efx)
@@ -304,6 +300,10 @@ static int siena_probe_nic(struct efx_nic *efx)
304 goto fail5; 300 goto fail5;
305 } 301 }
306 302
303 rc = efx_mcdi_mon_probe(efx);
304 if (rc)
305 goto fail5;
306
307 return 0; 307 return 0;
308 308
309fail5: 309fail5:
@@ -391,6 +391,8 @@ static int siena_init_nic(struct efx_nic *efx)
391 391
392static void siena_remove_nic(struct efx_nic *efx) 392static void siena_remove_nic(struct efx_nic *efx)
393{ 393{
394 efx_mcdi_mon_remove(efx);
395
394 efx_nic_free_buffer(efx, &efx->irq_status); 396 efx_nic_free_buffer(efx, &efx->irq_status);
395 397
396 siena_reset_hw(efx, RESET_TYPE_ALL); 398 siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -630,14 +632,14 @@ const struct efx_nic_type siena_a0_nic_type = {
630 .stop_stats = siena_stop_nic_stats, 632 .stop_stats = siena_stop_nic_stats,
631 .set_id_led = efx_mcdi_set_id_led, 633 .set_id_led = efx_mcdi_set_id_led,
632 .push_irq_moderation = siena_push_irq_moderation, 634 .push_irq_moderation = siena_push_irq_moderation,
633 .push_multicast_hash = siena_push_multicast_hash, 635 .reconfigure_mac = efx_mcdi_mac_reconfigure,
636 .check_mac_fault = efx_mcdi_mac_check_fault,
634 .reconfigure_port = efx_mcdi_phy_reconfigure, 637 .reconfigure_port = efx_mcdi_phy_reconfigure,
635 .get_wol = siena_get_wol, 638 .get_wol = siena_get_wol,
636 .set_wol = siena_set_wol, 639 .set_wol = siena_set_wol,
637 .resume_wol = siena_init_wol, 640 .resume_wol = siena_init_wol,
638 .test_registers = siena_test_registers, 641 .test_registers = siena_test_registers,
639 .test_nvram = efx_mcdi_nvram_test_all, 642 .test_nvram = efx_mcdi_nvram_test_all,
640 .default_mac_ops = &efx_mcdi_mac_operations,
641 643
642 .revision = EFX_REV_SIENA_A0, 644 .revision = EFX_REV_SIENA_A0,
643 .mem_map_size = (FR_CZ_MC_TREG_SMEM + 645 .mem_map_size = (FR_CZ_MC_TREG_SMEM +
@@ -654,6 +656,7 @@ const struct efx_nic_type siena_a0_nic_type = {
654 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 656 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
655 * interrupt handler only supports 32 657 * interrupt handler only supports 32
656 * channels */ 658 * channels */
659 .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
657 .tx_dc_base = 0x88000, 660 .tx_dc_base = 0x88000,
658 .rx_dc_base = 0x68000, 661 .rx_dc_base = 0x68000,
659 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 662 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
index 71f2e3ebe1c7..5431a1bbff5c 100644
--- a/drivers/net/ethernet/sfc/spi.h
+++ b/drivers/net/ethernet/sfc/spi.h
@@ -68,7 +68,7 @@ static inline bool efx_spi_present(const struct efx_spi_device *spi)
68 68
69int falcon_spi_cmd(struct efx_nic *efx, 69int falcon_spi_cmd(struct efx_nic *efx,
70 const struct efx_spi_device *spi, unsigned int command, 70 const struct efx_spi_device *spi, unsigned int command,
71 int address, const void* in, void *out, size_t len); 71 int address, const void *in, void *out, size_t len);
72int falcon_spi_wait_write(struct efx_nic *efx, 72int falcon_spi_wait_write(struct efx_nic *efx,
73 const struct efx_spi_device *spi); 73 const struct efx_spi_device *spi);
74int falcon_spi_read(struct efx_nic *efx, 74int falcon_spi_read(struct efx_nic *efx,
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index 7b0fd89e7b85..d37cb5017129 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -121,7 +121,7 @@
121#define GPHY_XCONTROL_REG 49152 121#define GPHY_XCONTROL_REG 49152
122#define GPHY_ISOLATE_LBN 10 122#define GPHY_ISOLATE_LBN 10
123#define GPHY_ISOLATE_WIDTH 1 123#define GPHY_ISOLATE_WIDTH 1
124#define GPHY_DUPLEX_LBN 8 124#define GPHY_DUPLEX_LBN 8
125#define GPHY_DUPLEX_WIDTH 1 125#define GPHY_DUPLEX_WIDTH 1
126#define GPHY_LOOPBACK_NEAR_LBN 14 126#define GPHY_LOOPBACK_NEAR_LBN 14
127#define GPHY_LOOPBACK_NEAR_WIDTH 1 127#define GPHY_LOOPBACK_NEAR_WIDTH 1
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 72f0fbc73b1a..5cb81fa3fcbd 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -446,10 +446,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
446 likely(efx->port_enabled) && 446 likely(efx->port_enabled) &&
447 likely(netif_device_present(efx->net_dev))) { 447 likely(netif_device_present(efx->net_dev))) {
448 fill_level = tx_queue->insert_count - tx_queue->read_count; 448 fill_level = tx_queue->insert_count - tx_queue->read_count;
449 if (fill_level < EFX_TXQ_THRESHOLD(efx)) { 449 if (fill_level < EFX_TXQ_THRESHOLD(efx))
450 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
451 netif_tx_wake_queue(tx_queue->core_txq); 450 netif_tx_wake_queue(tx_queue->core_txq);
452 }
453 } 451 }
454 452
455 /* Check whether the hardware queue is now empty */ 453 /* Check whether the hardware queue is now empty */
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 7c21b334a75b..29bb3f9941c0 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -512,7 +512,7 @@ static bool txc43128_phy_poll(struct efx_nic *efx)
512 return efx->link_state.up != was_up; 512 return efx->link_state.up != was_up;
513} 513}
514 514
515static const char *txc43128_test_names[] = { 515static const char *const txc43128_test_names[] = {
516 "bist" 516 "bist"
517}; 517};
518 518
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 5b118cd5bf94..a9deda8eaf63 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1462,8 +1462,6 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1462 1462
1463 dev = alloc_etherdev(sizeof(*tp)); 1463 dev = alloc_etherdev(sizeof(*tp));
1464 if (!dev) { 1464 if (!dev) {
1465 if (netif_msg_drv(&debug))
1466 pr_err("unable to alloc new ethernet\n");
1467 rc = -ENOMEM; 1465 rc = -ENOMEM;
1468 goto err_out_0; 1466 goto err_out_0;
1469 } 1467 }
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index c8efc708c792..91c44688bc3e 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -619,7 +619,6 @@ static int __devinit sis900_mii_probe(struct net_device * net_dev)
619 } 619 }
620 620
621 if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) { 621 if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
622 printk(KERN_WARNING "Cannot allocate mem for struct mii_phy\n");
623 mii_phy = sis_priv->first_mii; 622 mii_phy = sis_priv->first_mii;
624 while (mii_phy) { 623 while (mii_phy) {
625 struct mii_phy *phy; 624 struct mii_phy *phy;
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 2c077ce0b6d6..11dcb38b99f7 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -363,10 +363,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
363 ret = -ENOMEM; 363 ret = -ENOMEM;
364 364
365 dev = alloc_etherdev(sizeof (*ep)); 365 dev = alloc_etherdev(sizeof (*ep));
366 if (!dev) { 366 if (!dev)
367 dev_err(&pdev->dev, "no memory for eth device\n");
368 goto err_out_free_res; 367 goto err_out_free_res;
369 } 368
370 SET_NETDEV_DEV(dev, &pdev->dev); 369 SET_NETDEV_DEV(dev, &pdev->dev);
371 370
372#ifdef USE_IO_OPS 371#ifdef USE_IO_OPS
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 313ba3b32ab4..c28230148efd 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2065,7 +2065,6 @@ static int __devinit smc911x_drv_probe(struct platform_device *pdev)
2065 2065
2066 ndev = alloc_etherdev(sizeof(struct smc911x_local)); 2066 ndev = alloc_etherdev(sizeof(struct smc911x_local));
2067 if (!ndev) { 2067 if (!ndev) {
2068 printk("%s: could not allocate device.\n", CARDNAME);
2069 ret = -ENOMEM; 2068 ret = -ENOMEM;
2070 goto release_1; 2069 goto release_1;
2071 } 2070 }
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 64ad3ed74495..24104a1ee6a5 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2223,7 +2223,6 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
2223 2223
2224 ndev = alloc_etherdev(sizeof(struct smc_local)); 2224 ndev = alloc_etherdev(sizeof(struct smc_local));
2225 if (!ndev) { 2225 if (!ndev) {
2226 printk("%s: could not allocate device.\n", CARDNAME);
2227 ret = -ENOMEM; 2226 ret = -ENOMEM;
2228 goto out; 2227 goto out;
2229 } 2228 }
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 24d2df068d71..6a1cd2360818 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2374,7 +2374,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2374 2374
2375 dev = alloc_etherdev(sizeof(struct smsc911x_data)); 2375 dev = alloc_etherdev(sizeof(struct smsc911x_data));
2376 if (!dev) { 2376 if (!dev) {
2377 pr_warn("Could not allocate device\n");
2378 retval = -ENOMEM; 2377 retval = -ENOMEM;
2379 goto out_release_io_1; 2378 goto out_release_io_1;
2380 } 2379 }
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index a9efbdfe5302..595f9881e096 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1598,10 +1598,8 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1598 pci_set_master(pdev); 1598 pci_set_master(pdev);
1599 1599
1600 dev = alloc_etherdev(sizeof(*pd)); 1600 dev = alloc_etherdev(sizeof(*pd));
1601 if (!dev) { 1601 if (!dev)
1602 printk(KERN_ERR "ether device alloc failed\n");
1603 goto out_disable_pci_device_1; 1602 goto out_disable_pci_device_1;
1604 }
1605 1603
1606 SET_NETDEV_DEV(dev, &pdev->dev); 1604 SET_NETDEV_DEV(dev, &pdev->dev);
1607 1605
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 96fa2da30763..166fc95e5baf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -954,10 +954,9 @@ static int stmmac_open(struct net_device *dev)
954 954
955#ifdef CONFIG_STMMAC_TIMER 955#ifdef CONFIG_STMMAC_TIMER
956 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 956 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
957 if (unlikely(priv->tm == NULL)) { 957 if (unlikely(priv->tm == NULL))
958 pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
959 return -ENOMEM; 958 return -ENOMEM;
960 } 959
961 priv->tm->freq = tmrate; 960 priv->tm->freq = tmrate;
962 961
963 /* Test if the external timer can be actually used. 962 /* Test if the external timer can be actually used.
@@ -1802,10 +1801,8 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1802 struct stmmac_priv *priv; 1801 struct stmmac_priv *priv;
1803 1802
1804 ndev = alloc_etherdev(sizeof(struct stmmac_priv)); 1803 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
1805 if (!ndev) { 1804 if (!ndev)
1806 pr_err("%s: ERROR: allocating the device\n", __func__);
1807 return NULL; 1805 return NULL;
1808 }
1809 1806
1810 SET_NETDEV_DEV(ndev, device); 1807 SET_NETDEV_DEV(ndev, device);
1811 1808
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index f10665f594c4..45292760fce4 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -835,7 +835,6 @@ static int cas_saturn_firmware_init(struct cas *cp)
835 cp->fw_data = vmalloc(cp->fw_size); 835 cp->fw_data = vmalloc(cp->fw_size);
836 if (!cp->fw_data) { 836 if (!cp->fw_data) {
837 err = -ENOMEM; 837 err = -ENOMEM;
838 pr_err("\"%s\" Failed %d\n", fw_name, err);
839 goto out; 838 goto out;
840 } 839 }
841 memcpy(cp->fw_data, &fw->data[2], cp->fw_size); 840 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
@@ -4947,7 +4946,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
4947 4946
4948 dev = alloc_etherdev(sizeof(*cp)); 4947 dev = alloc_etherdev(sizeof(*cp));
4949 if (!dev) { 4948 if (!dev) {
4950 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
4951 err = -ENOMEM; 4949 err = -ENOMEM;
4952 goto err_out_disable_pdev; 4950 goto err_out_disable_pdev;
4953 } 4951 }
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index cf433931304f..d83c50816714 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9685,10 +9685,8 @@ static struct net_device * __devinit niu_alloc_and_init(
9685 struct niu *np; 9685 struct niu *np;
9686 9686
9687 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); 9687 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
9688 if (!dev) { 9688 if (!dev)
9689 dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
9690 return NULL; 9689 return NULL;
9691 }
9692 9690
9693 SET_NETDEV_DEV(dev, gen_dev); 9691 SET_NETDEV_DEV(dev, gen_dev);
9694 9692
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 31441a870b0b..ba041596e046 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2885,7 +2885,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2885 2885
2886 dev = alloc_etherdev(sizeof(*gp)); 2886 dev = alloc_etherdev(sizeof(*gp));
2887 if (!dev) { 2887 if (!dev) {
2888 pr_err("Etherdev alloc failed, aborting\n");
2889 err = -ENOMEM; 2888 err = -ENOMEM;
2890 goto err_disable_device; 2889 goto err_disable_device;
2891 } 2890 }
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 8c6c059f3489..92a037a8228a 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -949,10 +949,9 @@ static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
949 int map_len = (ETH_FRAME_LEN + 7) & ~7; 949 int map_len = (ETH_FRAME_LEN + 7) & ~7;
950 950
951 err = -ENOMEM; 951 err = -ENOMEM;
952 if (!buf) { 952 if (!buf)
953 pr_err("TX buffer allocation failure\n");
954 goto err_out; 953 goto err_out;
955 } 954
956 err = -EFAULT; 955 err = -EFAULT;
957 if ((unsigned long)buf & (8UL - 1)) { 956 if ((unsigned long)buf & (8UL - 1)) {
958 pr_err("TX buffer misaligned\n"); 957 pr_err("TX buffer misaligned\n");
@@ -1027,10 +1026,8 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
1027 int err, i; 1026 int err, i;
1028 1027
1029 dev = alloc_etherdev(sizeof(*vp)); 1028 dev = alloc_etherdev(sizeof(*vp));
1030 if (!dev) { 1029 if (!dev)
1031 pr_err("Etherdev alloc failed, aborting\n");
1032 return ERR_PTR(-ENOMEM); 1030 return ERR_PTR(-ENOMEM);
1033 }
1034 1031
1035 for (i = 0; i < ETH_ALEN; i++) 1032 for (i = 0; i < ETH_ALEN; i++)
1036 dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; 1033 dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
@@ -1165,10 +1162,8 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
1165 1162
1166 port = kzalloc(sizeof(*port), GFP_KERNEL); 1163 port = kzalloc(sizeof(*port), GFP_KERNEL);
1167 err = -ENOMEM; 1164 err = -ENOMEM;
1168 if (!port) { 1165 if (!port)
1169 pr_err("Cannot allocate vnet_port\n");
1170 goto err_out_put_mdesc; 1166 goto err_out_put_mdesc;
1171 }
1172 1167
1173 for (i = 0; i < ETH_ALEN; i++) 1168 for (i = 0; i < ETH_ALEN; i++)
1174 port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; 1169 port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 4b19e9b0606b..df7febba9ea6 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1978,7 +1978,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1978 ndev = alloc_etherdev(sizeof(struct bdx_priv)); 1978 ndev = alloc_etherdev(sizeof(struct bdx_priv));
1979 if (!ndev) { 1979 if (!ndev) {
1980 err = -ENOMEM; 1980 err = -ENOMEM;
1981 pr_err("alloc_etherdev failed\n");
1982 goto err_out_iomap; 1981 goto err_out_iomap;
1983 } 1982 }
1984 1983
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 4d9a28ffd3c3..97e1df330a17 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1143,11 +1143,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1143 } 1143 }
1144 1144
1145 dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); 1145 dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
1146 1146 if (!dev)
1147 if (!dev) {
1148 printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
1149 return -ENOMEM; 1147 return -ENOMEM;
1150 }
1151 1148
1152 platform_set_drvdata(pdev, dev); 1149 platform_set_drvdata(pdev, dev);
1153 priv = netdev_priv(dev); 1150 priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4fa0bcb25dfc..922a937e05cf 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1790,7 +1790,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1790 1790
1791 ndev = alloc_etherdev(sizeof(struct emac_priv)); 1791 ndev = alloc_etherdev(sizeof(struct emac_priv));
1792 if (!ndev) { 1792 if (!ndev) {
1793 dev_err(&pdev->dev, "error allocating net_device\n");
1794 rc = -ENOMEM; 1793 rc = -ENOMEM;
1795 goto free_clk; 1794 goto free_clk;
1796 } 1795 }
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 9c0dd6b8d6c9..817ad3bc4957 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -486,7 +486,6 @@ static int __devinit tlan_probe1(struct pci_dev *pdev,
486 486
487 dev = alloc_etherdev(sizeof(struct tlan_priv)); 487 dev = alloc_etherdev(sizeof(struct tlan_priv));
488 if (dev == NULL) { 488 if (dev == NULL) {
489 pr_err("Could not allocate memory for device\n");
490 rc = -ENOMEM; 489 rc = -ENOMEM;
491 goto err_out_regions; 490 goto err_out_regions;
492 } 491 }
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index fd4ed7f8cfa1..5c14f82c4954 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1621,10 +1621,9 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1621 kfree(target->hwinfo); 1621 kfree(target->hwinfo);
1622 target->hwinfo = kzalloc(be16_to_cpu(scan_info->size), 1622 target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
1623 GFP_KERNEL); 1623 GFP_KERNEL);
1624 if (!target->hwinfo) { 1624 if (!target->hwinfo)
1625 pr_info("%s: kzalloc failed\n", __func__);
1626 continue; 1625 continue;
1627 } 1626
1628 /* copy hw scan info */ 1627 /* copy hw scan info */
1629 memcpy(target->hwinfo, scan_info, scan_info->size); 1628 memcpy(target->hwinfo, scan_info, scan_info->size);
1630 target->essid_len = strnlen(scan_info->essid, 1629 target->essid_len = strnlen(scan_info->essid,
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 71b785cd7563..f5ac603d5608 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -808,10 +808,9 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
808 808
809 /* dev zeroed in alloc_etherdev */ 809 /* dev zeroed in alloc_etherdev */
810 dev = alloc_etherdev(sizeof(*lp)); 810 dev = alloc_etherdev(sizeof(*lp));
811 if (dev == NULL) { 811 if (dev == NULL)
812 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
813 return -ENOMEM; 812 return -ENOMEM;
814 } 813
815 SET_NETDEV_DEV(dev, &pdev->dev); 814 SET_NETDEV_DEV(dev, &pdev->dev);
816 lp = netdev_priv(dev); 815 lp = netdev_priv(dev);
817 lp->dev = dev; 816 lp->dev = dev;
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 164fb775d7b3..fc5521c9c089 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1582,10 +1582,8 @@ tsi108_init_one(struct platform_device *pdev)
1582 /* Create an ethernet device instance */ 1582 /* Create an ethernet device instance */
1583 1583
1584 dev = alloc_etherdev(sizeof(struct tsi108_prv_data)); 1584 dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
1585 if (!dev) { 1585 if (!dev)
1586 printk("tsi108_eth: Could not allocate a device structure\n");
1587 return -ENOMEM; 1586 return -ENOMEM;
1588 }
1589 1587
1590 printk("tsi108_eth%d: probe...\n", pdev->id); 1588 printk("tsi108_eth%d: probe...\n", pdev->id);
1591 data = netdev_priv(dev); 1589 data = netdev_priv(dev);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 10b18eb63d25..1b95daa372c4 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -927,7 +927,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
927 dev = alloc_etherdev(sizeof(struct rhine_private)); 927 dev = alloc_etherdev(sizeof(struct rhine_private));
928 if (!dev) { 928 if (!dev) {
929 rc = -ENOMEM; 929 rc = -ENOMEM;
930 dev_err(&pdev->dev, "alloc_etherdev failed\n");
931 goto err_out; 930 goto err_out;
932 } 931 }
933 SET_NETDEV_DEV(dev, &pdev->dev); 932 SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4128d6b8cc28..2776bbc67936 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2733,10 +2733,8 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2733 } 2733 }
2734 2734
2735 dev = alloc_etherdev(sizeof(struct velocity_info)); 2735 dev = alloc_etherdev(sizeof(struct velocity_info));
2736 if (!dev) { 2736 if (!dev)
2737 dev_err(&pdev->dev, "allocate net device failed.\n");
2738 goto out; 2737 goto out;
2739 }
2740 2738
2741 /* Chain it all together */ 2739 /* Chain it all together */
2742 2740
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index d5a826063a82..5778a4ae1164 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -25,6 +25,14 @@ config XILINX_EMACLITE
25 ---help--- 25 ---help---
26 This driver supports the 10/100 Ethernet Lite from Xilinx. 26 This driver supports the 10/100 Ethernet Lite from Xilinx.
27 27
28config XILINX_AXI_EMAC
29 tristate "Xilinx 10/100/1000 AXI Ethernet support"
30 depends on (PPC32 || MICROBLAZE)
31 select PHYLIB
32 ---help---
33 This driver supports the 10/100/1000 Ethernet from Xilinx for the
34 AXI bus interface used in Xilinx Virtex FPGAs.
35
28config XILINX_LL_TEMAC 36config XILINX_LL_TEMAC
29 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" 37 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
30 depends on (PPC || MICROBLAZE) 38 depends on (PPC || MICROBLAZE)
diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
index 5feac734ea45..214205e975e3 100644
--- a/drivers/net/ethernet/xilinx/Makefile
+++ b/drivers/net/ethernet/xilinx/Makefile
@@ -5,3 +5,5 @@
5ll_temac-objs := ll_temac_main.o ll_temac_mdio.o 5ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
6obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o 6obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
7obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o 7obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
8xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o
9obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index f21addb1db95..d8eb9c9e3ee2 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1011,10 +1011,9 @@ static int __devinit temac_of_probe(struct platform_device *op)
1011 1011
1012 /* Init network device structure */ 1012 /* Init network device structure */
1013 ndev = alloc_etherdev(sizeof(*lp)); 1013 ndev = alloc_etherdev(sizeof(*lp));
1014 if (!ndev) { 1014 if (!ndev)
1015 dev_err(&op->dev, "could not allocate device.\n");
1016 return -ENOMEM; 1015 return -ENOMEM;
1017 } 1016
1018 ether_setup(ndev); 1017 ether_setup(ndev);
1019 dev_set_drvdata(&op->dev, ndev); 1018 dev_set_drvdata(&op->dev, ndev);
1020 SET_NETDEV_DEV(ndev, &op->dev); 1019 SET_NETDEV_DEV(ndev, &op->dev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
new file mode 100644
index 000000000000..cc83af083fd7
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -0,0 +1,508 @@
1/*
2 * Definitions for Xilinx Axi Ethernet device driver.
3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
8 */
9
10#ifndef XILINX_AXIENET_H
11#define XILINX_AXIENET_H
12
13#include <linux/netdevice.h>
14#include <linux/spinlock.h>
15#include <linux/interrupt.h>
16
17/* Packet size info */
18#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
19#define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */
20#define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
21#define XAE_MTU 1500 /* Max MTU of an Ethernet frame */
22#define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
23
24#define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
25#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
26#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
27
28/* Configuration options */
29
30/* Accept all incoming packets. Default: disabled (cleared) */
31#define XAE_OPTION_PROMISC (1 << 0)
32
33/* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */
34#define XAE_OPTION_JUMBO (1 << 1)
35
36/* VLAN Rx & Tx frame support. Default: disabled (cleared) */
37#define XAE_OPTION_VLAN (1 << 2)
38
39/* Enable recognition of flow control frames on Rx. Default: enabled (set) */
40#define XAE_OPTION_FLOW_CONTROL (1 << 4)
41
42/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
43 * stripped. Default: disabled (set) */
44#define XAE_OPTION_FCS_STRIP (1 << 5)
45
46/* Generate FCS field and add PAD automatically for outgoing frames.
47 * Default: enabled (set) */
48#define XAE_OPTION_FCS_INSERT (1 << 6)
49
50/* Enable Length/Type error checking for incoming frames. When this option is
51 * set, the MAC will filter frames that have a mismatched type/length field
52 * and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these
53 * types of frames are encountered. When this option is cleared, the MAC will
54 * allow these types of frames to be received. Default: enabled (set) */
55#define XAE_OPTION_LENTYPE_ERR (1 << 7)
56
57/* Enable the transmitter. Default: enabled (set) */
58#define XAE_OPTION_TXEN (1 << 11)
59
60/* Enable the receiver. Default: enabled (set) */
61#define XAE_OPTION_RXEN (1 << 12)
62
63/* Default options set when device is initialized or reset */
64#define XAE_OPTION_DEFAULTS \
65 (XAE_OPTION_TXEN | \
66 XAE_OPTION_FLOW_CONTROL | \
67 XAE_OPTION_RXEN)
68
69/* Axi DMA Register definitions */
70
71#define XAXIDMA_TX_CR_OFFSET 0x00000000 /* Channel control */
72#define XAXIDMA_TX_SR_OFFSET 0x00000004 /* Status */
73#define XAXIDMA_TX_CDESC_OFFSET 0x00000008 /* Current descriptor pointer */
74#define XAXIDMA_TX_TDESC_OFFSET 0x00000010 /* Tail descriptor pointer */
75
76#define XAXIDMA_RX_CR_OFFSET 0x00000030 /* Channel control */
77#define XAXIDMA_RX_SR_OFFSET 0x00000034 /* Status */
78#define XAXIDMA_RX_CDESC_OFFSET 0x00000038 /* Current descriptor pointer */
79#define XAXIDMA_RX_TDESC_OFFSET 0x00000040 /* Tail descriptor pointer */
80
81#define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */
82#define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
83
84#define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */
85#define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */
86#define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */
87#define XAXIDMA_BD_STS_OFFSET 0x1C /* Status */
88#define XAXIDMA_BD_USR0_OFFSET 0x20 /* User IP specific word0 */
89#define XAXIDMA_BD_USR1_OFFSET 0x24 /* User IP specific word1 */
90#define XAXIDMA_BD_USR2_OFFSET 0x28 /* User IP specific word2 */
91#define XAXIDMA_BD_USR3_OFFSET 0x2C /* User IP specific word3 */
92#define XAXIDMA_BD_USR4_OFFSET 0x30 /* User IP specific word4 */
93#define XAXIDMA_BD_ID_OFFSET 0x34 /* Sw ID */
94#define XAXIDMA_BD_HAS_STSCNTRL_OFFSET 0x38 /* Whether has stscntrl strm */
95#define XAXIDMA_BD_HAS_DRE_OFFSET 0x3C /* Whether has DRE */
96
97#define XAXIDMA_BD_HAS_DRE_SHIFT 8 /* Whether has DRE shift */
98#define XAXIDMA_BD_HAS_DRE_MASK 0xF00 /* Whether has DRE mask */
99#define XAXIDMA_BD_WORDLEN_MASK 0xFF /* Whether has DRE mask */
100
101#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */
102#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
103#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
104#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
105
106#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
107#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
108
109#define XAXIDMA_DELAY_SHIFT 24
110#define XAXIDMA_COALESCE_SHIFT 16
111
112#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
113#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
114#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
115#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
116
117/* Default TX/RX Threshold and waitbound values for SGDMA mode */
118#define XAXIDMA_DFT_TX_THRESHOLD 24
119#define XAXIDMA_DFT_TX_WAITBOUND 254
120#define XAXIDMA_DFT_RX_THRESHOLD 24
121#define XAXIDMA_DFT_RX_WAITBOUND 254
122
123#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
124#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
125#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
126
127#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */
128#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */
129#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */
130#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */
131#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */
132#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */
133#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */
134#define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */
135#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */
136
137#define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40
138
139/* Axi Ethernet registers definition */
140#define XAE_RAF_OFFSET 0x00000000 /* Reset and Address filter */
141#define XAE_TPF_OFFSET 0x00000004 /* Tx Pause Frame */
142#define XAE_IFGP_OFFSET 0x00000008 /* Tx Inter-frame gap adjustment*/
143#define XAE_IS_OFFSET 0x0000000C /* Interrupt status */
144#define XAE_IP_OFFSET 0x00000010 /* Interrupt pending */
145#define XAE_IE_OFFSET 0x00000014 /* Interrupt enable */
146#define XAE_TTAG_OFFSET 0x00000018 /* Tx VLAN TAG */
147#define XAE_RTAG_OFFSET 0x0000001C /* Rx VLAN TAG */
148#define XAE_UAWL_OFFSET 0x00000020 /* Unicast address word lower */
149#define XAE_UAWU_OFFSET 0x00000024 /* Unicast address word upper */
150#define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */
151#define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */
152#define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */
153#define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */
154#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
155#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
156#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
157#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
158#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
159#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
160#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
161#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
162#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
163#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
164#define XAE_MDIO_MIP_OFFSET 0x00000620 /* MII Mgmt Interrupt Pending
165 * register offset */
166#define XAE_MDIO_MIE_OFFSET 0x00000640 /* MII Management Interrupt Enable
167 * register offset */
168#define XAE_MDIO_MIC_OFFSET 0x00000660 /* MII Management Interrupt Clear
169 * register offset. */
170#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
171#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
172#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
173#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
174#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
175
176#define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */
177#define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */
178#define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */
179
180/* Bit Masks for Axi Ethernet RAF register */
181#define XAE_RAF_MCSTREJ_MASK 0x00000002 /* Reject receive multicast
182 * destination address */
183#define XAE_RAF_BCSTREJ_MASK 0x00000004 /* Reject receive broadcast
184 * destination address */
185#define XAE_RAF_TXVTAGMODE_MASK 0x00000018 /* Tx VLAN TAG mode */
186#define XAE_RAF_RXVTAGMODE_MASK 0x00000060 /* Rx VLAN TAG mode */
187#define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */
188#define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */
189#define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */
190#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 /* Exteneded Multicast
191 * Filtering mode
192 */
193#define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. Counter Reset */
194#define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */
195#define XAE_RAF_TXVTAGMODE_SHIFT 3 /* Tx Tag mode shift bits */
196#define XAE_RAF_RXVTAGMODE_SHIFT 5 /* Rx Tag mode shift bits */
197#define XAE_RAF_TXVSTRPMODE_SHIFT 7 /* Tx strip mode shift bits*/
198#define XAE_RAF_RXVSTRPMODE_SHIFT 9 /* Rx Strip mode shift bits*/
199
200/* Bit Masks for Axi Ethernet TPF and IFGP registers */
201#define XAE_TPF_TPFV_MASK 0x0000FFFF /* Tx pause frame value */
202#define XAE_IFGP0_IFGP_MASK 0x0000007F /* Transmit inter-frame
203 * gap adjustment value */
204
205/* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply
206 * for all 3 registers. */
207#define XAE_INT_HARDACSCMPLT_MASK 0x00000001 /* Hard register access
208 * complete */
209#define XAE_INT_AUTONEG_MASK 0x00000002 /* Auto negotiation
210 * complete */
211#define XAE_INT_RXCMPIT_MASK 0x00000004 /* Rx complete */
212#define XAE_INT_RXRJECT_MASK 0x00000008 /* Rx frame rejected */
213#define XAE_INT_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */
214#define XAE_INT_TXCMPIT_MASK 0x00000020 /* Tx complete */
215#define XAE_INT_RXDCMLOCK_MASK 0x00000040 /* Rx Dcm Lock */
216#define XAE_INT_MGTRDY_MASK 0x00000080 /* MGT clock Lock */
217#define XAE_INT_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */
218#define XAE_INT_ALL_MASK 0x0000003F /* All the ints */
219
220#define XAE_INT_RECV_ERROR_MASK \
221 (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that
222 * indicate receive
223 * errors */
224
225/* Bit masks for Axi Ethernet VLAN TPID Word 0 register */
226#define XAE_TPID_0_MASK 0x0000FFFF /* TPID 0 */
227#define XAE_TPID_1_MASK 0xFFFF0000 /* TPID 1 */
228
229/* Bit masks for Axi Ethernet VLAN TPID Word 1 register */
230#define XAE_TPID_2_MASK 0x0000FFFF /* TPID 0 */
231#define XAE_TPID_3_MASK 0xFFFF0000 /* TPID 1 */
232
233/* Bit masks for Axi Ethernet RCW1 register */
234#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
235#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
236#define XAE_RCW1_FCS_MASK 0x20000000 /* In-Band FCS enable
237 * (FCS not stripped) */
238#define XAE_RCW1_RX_MASK 0x10000000 /* Receiver enable */
239#define XAE_RCW1_VLAN_MASK 0x08000000 /* VLAN frame enable */
240#define XAE_RCW1_LT_DIS_MASK 0x02000000 /* Length/type field valid check
241 * disable */
242#define XAE_RCW1_CL_DIS_MASK 0x01000000 /* Control frame Length check
243 * disable */
244#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address
245 * bits [47:32]. Bits [31:0] are
246 * stored in register RCW0 */
247
248/* Bit masks for Axi Ethernet TC register */
249#define XAE_TC_RST_MASK 0x80000000 /* Reset */
250#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
251#define XAE_TC_FCS_MASK 0x20000000 /* In-Band FCS enable
252 * (FCS not generated) */
253#define XAE_TC_TX_MASK 0x10000000 /* Transmitter enable */
254#define XAE_TC_VLAN_MASK 0x08000000 /* VLAN frame enable */
255#define XAE_TC_IFG_MASK 0x02000000 /* Inter-frame gap adjustment
256 * enable */
257
258/* Bit masks for Axi Ethernet FCC register */
259#define XAE_FCC_FCRX_MASK 0x20000000 /* Rx flow control enable */
260#define XAE_FCC_FCTX_MASK 0x40000000 /* Tx flow control enable */
261
262/* Bit masks for Axi Ethernet EMMC register */
263#define XAE_EMMC_LINKSPEED_MASK 0xC0000000 /* Link speed */
264#define XAE_EMMC_RGMII_MASK 0x20000000 /* RGMII mode enable */
265#define XAE_EMMC_SGMII_MASK 0x10000000 /* SGMII mode enable */
266#define XAE_EMMC_GPCS_MASK 0x08000000 /* 1000BaseX mode enable */
267#define XAE_EMMC_HOST_MASK 0x04000000 /* Host interface enable */
268#define XAE_EMMC_TX16BIT 0x02000000 /* 16 bit Tx client enable */
269#define XAE_EMMC_RX16BIT 0x01000000 /* 16 bit Rx client enable */
270#define XAE_EMMC_LINKSPD_10 0x00000000 /* Link Speed mask for 10 Mbit */
271#define XAE_EMMC_LINKSPD_100 0x40000000 /* Link Speed mask for 100 Mbit */
272#define XAE_EMMC_LINKSPD_1000 0x80000000 /* Link Speed mask for 1000 Mbit */
273
274/* Bit masks for Axi Ethernet PHYC register */
275#define XAE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /* SGMII link speed mask*/
276#define XAE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /* RGMII link speed */
277#define XAE_PHYC_RGMIIHD_MASK 0x00000002 /* RGMII Half-duplex */
278#define XAE_PHYC_RGMIILINK_MASK 0x00000001 /* RGMII link status */
279#define XAE_PHYC_RGLINKSPD_10 0x00000000 /* RGMII link 10 Mbit */
280#define XAE_PHYC_RGLINKSPD_100 0x00000004 /* RGMII link 100 Mbit */
281#define XAE_PHYC_RGLINKSPD_1000 0x00000008 /* RGMII link 1000 Mbit */
282#define XAE_PHYC_SGLINKSPD_10 0x00000000 /* SGMII link 10 Mbit */
283#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
284#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
285
286/* Bit masks for Axi Ethernet MDIO interface MC register */
287#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
288#define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */
289
290/* Bit masks for Axi Ethernet MDIO interface MCR register */
291#define XAE_MDIO_MCR_PHYAD_MASK 0x1F000000 /* Phy Address Mask */
292#define XAE_MDIO_MCR_PHYAD_SHIFT 24 /* Phy Address Shift */
293#define XAE_MDIO_MCR_REGAD_MASK 0x001F0000 /* Reg Address Mask */
294#define XAE_MDIO_MCR_REGAD_SHIFT 16 /* Reg Address Shift */
295#define XAE_MDIO_MCR_OP_MASK 0x0000C000 /* Operation Code Mask */
296#define XAE_MDIO_MCR_OP_SHIFT 13 /* Operation Code Shift */
297#define XAE_MDIO_MCR_OP_READ_MASK 0x00008000 /* Op Code Read Mask */
298#define XAE_MDIO_MCR_OP_WRITE_MASK 0x00004000 /* Op Code Write Mask */
299#define XAE_MDIO_MCR_INITIATE_MASK 0x00000800 /* Ready Mask */
300#define XAE_MDIO_MCR_READY_MASK 0x00000080 /* Ready Mask */
301
302/* Bit masks for Axi Ethernet MDIO interface MIS, MIP, MIE, MIC registers */
303#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
304
305/* Bit masks for Axi Ethernet UAW1 register */
306#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF /* Station address bits
307 * [47:32]; Station address
308 * bits [31:0] are stored in
309 * register UAW0 */
310
311/* Bit masks for Axi Ethernet FMI register */
312#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
313#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
314
315#define XAE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
316
317/* Defines for different options for C_PHY_TYPE parameter in Axi Ethernet IP */
318#define XAE_PHY_TYPE_MII 0
319#define XAE_PHY_TYPE_GMII 1
320#define XAE_PHY_TYPE_RGMII_1_3 2
321#define XAE_PHY_TYPE_RGMII_2_0 3
322#define XAE_PHY_TYPE_SGMII 4
323#define XAE_PHY_TYPE_1000BASE_X 5
324
325#define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Total number of entries in the
326 * hardware multicast table. */
327
328/* Axi Ethernet Synthesis features */
329#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
330#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
331#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
332#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
333
334#define XAE_NO_CSUM_OFFLOAD 0
335
336#define XAE_FULL_CSUM_STATUS_MASK 0x00000038
337#define XAE_IP_UDP_CSUM_VALIDATED 0x00000003
338#define XAE_IP_TCP_CSUM_VALIDATED 0x00000002
339
340#define DELAY_OF_ONE_MILLISEC 1000
341
/**
 * struct axidma_bd - Axi Dma buffer descriptor layout
 *
 * Hardware-defined layout: these descriptors live in DMA-coherent memory
 * and are read and written directly by the Axi DMA engine, so the field
 * order and sizes must not be changed.
 *
 * @next: MM2S/S2MM Next Descriptor Pointer
 * @reserved1: Reserved and not used
 * @phys: MM2S/S2MM Buffer Address
 * @reserved2: Reserved and not used
 * @reserved3: Reserved and not used
 * @reserved4: Reserved and not used
 * @cntrl: MM2S/S2MM Control value
 * @status: MM2S/S2MM Status value
 * @app0: MM2S/S2MM User Application Field 0.
 * @app1: MM2S/S2MM User Application Field 1.
 * @app2: MM2S/S2MM User Application Field 2.
 * @app3: MM2S/S2MM User Application Field 3.
 * @app4: MM2S/S2MM User Application Field 4.
 * @sw_id_offset: MM2S/S2MM Sw ID
 * @reserved5: Reserved and not used
 * @reserved6: Reserved and not used
 */
struct axidma_bd {
	u32 next;	/* Physical address of next buffer descriptor */
	u32 reserved1;
	u32 phys;
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;	/* TX start << 16 | insert */
	u32 app2;	/* TX csum seed */
	u32 app3;
	u32 app4;	/* Tx: saved sk_buff ptr; Rx: low 16 bits = frame length */
	u32 sw_id_offset; /* Rx ring: sk_buff pointer stored as u32 */
	u32 reserved5;
	u32 reserved6;
};
379
/**
 * struct axienet_local - axienet private per device data
 * @ndev: Pointer for net_device to which it will be attached.
 * @dev: Pointer to device structure
 * @phy_dev: Pointer to PHY device structure attached to the axienet_local
 * @phy_node: Pointer to device node structure
 * @mii_bus: Pointer to MII bus structure
 * @mdio_irqs: IRQs table for MDIO bus required in mii_bus structure
 * @regs: Base address for the axienet_local device address space
 * @dma_regs: Base address for the axidma device address space
 * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
 * @tx_irq: Axidma TX IRQ number
 * @rx_irq: Axidma RX IRQ number
 * @temac_type: axienet type to identify between soft and hard temac
 * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
 * @options: AxiEthernet option word
 * @last_link: Phy link state in which the PHY was negotiated earlier
 * @features: Stores the extended features supported by the axienet hw
 * @tx_bd_v: Virtual address of the TX buffer descriptor ring
 * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
 * @rx_bd_v: Virtual address of the RX buffer descriptor ring
 * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
 * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
 *	      accessed currently. Used while alloc. BDs before a TX starts
 * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
 *		accessed currently. Used while processing BDs after the TX
 *		completed.
 * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
 *	      accessed currently.
 * @max_frm_size: Stores the maximum size of the frame that can be that
 *		  Txed/Rxed in the existing hardware. If jumbo option is
 *		  supported, the maximum frame size would be 9k. Else it is
 *		  1522 bytes (assuming support for basic VLAN)
 * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
 *		   can handle jumbo packets, this entry will be 1, else 0.
 * @csum_offload_on_tx_path: Checksum offload mode selected for the Tx path
 *			     (initialized outside this file; see probe code)
 * @csum_offload_on_rx_path: Checksum offload mode selected for the Rx path
 *			     (initialized outside this file; see probe code)
 * @coalesce_count_rx: Interrupt coalesce count programmed into the Rx DMA
 *		       channel control register
 * @coalesce_count_tx: Interrupt coalesce count programmed into the Tx DMA
 *		       channel control register
 */
struct axienet_local {
	struct net_device *ndev;
	struct device *dev;

	/* Connection to PHY device */
	struct phy_device *phy_dev;	/* Pointer to PHY device */
	struct device_node *phy_node;

	/* MDIO bus data */
	struct mii_bus *mii_bus;	/* MII bus reference */
	int mdio_irqs[PHY_MAX_ADDR];	/* IRQs table for MDIO bus */

	/* IO registers, dma functions and IRQs */
	void __iomem *regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;
	u32 temac_type;
	u32 phy_type;

	u32 options;			/* Current options word */
	u32 last_link;
	u32 features;

	/* Buffer descriptors */
	struct axidma_bd *tx_bd_v;
	dma_addr_t tx_bd_p;
	struct axidma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 max_frm_size;
	u32 jumbo_support;

	int csum_offload_on_tx_path;
	int csum_offload_on_rx_path;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};
461
/**
 * struct axienet_option - Used to set axi ethernet hardware options
 * @opt:  Option to be set.
 * @reg:  Register offset to be written for setting the option
 * @m_or: Mask to be ORed for setting the option in the register
 *
 * Tables of these entries are terminated by an all-zero sentinel
 * (opt == 0); see axienet_setoptions().
 */
struct axienet_option {
	u32 opt;
	u32 reg;
	u32 m_or;
};
473
/**
 * axienet_ior - Memory mapped Axi Ethernet register read
 * @lp:     Pointer to axienet local structure
 * @offset: Address offset from the base address of Axi Ethernet core
 *
 * returns: The contents of the Axi Ethernet register
 *
 * This function returns the contents of the corresponding register.
 */
static inline u32 axienet_ior(struct axienet_local *lp, off_t offset)
{
	/* 32-bit big-endian MMIO read of the Ethernet core register space */
	return in_be32(lp->regs + offset);
}
487
/**
 * axienet_iow - Memory mapped Axi Ethernet register write
 * @lp:     Pointer to axienet local structure
 * @offset: Address offset from the base address of Axi Ethernet core
 * @value:  Value to be written into the Axi Ethernet register
 *
 * This function writes the desired value into the corresponding Axi Ethernet
 * register.
 */
static inline void axienet_iow(struct axienet_local *lp, off_t offset,
			       u32 value)
{
	/* 32-bit big-endian MMIO write to the Ethernet core register space */
	out_be32((lp->regs + offset), value);
}
502
503/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
504int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np);
505int axienet_mdio_wait_until_ready(struct axienet_local *lp);
506void axienet_mdio_teardown(struct axienet_local *lp);
507
508#endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
new file mode 100644
index 000000000000..a7cf00438a36
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -0,0 +1,1680 @@
1/*
2 * Xilinx Axi Ethernet device driver
3 *
4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
7 * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
8 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
9 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
10 *
11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
12 * and Spartan6.
13 *
14 * TODO:
15 * - Add Axi Fifo support.
16 * - Factor out Axi DMA code into separate driver.
17 * - Test and fix basic multicast filtering.
18 * - Add support for extended multicast filtering.
19 * - Test basic VLAN support.
20 * - Add support for extended VLAN support.
21 */
22
23#include <linux/delay.h>
24#include <linux/etherdevice.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/netdevice.h>
28#include <linux/of_mdio.h>
29#include <linux/of_platform.h>
30#include <linux/of_address.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/phy.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36
37#include "xilinx_axienet.h"
38
39/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
40#define TX_BD_NUM 64
41#define RX_BD_NUM 128
42
43/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
44#define DRIVER_NAME "xaxienet"
45#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
46#define DRIVER_VERSION "1.00a"
47
48#define AXIENET_REGS_N 32
49
/* Match table for of_platform binding; lists the Axi Ethernet IP core
 * versions this driver supports. */
static struct of_device_id axienet_of_match[] __devinitdata = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},	/* sentinel */
};
57
58MODULE_DEVICE_TABLE(of, axienet_of_match);
59
/* Option table for setting up Axi Ethernet hardware options.
 * Each XAE_OPTION_* flag maps to one or more (register, mask) pairs;
 * the table is walked by axienet_setoptions() and is terminated by an
 * all-zero sentinel entry. */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}	/* sentinel */
};
114
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:  Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * returns: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	/* 32-bit big-endian MMIO read of the Axi DMA register space */
	return in_be32(lp->dma_regs + reg);
}
128
/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:    Pointer to axienet local structure
 * @reg:   Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	/* 32-bit big-endian MMIO write to the Axi DMA register space */
	out_be32((lp->dma_regs + reg), value);
}
143
144/**
145 * axienet_dma_bd_release - Release buffer descriptor rings
146 * @ndev: Pointer to the net_device structure
147 *
148 * This function is used to release the descriptors allocated in
149 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
150 * driver stop api is called.
151 */
152static void axienet_dma_bd_release(struct net_device *ndev)
153{
154 int i;
155 struct axienet_local *lp = netdev_priv(ndev);
156
157 for (i = 0; i < RX_BD_NUM; i++) {
158 dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
159 lp->max_frm_size, DMA_FROM_DEVICE);
160 dev_kfree_skb((struct sk_buff *)
161 (lp->rx_bd_v[i].sw_id_offset));
162 }
163
164 if (lp->rx_bd_v) {
165 dma_free_coherent(ndev->dev.parent,
166 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
167 lp->rx_bd_v,
168 lp->rx_bd_p);
169 }
170 if (lp->tx_bd_v) {
171 dma_free_coherent(ndev->dev.parent,
172 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
173 lp->tx_bd_v,
174 lp->tx_bd_p);
175 }
176}
177
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * returns: 0, on success
 *	    -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/*
	 * Allocate the Tx and Rx buffer descriptors (DMA-coherent, since
	 * the Axi DMA engine reads/writes them directly).
	 */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p,
					 GFP_KERNEL);
	if (!lp->tx_bd_v) {
		dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
			"descriptors");
		goto out;
	}

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p,
					 GFP_KERNEL);
	if (!lp->rx_bd_v) {
		dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
			"descriptors");
		goto out;
	}

	/* Link the Tx descriptors into a circular ring via their physical
	 * next-pointers. */
	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	/* Link the Rx descriptors into a circular ring and attach a
	 * pre-allocated, DMA-mapped receive skb to each slot. */
	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb) {
			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
			goto out;
		}

		/* The skb pointer is stashed in the descriptor as a u32;
		 * this assumes 32-bit kernel pointers — TODO confirm for
		 * any 64-bit target. */
		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		/* NOTE(review): dma_map_single() result is not checked
		 * with dma_mapping_error() here. */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.*/
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	/* Free whatever was allocated before the failure */
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
299
/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:    Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address; may be NULL, in
 *	     which case the current ndev->dev_addr is (re)programmed, with
 *	     a random address generated if it is invalid
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	/* Set up unicast MAC address filter set its mac address:
	 * UAW0 takes bytes 0-3 (little-endian packing), UAW1 keeps its
	 * non-address bits and takes bytes 4-5. */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}
329
330/**
331 * netdev_set_mac_address - Write the MAC address (from outside the driver)
332 * @ndev: Pointer to the net_device structure
333 * @p: 6 byte Address to be written as MAC address
334 *
335 * returns: 0 for all conditions. Presently, there is no failure case.
336 *
337 * This function is called to initialize the MAC address of the Axi Ethernet
338 * core. It calls the core specific axienet_set_mac_address. This is the
339 * function that goes into net_device_ops structure entry ndo_set_mac_address.
340 */
341static int netdev_set_mac_address(struct net_device *ndev, void *p)
342{
343 struct sockaddr *addr = p;
344 axienet_set_mac_address(ndev, addr->sa_data);
345 return 0;
346}
347
/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 *
 * Three cases are handled: (1) promiscuous/allmulti requested or more
 * multicast addresses than CAM entries -> enable promiscuous mode;
 * (2) a non-empty multicast list that fits -> program the CAM entries;
 * (3) otherwise -> disable promiscuous mode and clear the CAM table.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it. */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			/* Pack the 6-byte address into AF0 (bytes 0-3) and
			 * AF1 (bytes 4-5). */
			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			/* Select CAM entry i via the FMI index field before
			 * writing the address registers. */
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		/* Clear every CAM entry */
		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}
417
418/**
419 * axienet_setoptions - Set an Axi Ethernet option
420 * @ndev: Pointer to the net_device structure
421 * @options: Option to be enabled/disabled
422 *
423 * The Axi Ethernet core has multiple features which can be selectively turned
424 * on or off. The typical options could be jumbo frame option, basic VLAN
425 * option, promiscuous mode option etc. This function is used to set or clear
426 * these options in the Axi Ethernet hardware. This is done through
427 * axienet_option structure .
428 */
429static void axienet_setoptions(struct net_device *ndev, u32 options)
430{
431 int reg;
432 struct axienet_local *lp = netdev_priv(ndev);
433 struct axienet_option *tp = &axienet_options[0];
434
435 while (tp->opt) {
436 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
437 if (options & tp->opt)
438 reg |= tp->m_or;
439 axienet_iow(lp, tp->reg, reg);
440 tp++;
441 }
442
443 lp->options |= options;
444}
445
/**
 * __axienet_device_reset - Reset one Axi DMA channel
 * @lp:     Pointer to the axienet local structure
 * @dev:    Pointer to the device structure, used for error logging only
 * @offset: Channel control register offset (XAXIDMA_TX_CR_OFFSET or
 *	    XAXIDMA_RX_CR_OFFSET)
 *
 * Triggers a software reset through the given DMA channel control register
 * and busy-waits (up to roughly one millisecond) for the reset bit to
 * self-clear. On timeout an error is logged and the function returns
 * anyway — the failure is not propagated to the caller.
 */
static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;
	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process. */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(dev, "axienet_device_reset DMA "
				"reset timeout!\n");
			break;
		}
	}
}
465
/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	/* Default to the standard VLAN frame size; enable jumbo only when
	 * the MTU requires it and the hardware supports it. */
	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	/* NOTE(review): a failure here is only logged; the function
	 * continues and the caller gets no error indication. */
	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev, "axienet_device_reset descriptor "
			"allocation failed\n");
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	/* Acknowledge any pending receive-reject interrupt */
	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.*/
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}
521
/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;

	/* Encode speed/duplex/link into one word so we only touch the
	 * hardware when the state actually changed. */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		/* Reject speed/phy_type combinations the MAC cannot do:
		 * 10/100 on 1000Base-X, or 1000 on plain MII. */
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev, "Speed other than 10, 100 "
					"or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			dev_err(&ndev->dev, "Error setting Axi Ethernet "
				"mac speed\n");
		}
	}
}
578
/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	/* Walk forward from the completion index while the DMA engine has
	 * marked descriptors complete. */
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				DMA_TO_DEVICE);
		/* app4 holds the skb pointer, set only on the last BD of a
		 * frame (see axienet_start_xmit) */
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		/* NOTE(review): `x = ++x % N` is formally an unsequenced
		 * modification of x; `x = (x + 1) % N` would be cleaner. */
		lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}
625
626/**
627 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
628 * @lp: Pointer to the axienet_local structure
629 * @num_frag: The number of BDs to check for
630 *
631 * returns: 0, on success
632 * NETDEV_TX_BUSY, if any of the descriptors are not free
633 *
634 * This function is invoked before BDs are allocated and transmission starts.
635 * This function returns 0 if a BD or group of BDs can be allocated for
636 * transmission. If the BD or any of the BDs are not free the function
637 * returns a busy status. This is invoked from axienet_start_xmit.
638 */
639static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
640 int num_frag)
641{
642 struct axidma_bd *cur_p;
643 cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
644 if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
645 return NETDEV_TX_BUSY;
646 return 0;
647}
648
/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	/* One BD for the linear part plus one per fragment must be free */
	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Checksum offload: app0/app1 of the first BD carry the AXI Stream
	 * control words (app0 = mode, app1 = start/insert offsets). */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	/* First BD: linear skb data, marked start-of-frame.
	 * NOTE(review): dma_map_single() results are not checked with
	 * dma_mapping_error() anywhere in this function. */
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	/* One additional BD per page fragment */
	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	/* Last BD: end-of-frame; stash the skb pointer so the completion
	 * handler (axienet_start_xmit_done) can free it. */
	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;

	return NETDEV_TX_OK;
}
722
/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev:	Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing. Each consumed descriptor is immediately refilled with a fresh
 * DMA-mapped skb and handed back to the hardware via the tail pointer.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		/* sw_id_offset holds the skb pointer stored at refill time */
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		/* Hardware reports the received frame length in the low
		 * 16 bits of app4 */
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == __constant_htons(ETH_P_IP) &&
			   skb->len > 64) {
			/* Partial offload: hardware leaves the raw 16-bit
			 * checksum in app3 */
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		/* Refill the slot with a fresh skb; on allocation failure
		 * we bail out, leaving the ring shorter until the next
		 * pass. NOTE(review): the already-consumed descriptors are
		 * not returned to hardware in this case. */
		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb) {
			dev_err(&ndev->dev, "no memory for new sk_buff\n");
			return;
		}
		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	/* Return the processed descriptors to the DMA engine */
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
800
/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing. On a DMA error it masks both channels'
 * interrupts and defers recovery to the dma_err_tasklet.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Normal completion (coalesce or delay-timer interrupt) */
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		/* Recovery (reset of both channels) happens in tasklet
		 * context, not here in hard-irq context */
		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	/* Acknowledge the interrupt(s) we handled */
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	return IRQ_HANDLED;
}
848
849/**
850 * axienet_rx_irq - Rx Isr.
851 * @irq: irq number
852 * @_ndev: net_device pointer
853 *
854 * returns: IRQ_HANDLED for all cases.
855 *
856 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
857 * processing.
858 */
859static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
860{
861 u32 cr;
862 unsigned int status;
863 struct net_device *ndev = _ndev;
864 struct axienet_local *lp = netdev_priv(ndev);
865
866 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
867 if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
868 axienet_recv(lp->ndev);
869 goto out;
870 }
871 if (!(status & XAXIDMA_IRQ_ALL_MASK))
872 dev_err(&ndev->dev, "No interrupts asserted in Rx path");
873 if (status & XAXIDMA_IRQ_ERROR_MASK) {
874 dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
875 dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
876 (lp->rx_bd_v[lp->rx_bd_ci]).phys);
877
878 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
879 /* Disable coalesce, delay timer and error interrupts */
880 cr &= (~XAXIDMA_IRQ_ALL_MASK);
881 /* Finally write to the Tx channel control register */
882 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
883
884 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
885 /* Disable coalesce, delay timer and error interrupts */
886 cr &= (~XAXIDMA_IRQ_ALL_MASK);
887 /* write to the Rx channel control register */
888 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
889
890 tasklet_schedule(&lp->dma_err_tasklet);
891 }
892out:
893 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
894 return IRQ_HANDLED;
895}
896
897/**
898 * axienet_open - Driver open routine.
899 * @ndev: Pointer to net_device structure
900 *
901 * returns: 0, on success.
902 * -ENODEV, if PHY cannot be connected to
903 * non-zero error value on failure
904 *
905 * This is the driver open routine. It calls phy_start to start the PHY device.
906 * It also allocates interrupt service routines, enables the interrupt lines
907 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
908 * descriptors are initialized.
909 */
910static int axienet_open(struct net_device *ndev)
911{
912 int ret, mdio_mcreg;
913 struct axienet_local *lp = netdev_priv(ndev);
914
915 dev_dbg(&ndev->dev, "axienet_open()\n");
916
917 mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
918 ret = axienet_mdio_wait_until_ready(lp);
919 if (ret < 0)
920 return ret;
921 /* Disable the MDIO interface till Axi Ethernet Reset is completed.
922 * When we do an Axi Ethernet reset, it resets the complete core
923 * including the MDIO. If MDIO is not disabled when the reset
924 * process is started, MDIO will be broken afterwards. */
925 axienet_iow(lp, XAE_MDIO_MC_OFFSET,
926 (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
927 axienet_device_reset(ndev);
928 /* Enable the MDIO */
929 axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
930 ret = axienet_mdio_wait_until_ready(lp);
931 if (ret < 0)
932 return ret;
933
934 if (lp->phy_node) {
935 lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
936 axienet_adjust_link, 0,
937 PHY_INTERFACE_MODE_GMII);
938 if (!lp->phy_dev) {
939 dev_err(lp->dev, "of_phy_connect() failed\n");
940 return -ENODEV;
941 }
942 phy_start(lp->phy_dev);
943 }
944
945 /* Enable interrupts for Axi DMA Tx */
946 ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
947 if (ret)
948 goto err_tx_irq;
949 /* Enable interrupts for Axi DMA Rx */
950 ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
951 if (ret)
952 goto err_rx_irq;
953 /* Enable tasklets for Axi DMA error handling */
954 tasklet_enable(&lp->dma_err_tasklet);
955 return 0;
956
957err_rx_irq:
958 free_irq(lp->tx_irq, ndev);
959err_tx_irq:
960 if (lp->phy_dev)
961 phy_disconnect(lp->phy_dev);
962 lp->phy_dev = NULL;
963 dev_err(lp->dev, "request_irq() failed\n");
964 return ret;
965}
966
967/**
968 * axienet_stop - Driver stop routine.
969 * @ndev: Pointer to net_device structure
970 *
971 * returns: 0, on success.
972 *
973 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
974 * device. It also removes the interrupt handlers and disables the interrupts.
975 * The Axi DMA Tx/Rx BDs are released.
976 */
977static int axienet_stop(struct net_device *ndev)
978{
979 u32 cr;
980 struct axienet_local *lp = netdev_priv(ndev);
981
982 dev_dbg(&ndev->dev, "axienet_close()\n");
983
984 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
985 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
986 cr & (~XAXIDMA_CR_RUNSTOP_MASK));
987 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
988 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
989 cr & (~XAXIDMA_CR_RUNSTOP_MASK));
990 axienet_setoptions(ndev, lp->options &
991 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
992
993 tasklet_disable(&lp->dma_err_tasklet);
994
995 free_irq(lp->tx_irq, ndev);
996 free_irq(lp->rx_irq, ndev);
997
998 if (lp->phy_dev)
999 phy_disconnect(lp->phy_dev);
1000 lp->phy_dev = NULL;
1001
1002 axienet_dma_bd_release(ndev);
1003 return 0;
1004}
1005
1006/**
1007 * axienet_change_mtu - Driver change mtu routine.
1008 * @ndev: Pointer to net_device structure
1009 * @new_mtu: New mtu value to be applied
1010 *
1011 * returns: Always returns 0 (success).
1012 *
1013 * This is the change mtu driver routine. It checks if the Axi Ethernet
1014 * hardware supports jumbo frames before changing the mtu. This can be
1015 * called only when the device is not up.
1016 */
1017static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1018{
1019 struct axienet_local *lp = netdev_priv(ndev);
1020
1021 if (netif_running(ndev))
1022 return -EBUSY;
1023 if (lp->jumbo_support) {
1024 if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
1025 return -EINVAL;
1026 ndev->mtu = new_mtu;
1027 } else {
1028 if ((new_mtu > XAE_MTU) || (new_mtu < 64))
1029 return -EINVAL;
1030 ndev->mtu = new_mtu;
1031 }
1032
1033 return 0;
1034}
1035
1036#ifdef CONFIG_NET_POLL_CONTROLLER
1037/**
1038 * axienet_poll_controller - Axi Ethernet poll mechanism.
1039 * @ndev: Pointer to net_device structure
1040 *
1041 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1042 * to polling the ISRs and are enabled back after the polling is done.
1043 */
1044static void axienet_poll_controller(struct net_device *ndev)
1045{
1046 struct axienet_local *lp = netdev_priv(ndev);
1047 disable_irq(lp->tx_irq);
1048 disable_irq(lp->rx_irq);
1049 axienet_rx_irq(lp->tx_irq, ndev);
1050 axienet_tx_irq(lp->rx_irq, ndev);
1051 enable_irq(lp->tx_irq);
1052 enable_irq(lp->rx_irq);
1053}
1054#endif
1055
/* net_device callbacks for the Axi Ethernet interface.
 * NOTE(review): .ndo_set_mac_address points at netdev_set_mac_address,
 * presumably a local wrapper around axienet_set_mac_address — confirm it is
 * defined earlier in this file. */
static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};
1068
1069/**
1070 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
1071 * @ndev: Pointer to net_device structure
1072 * @ecmd: Pointer to ethtool_cmd structure
1073 *
1074 * This implements ethtool command for getting PHY settings. If PHY could
1075 * not be found, the function returns -ENODEV. This function calls the
1076 * relevant PHY ethtool API to get the PHY settings.
1077 * Issue "ethtool ethX" under linux prompt to execute this function.
1078 */
1079static int axienet_ethtools_get_settings(struct net_device *ndev,
1080 struct ethtool_cmd *ecmd)
1081{
1082 struct axienet_local *lp = netdev_priv(ndev);
1083 struct phy_device *phydev = lp->phy_dev;
1084 if (!phydev)
1085 return -ENODEV;
1086 return phy_ethtool_gset(phydev, ecmd);
1087}
1088
1089/**
1090 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
1091 * @ndev: Pointer to net_device structure
1092 * @ecmd: Pointer to ethtool_cmd structure
1093 *
1094 * This implements ethtool command for setting various PHY settings. If PHY
1095 * could not be found, the function returns -ENODEV. This function calls the
1096 * relevant PHY ethtool API to set the PHY.
1097 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
1098 * function.
1099 */
1100static int axienet_ethtools_set_settings(struct net_device *ndev,
1101 struct ethtool_cmd *ecmd)
1102{
1103 struct axienet_local *lp = netdev_priv(ndev);
1104 struct phy_device *phydev = lp->phy_dev;
1105 if (!phydev)
1106 return -ENODEV;
1107 return phy_ethtool_sset(phydev, ecmd);
1108}
1109
1110/**
1111 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1112 * @ndev: Pointer to net_device structure
1113 * @ed: Pointer to ethtool_drvinfo structure
1114 *
1115 * This implements ethtool command for getting the driver information.
1116 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1117 */
1118static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1119 struct ethtool_drvinfo *ed)
1120{
1121 memset(ed, 0, sizeof(struct ethtool_drvinfo));
1122 strcpy(ed->driver, DRIVER_NAME);
1123 strcpy(ed->version, DRIVER_VERSION);
1124 ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
1125}
1126
1127/**
1128 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1129 * AxiEthernet core.
1130 * @ndev: Pointer to net_device structure
1131 *
1132 * This implements ethtool command for getting the total register length
1133 * information.
1134 */
1135static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1136{
1137 return sizeof(u32) * AXIENET_REGS_N;
1138}
1139
1140/**
1141 * axienet_ethtools_get_regs - Dump the contents of all registers present
1142 * in AxiEthernet core.
1143 * @ndev: Pointer to net_device structure
1144 * @regs: Pointer to ethtool_regs structure
1145 * @ret: Void pointer used to return the contents of the registers.
1146 *
1147 * This implements ethtool command for getting the Axi Ethernet register dump.
1148 * Issue "ethtool -d ethX" to execute this function.
1149 */
1150static void axienet_ethtools_get_regs(struct net_device *ndev,
1151 struct ethtool_regs *regs, void *ret)
1152{
1153 u32 *data = (u32 *) ret;
1154 size_t len = sizeof(u32) * AXIENET_REGS_N;
1155 struct axienet_local *lp = netdev_priv(ndev);
1156
1157 regs->version = 0;
1158 regs->len = len;
1159
1160 memset(data, 0, len);
1161 data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1162 data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1163 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1164 data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1165 data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1166 data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1167 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1168 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1169 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1170 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1171 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1172 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1173 data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1174 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1175 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1176 data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1177 data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1178 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1179 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1180 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1181 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1182 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1183 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1184 data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
1185 data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
1186 data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
1187 data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
1188 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1189 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1190 data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1191 data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1192 data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1193}
1194
1195/**
1196 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1197 * Tx and Rx paths.
1198 * @ndev: Pointer to net_device structure
1199 * @epauseparm: Pointer to ethtool_pauseparam structure.
1200 *
1201 * This implements ethtool command for getting axi ethernet pause frame
1202 * setting. Issue "ethtool -a ethX" to execute this function.
1203 */
1204static void
1205axienet_ethtools_get_pauseparam(struct net_device *ndev,
1206 struct ethtool_pauseparam *epauseparm)
1207{
1208 u32 regval;
1209 struct axienet_local *lp = netdev_priv(ndev);
1210 epauseparm->autoneg = 0;
1211 regval = axienet_ior(lp, XAE_FCC_OFFSET);
1212 epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
1213 epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
1214}
1215
1216/**
1217 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1218 * settings.
1219 * @ndev: Pointer to net_device structure
1220 * @epauseparam:Pointer to ethtool_pauseparam structure
1221 *
1222 * This implements ethtool command for enabling flow control on Rx and Tx
1223 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1224 * function.
1225 */
1226static int
1227axienet_ethtools_set_pauseparam(struct net_device *ndev,
1228 struct ethtool_pauseparam *epauseparm)
1229{
1230 u32 regval = 0;
1231 struct axienet_local *lp = netdev_priv(ndev);
1232
1233 if (netif_running(ndev)) {
1234 printk(KERN_ERR "%s: Please stop netif before applying "
1235 "configruation\n", ndev->name);
1236 return -EFAULT;
1237 }
1238
1239 regval = axienet_ior(lp, XAE_FCC_OFFSET);
1240 if (epauseparm->tx_pause)
1241 regval |= XAE_FCC_FCTX_MASK;
1242 else
1243 regval &= ~XAE_FCC_FCTX_MASK;
1244 if (epauseparm->rx_pause)
1245 regval |= XAE_FCC_FCRX_MASK;
1246 else
1247 regval &= ~XAE_FCC_FCRX_MASK;
1248 axienet_iow(lp, XAE_FCC_OFFSET, regval);
1249
1250 return 0;
1251}
1252
1253/**
1254 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1255 * @ndev: Pointer to net_device structure
1256 * @ecoalesce: Pointer to ethtool_coalesce structure
1257 *
1258 * This implements ethtool command for getting the DMA interrupt coalescing
1259 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
1260 * execute this function.
1261 */
1262static int axienet_ethtools_get_coalesce(struct net_device *ndev,
1263 struct ethtool_coalesce *ecoalesce)
1264{
1265 u32 regval = 0;
1266 struct axienet_local *lp = netdev_priv(ndev);
1267 regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1268 ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1269 >> XAXIDMA_COALESCE_SHIFT;
1270 regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1271 ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1272 >> XAXIDMA_COALESCE_SHIFT;
1273 return 0;
1274}
1275
1276/**
1277 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1278 * @ndev: Pointer to net_device structure
1279 * @ecoalesce: Pointer to ethtool_coalesce structure
1280 *
1281 * This implements ethtool command for setting the DMA interrupt coalescing
1282 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
1283 * prompt to execute this function.
1284 */
1285static int axienet_ethtools_set_coalesce(struct net_device *ndev,
1286 struct ethtool_coalesce *ecoalesce)
1287{
1288 struct axienet_local *lp = netdev_priv(ndev);
1289
1290 if (netif_running(ndev)) {
1291 printk(KERN_ERR "%s: Please stop netif before applying "
1292 "configruation\n", ndev->name);
1293 return -EFAULT;
1294 }
1295
1296 if ((ecoalesce->rx_coalesce_usecs) ||
1297 (ecoalesce->rx_coalesce_usecs_irq) ||
1298 (ecoalesce->rx_max_coalesced_frames_irq) ||
1299 (ecoalesce->tx_coalesce_usecs) ||
1300 (ecoalesce->tx_coalesce_usecs_irq) ||
1301 (ecoalesce->tx_max_coalesced_frames_irq) ||
1302 (ecoalesce->stats_block_coalesce_usecs) ||
1303 (ecoalesce->use_adaptive_rx_coalesce) ||
1304 (ecoalesce->use_adaptive_tx_coalesce) ||
1305 (ecoalesce->pkt_rate_low) ||
1306 (ecoalesce->rx_coalesce_usecs_low) ||
1307 (ecoalesce->rx_max_coalesced_frames_low) ||
1308 (ecoalesce->tx_coalesce_usecs_low) ||
1309 (ecoalesce->tx_max_coalesced_frames_low) ||
1310 (ecoalesce->pkt_rate_high) ||
1311 (ecoalesce->rx_coalesce_usecs_high) ||
1312 (ecoalesce->rx_max_coalesced_frames_high) ||
1313 (ecoalesce->tx_coalesce_usecs_high) ||
1314 (ecoalesce->tx_max_coalesced_frames_high) ||
1315 (ecoalesce->rate_sample_interval))
1316 return -EOPNOTSUPP;
1317 if (ecoalesce->rx_max_coalesced_frames)
1318 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1319 if (ecoalesce->tx_max_coalesced_frames)
1320 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
1321
1322 return 0;
1323}
1324
1325static struct ethtool_ops axienet_ethtool_ops = {
1326 .get_settings = axienet_ethtools_get_settings,
1327 .set_settings = axienet_ethtools_set_settings,
1328 .get_drvinfo = axienet_ethtools_get_drvinfo,
1329 .get_regs_len = axienet_ethtools_get_regs_len,
1330 .get_regs = axienet_ethtools_get_regs,
1331 .get_link = ethtool_op_get_link,
1332 .get_pauseparam = axienet_ethtools_get_pauseparam,
1333 .set_pauseparam = axienet_ethtools_set_pauseparam,
1334 .get_coalesce = axienet_ethtools_get_coalesce,
1335 .set_coalesce = axienet_ethtools_set_coalesce,
1336};
1337
1338/**
1339 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
1340 * @data: Data passed
1341 *
1342 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1343 * Tx/Rx BDs.
1344 */
1345static void axienet_dma_err_handler(unsigned long data)
1346{
1347 u32 axienet_status;
1348 u32 cr, i;
1349 int mdio_mcreg;
1350 struct axienet_local *lp = (struct axienet_local *) data;
1351 struct net_device *ndev = lp->ndev;
1352 struct axidma_bd *cur_p;
1353
1354 axienet_setoptions(ndev, lp->options &
1355 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1356 mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1357 axienet_mdio_wait_until_ready(lp);
1358 /* Disable the MDIO interface till Axi Ethernet Reset is completed.
1359 * When we do an Axi Ethernet reset, it resets the complete core
1360 * including the MDIO. So if MDIO is not disabled when the reset
1361 * process is started, MDIO will be broken afterwards. */
1362 axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
1363 ~XAE_MDIO_MC_MDIOEN_MASK));
1364
1365 __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
1366 __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
1367
1368 axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
1369 axienet_mdio_wait_until_ready(lp);
1370
1371 for (i = 0; i < TX_BD_NUM; i++) {
1372 cur_p = &lp->tx_bd_v[i];
1373 if (cur_p->phys)
1374 dma_unmap_single(ndev->dev.parent, cur_p->phys,
1375 (cur_p->cntrl &
1376 XAXIDMA_BD_CTRL_LENGTH_MASK),
1377 DMA_TO_DEVICE);
1378 if (cur_p->app4)
1379 dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
1380 cur_p->phys = 0;
1381 cur_p->cntrl = 0;
1382 cur_p->status = 0;
1383 cur_p->app0 = 0;
1384 cur_p->app1 = 0;
1385 cur_p->app2 = 0;
1386 cur_p->app3 = 0;
1387 cur_p->app4 = 0;
1388 cur_p->sw_id_offset = 0;
1389 }
1390
1391 for (i = 0; i < RX_BD_NUM; i++) {
1392 cur_p = &lp->rx_bd_v[i];
1393 cur_p->status = 0;
1394 cur_p->app0 = 0;
1395 cur_p->app1 = 0;
1396 cur_p->app2 = 0;
1397 cur_p->app3 = 0;
1398 cur_p->app4 = 0;
1399 }
1400
1401 lp->tx_bd_ci = 0;
1402 lp->tx_bd_tail = 0;
1403 lp->rx_bd_ci = 0;
1404
1405 /* Start updating the Rx channel control register */
1406 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1407 /* Update the interrupt coalesce count */
1408 cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
1409 (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1410 /* Update the delay timer count */
1411 cr = ((cr & ~XAXIDMA_DELAY_MASK) |
1412 (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1413 /* Enable coalesce, delay timer and error interrupts */
1414 cr |= XAXIDMA_IRQ_ALL_MASK;
1415 /* Finally write to the Rx channel control register */
1416 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1417
1418 /* Start updating the Tx channel control register */
1419 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1420 /* Update the interrupt coalesce count */
1421 cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
1422 (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1423 /* Update the delay timer count */
1424 cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
1425 (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1426 /* Enable coalesce, delay timer and error interrupts */
1427 cr |= XAXIDMA_IRQ_ALL_MASK;
1428 /* Finally write to the Tx channel control register */
1429 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1430
1431 /* Populate the tail pointer and bring the Rx Axi DMA engine out of
1432 * halted state. This will make the Rx side ready for reception.*/
1433 axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
1434 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1435 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
1436 cr | XAXIDMA_CR_RUNSTOP_MASK);
1437 axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
1438 (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
1439
1440 /* Write to the RS (Run-stop) bit in the Tx channel control register.
1441 * Tx channel is now ready to run. But only after we write to the
1442 * tail pointer register that the Tx channel will start transmitting */
1443 axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
1444 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1445 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
1446 cr | XAXIDMA_CR_RUNSTOP_MASK);
1447
1448 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1449 axienet_status &= ~XAE_RCW1_RX_MASK;
1450 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1451
1452 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1453 if (axienet_status & XAE_INT_RXRJECT_MASK)
1454 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1455 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1456
1457 /* Sync default options with HW but leave receiver and
1458 * transmitter disabled.*/
1459 axienet_setoptions(ndev, lp->options &
1460 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1461 axienet_set_mac_address(ndev, NULL);
1462 axienet_set_multicast_list(ndev);
1463 axienet_setoptions(ndev, lp->options);
1464}
1465
1466/**
1467 * axienet_of_probe - Axi Ethernet probe function.
1468 * @op: Pointer to platform device structure.
1469 * @match: Pointer to device id structure
1470 *
1471 * returns: 0, on success
1472 * Non-zero error value on failure.
1473 *
1474 * This is the probe routine for Axi Ethernet driver. This is called before
1475 * any other driver routines are invoked. It allocates and sets up the Ethernet
1476 * device. Parses through device tree and populates fields of
1477 * axienet_local. It registers the Ethernet device.
1478 */
1479static int __devinit axienet_of_probe(struct platform_device *op)
1480{
1481 __be32 *p;
1482 int size, ret = 0;
1483 struct device_node *np;
1484 struct axienet_local *lp;
1485 struct net_device *ndev;
1486 const void *addr;
1487
1488 ndev = alloc_etherdev(sizeof(*lp));
1489 if (!ndev)
1490 return -ENOMEM;
1491
1492 ether_setup(ndev);
1493 dev_set_drvdata(&op->dev, ndev);
1494
1495 SET_NETDEV_DEV(ndev, &op->dev);
1496 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1497 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
1498 ndev->netdev_ops = &axienet_netdev_ops;
1499 ndev->ethtool_ops = &axienet_ethtool_ops;
1500
1501 lp = netdev_priv(ndev);
1502 lp->ndev = ndev;
1503 lp->dev = &op->dev;
1504 lp->options = XAE_OPTION_DEFAULTS;
1505 /* Map device registers */
1506 lp->regs = of_iomap(op->dev.of_node, 0);
1507 if (!lp->regs) {
1508 dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
1509 goto nodev;
1510 }
1511 /* Setup checksum offload, but default to off if not specified */
1512 lp->features = 0;
1513
1514 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
1515 if (p) {
1516 switch (be32_to_cpup(p)) {
1517 case 1:
1518 lp->csum_offload_on_tx_path =
1519 XAE_FEATURE_PARTIAL_TX_CSUM;
1520 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1521 /* Can checksum TCP/UDP over IPv4. */
1522 ndev->features |= NETIF_F_IP_CSUM;
1523 break;
1524 case 2:
1525 lp->csum_offload_on_tx_path =
1526 XAE_FEATURE_FULL_TX_CSUM;
1527 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1528 /* Can checksum TCP/UDP over IPv4. */
1529 ndev->features |= NETIF_F_IP_CSUM;
1530 break;
1531 default:
1532 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1533 }
1534 }
1535 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
1536 if (p) {
1537 switch (be32_to_cpup(p)) {
1538 case 1:
1539 lp->csum_offload_on_rx_path =
1540 XAE_FEATURE_PARTIAL_RX_CSUM;
1541 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
1542 break;
1543 case 2:
1544 lp->csum_offload_on_rx_path =
1545 XAE_FEATURE_FULL_RX_CSUM;
1546 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
1547 break;
1548 default:
1549 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1550 }
1551 }
1552 /* For supporting jumbo frames, the Axi Ethernet hardware must have
1553 * a larger Rx/Tx Memory. Typically, the size must be more than or
1554 * equal to 16384 bytes, so that we can enable jumbo option and start
1555 * supporting jumbo frames. Here we check for memory allocated for
1556 * Rx/Tx in the hardware from the device-tree and accordingly set
1557 * flags. */
1558 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
1559 if (p) {
1560 if ((be32_to_cpup(p)) >= 0x4000)
1561 lp->jumbo_support = 1;
1562 }
1563 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
1564 NULL);
1565 if (p)
1566 lp->temac_type = be32_to_cpup(p);
1567 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
1568 if (p)
1569 lp->phy_type = be32_to_cpup(p);
1570
1571 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1572 np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
1573 if (!np) {
1574 dev_err(&op->dev, "could not find DMA node\n");
1575 goto err_iounmap;
1576 }
1577 lp->dma_regs = of_iomap(np, 0);
1578 if (lp->dma_regs) {
1579 dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
1580 } else {
1581 dev_err(&op->dev, "unable to map DMA registers\n");
1582 of_node_put(np);
1583 }
1584 lp->rx_irq = irq_of_parse_and_map(np, 1);
1585 lp->tx_irq = irq_of_parse_and_map(np, 0);
1586 of_node_put(np);
1587 if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
1588 dev_err(&op->dev, "could not determine irqs\n");
1589 ret = -ENOMEM;
1590 goto err_iounmap_2;
1591 }
1592
1593 /* Retrieve the MAC address */
1594 addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
1595 if ((!addr) || (size != 6)) {
1596 dev_err(&op->dev, "could not find MAC address\n");
1597 ret = -ENODEV;
1598 goto err_iounmap_2;
1599 }
1600 axienet_set_mac_address(ndev, (void *) addr);
1601
1602 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1603 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
1604
1605 lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
1606 ret = axienet_mdio_setup(lp, op->dev.of_node);
1607 if (ret)
1608 dev_warn(&op->dev, "error registering MDIO bus\n");
1609
1610 ret = register_netdev(lp->ndev);
1611 if (ret) {
1612 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
1613 goto err_iounmap_2;
1614 }
1615
1616 tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
1617 (unsigned long) lp);
1618 tasklet_disable(&lp->dma_err_tasklet);
1619
1620 return 0;
1621
1622err_iounmap_2:
1623 if (lp->dma_regs)
1624 iounmap(lp->dma_regs);
1625err_iounmap:
1626 iounmap(lp->regs);
1627nodev:
1628 free_netdev(ndev);
1629 ndev = NULL;
1630 return ret;
1631}
1632
/* Driver remove routine: unwinds everything axienet_of_probe set up —
 * MDIO bus, netdev registration, device-tree references and register
 * mappings — before freeing the net_device itself. */
static int __devexit axienet_of_remove(struct platform_device *op)
{
	struct net_device *ndev = dev_get_drvdata(&op->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	/* Stops the interface (via .ndo_stop) if it is still up */
	unregister_netdev(ndev);

	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	dev_set_drvdata(&op->dev, NULL);

	/* Unmap registers only after nothing can access the hardware */
	iounmap(lp->regs);
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
	free_netdev(ndev);

	return 0;
}
1654
/* Platform driver glue; matched against device-tree nodes listed in
 * axienet_of_match (defined elsewhere in this file). */
static struct platform_driver axienet_of_driver = {
	.probe = axienet_of_probe,
	.remove = __devexit_p(axienet_of_remove),
	.driver = {
		 .owner = THIS_MODULE,
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};
1664
/* Module entry point: register the platform driver with the driver core. */
static int __init axienet_init(void)
{
	return platform_driver_register(&axienet_of_driver);
}
1669
/* Module exit point: unregister the platform driver. */
static void __exit axienet_exit(void)
{
	platform_driver_unregister(&axienet_of_driver);
}
1674
1675module_init(axienet_init);
1676module_exit(axienet_exit);
1677
1678MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
1679MODULE_AUTHOR("Xilinx");
1680MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
new file mode 100644
index 000000000000..d70b6e79f6c0
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -0,0 +1,238 @@
1/*
2 * MDIO bus driver for the Xilinx Axi Ethernet device
3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
8 */
9
10#include <linux/of_address.h>
11#include <linux/of_mdio.h>
12#include <linux/jiffies.h>
13
14#include "xilinx_axienet.h"
15
16#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */
17#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT
18
19/* Wait till MDIO interface is ready to accept a new transaction.*/
20int axienet_mdio_wait_until_ready(struct axienet_local *lp)
21{
22 long end = jiffies + 2;
23 while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
24 XAE_MDIO_MCR_READY_MASK)) {
25 if (end - jiffies <= 0) {
26 WARN_ON(1);
27 return -ETIMEDOUT;
28 }
29 udelay(1);
30 }
31 return 0;
32}
33
34/**
35 * axienet_mdio_read - MDIO interface read function
36 * @bus: Pointer to mii bus structure
37 * @phy_id: Address of the PHY device
38 * @reg: PHY register to read
39 *
40 * returns: The register contents on success, -ETIMEDOUT on a timeout
41 *
42 * Reads the contents of the requested register from the requested PHY
43 * address by first writing the details into MCR register. After a while
44 * the register MRD is read to obtain the PHY register content.
45 */
46static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
47{
48 u32 rc;
49 int ret;
50 struct axienet_local *lp = bus->priv;
51
	/* Wait for any previous transaction to complete before starting. */
52 ret = axienet_mdio_wait_until_ready(lp);
53 if (ret < 0)
54 return ret;
55
	/* Program MCR: PHY address + register address, then kick off a
	 * READ transaction via INITIATE | OP_READ.
	 */
56 axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
57 (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
58 XAE_MDIO_MCR_PHYAD_MASK) |
59 ((reg << XAE_MDIO_MCR_REGAD_SHIFT) &
60 XAE_MDIO_MCR_REGAD_MASK) |
61 XAE_MDIO_MCR_INITIATE_MASK |
62 XAE_MDIO_MCR_OP_READ_MASK));
63
	/* Second wait: transaction done, the result is now latched in MRD. */
64 ret = axienet_mdio_wait_until_ready(lp);
65 if (ret < 0)
66 return ret;
67
	/* The PHY's reply occupies the low 16 bits of the MRD register. */
68 rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF;
69
70 dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n",
71 phy_id, reg, rc);
72
73 return rc;
74}
75
76/**
77 * axienet_mdio_write - MDIO interface write function
78 * @bus: Pointer to mii bus structure
79 * @phy_id: Address of the PHY device
80 * @reg: PHY register to write to
81 * @val: Value to be written into the register
82 *
83 * returns: 0 on success, -ETIMEDOUT on a timeout
84 *
85 * Writes the value to the requested register by first writing the value
86 * into the MWD register. The MCR register is then set up appropriately
87 * to finish the write operation.
88 */
89static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
90 u16 val)
91{
92 int ret;
93 struct axienet_local *lp = bus->priv;
94
95 dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
96 phy_id, reg, val);
97
	/* Wait for any previous transaction to complete before starting. */
98 ret = axienet_mdio_wait_until_ready(lp);
99 if (ret < 0)
100 return ret;
101
	/* Stage the data in MWD, then program MCR with PHY/register address
	 * and kick off the transaction via INITIATE | OP_WRITE.
	 */
102 axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
103 axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
104 (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
105 XAE_MDIO_MCR_PHYAD_MASK) |
106 ((reg << XAE_MDIO_MCR_REGAD_SHIFT) &
107 XAE_MDIO_MCR_REGAD_MASK) |
108 XAE_MDIO_MCR_INITIATE_MASK |
109 XAE_MDIO_MCR_OP_WRITE_MASK));
110
	/* Second wait confirms the write transaction has finished. */
111 ret = axienet_mdio_wait_until_ready(lp);
112 if (ret < 0)
113 return ret;
114 return 0;
115}
116
117/**
118 * axienet_mdio_setup - MDIO setup function
119 * @lp: Pointer to axienet local data structure.
120 * @np: Pointer to device node
121 *
122 * returns: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
123 * mdiobus_alloc (to allocate memory for mii bus structure) fails.
124 *
125 * Sets up the MDIO interface by initializing the MDIO clock and enabling the
126 * MDIO interface in hardware. Register the MDIO interface.
127 **/
128int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
129{
130 int ret;
131 u32 clk_div, host_clock;
132 u32 *property_p;
133 struct mii_bus *bus;
134 struct resource res;
135 struct device_node *np1;
136
137 /* clk_div can be calculated by deriving it from the equation:
138 * fMDIO = fHOST / ((1 + clk_div) * 2)
139 *
140 * Where fMDIO <= 2500000, so we get:
141 * fHOST / ((1 + clk_div) * 2) <= 2500000
142 *
143 * Then we get:
144 * 1 / ((1 + clk_div) * 2) <= (2500000 / fHOST)
145 *
146 * Then we get:
147 * 1 / (1 + clk_div) <= ((2500000 * 2) / fHOST)
148 *
149 * Then we get:
150 * 1 / (1 + clk_div) <= (5000000 / fHOST)
151 *
152 * So:
153 * (1 + clk_div) >= (fHOST / 5000000)
154 *
155 * And finally:
156 * clk_div >= (fHOST / 5000000) - 1
157 *
158 * fHOST can be read from the flattened device tree as property
159 * "clock-frequency" from the CPU
160 */
161
162 np1 = of_find_node_by_name(NULL, "cpu");
163 if (!np1) {
164 printk(KERN_WARNING "%s(): Could not find CPU device node.",
165 __func__);
166 printk(KERN_WARNING "Setting MDIO clock divisor to "
167 "default %d\n", DEFAULT_CLOCK_DIVISOR);
168 clk_div = DEFAULT_CLOCK_DIVISOR;
169 goto issue;
170 }
171 property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
172 if (!property_p) {
173 printk(KERN_WARNING "%s(): Could not find CPU property: "
174 "clock-frequency.", __func__);
175 printk(KERN_WARNING "Setting MDIO clock divisor to "
176 "default %d\n", DEFAULT_CLOCK_DIVISOR);
177 clk_div = DEFAULT_CLOCK_DIVISOR;
178 goto issue;
179 }
180
181 host_clock = be32_to_cpup(property_p);
182 clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
183 /* If there is any remainder from the division of
184 * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
185 * 1 to the clock divisor or we will surely be above 2.5 MHz */
186 if (host_clock % (MAX_MDIO_FREQ * 2))
187 clk_div++;
188
189 printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
190 "on %u Hz host clock.\n", __func__, clk_div, host_clock);
191
192 of_node_put(np1);
193issue:
194 axienet_iow(lp, XAE_MDIO_MC_OFFSET,
195 (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
196
197 ret = axienet_mdio_wait_until_ready(lp);
198 if (ret < 0)
199 return ret;
200
201 bus = mdiobus_alloc();
202 if (!bus)
203 return -ENOMEM;
204
205 np1 = of_get_parent(lp->phy_node);
206 of_address_to_resource(np1, 0, &res);
207 snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
208 (unsigned long long) res.start);
209
210 bus->priv = lp;
211 bus->name = "Xilinx Axi Ethernet MDIO";
212 bus->read = axienet_mdio_read;
213 bus->write = axienet_mdio_write;
214 bus->parent = lp->dev;
215 bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
216 lp->mii_bus = bus;
217
218 ret = of_mdiobus_register(bus, np1);
219 if (ret) {
220 mdiobus_free(bus);
221 return ret;
222 }
223 return 0;
224}
225
226/**
227 * axienet_mdio_teardown - MDIO remove function
228 * @lp: Pointer to axienet local data structure.
229 *
230 * Unregisters the MDIO bus and frees any associated memory for the mii bus.
231 */
232void axienet_mdio_teardown(struct axienet_local *lp)
233{
234 mdiobus_unregister(lp->mii_bus);
	/* bus->irq was pointed at lp->mdio_irqs in axienet_mdio_setup().
	 * NOTE(review): presumably the probe path kmalloc'ed that table and
	 * ownership lands here - confirm nothing still references it after
	 * this kfree().
	 */
235 kfree(lp->mii_bus->irq);
236 mdiobus_free(lp->mii_bus);
237 lp->mii_bus = NULL;
238}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 79013e5731a5..90e611a6f6c6 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1136,10 +1136,8 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
1136 1136
1137 /* Create an ethernet device instance */ 1137 /* Create an ethernet device instance */
1138 ndev = alloc_etherdev(sizeof(struct net_local)); 1138 ndev = alloc_etherdev(sizeof(struct net_local));
1139 if (!ndev) { 1139 if (!ndev)
1140 dev_err(dev, "Could not allocate network device\n");
1141 return -ENOMEM; 1140 return -ENOMEM;
1142 }
1143 1141
1144 dev_set_drvdata(dev, ndev); 1142 dev_set_drvdata(dev, ndev);
1145 SET_NETDEV_DEV(ndev, &ofdev->dev); 1143 SET_NETDEV_DEV(ndev, &ofdev->dev);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 9537aaa50c2f..49b8b58fc5c6 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1162,7 +1162,7 @@ static void baycom_probe(struct net_device *dev)
1162/* 1162/*
1163 * command line settable parameters 1163 * command line settable parameters
1164 */ 1164 */
1165static const char *mode[NR_PORTS] = { "", }; 1165static char *mode[NR_PORTS] = { "", };
1166static int iobase[NR_PORTS] = { 0x378, }; 1166static int iobase[NR_PORTS] = { 0x378, };
1167 1167
1168module_param_array(mode, charp, NULL, 0); 1168module_param_array(mode, charp, NULL, 0);
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 279d2296290a..f1aea0c98333 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -477,7 +477,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
477/* 477/*
478 * command line settable parameters 478 * command line settable parameters
479 */ 479 */
480static const char *mode[NR_PORTS] = { "picpar", }; 480static char *mode[NR_PORTS] = { "picpar", };
481static int iobase[NR_PORTS] = { 0x378, }; 481static int iobase[NR_PORTS] = { 0x378, };
482 482
483module_param_array(mode, charp, NULL, 0); 483module_param_array(mode, charp, NULL, 0);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 96a98d2ff151..696327773fbe 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -403,7 +403,6 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
403 403
404 /* Allocate a new mcs */ 404 /* Allocate a new mcs */
405 if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) { 405 if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) {
406 printk(KERN_WARNING "YAM: no memory to allocate mcs\n");
407 release_firmware(fw); 406 release_firmware(fw);
408 return NULL; 407 return NULL;
409 } 408 }
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index e68c941926f1..2a51363d9fed 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1600,12 +1600,8 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1600 } 1600 }
1601 1601
1602 image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); 1602 image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
1603 if (!image){ 1603 if (!image)
1604 printk(KERN_ERR "%s: Unable to allocate memory "
1605 "for EEPROM image\n", dev->name);
1606 return -ENOMEM; 1604 return -ENOMEM;
1607 }
1608
1609 1605
1610 if (rrpriv->fw_running){ 1606 if (rrpriv->fw_running){
1611 printk("%s: Firmware already running\n", dev->name); 1607 printk("%s: Firmware already running\n", dev->name);
@@ -1637,8 +1633,6 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1637 image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); 1633 image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
1638 oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL); 1634 oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
1639 if (!image || !oldimage) { 1635 if (!image || !oldimage) {
1640 printk(KERN_ERR "%s: Unable to allocate memory "
1641 "for EEPROM image\n", dev->name);
1642 error = -ENOMEM; 1636 error = -ENOMEM;
1643 goto wf_out; 1637 goto wf_out;
1644 } 1638 }
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 64f403da101c..617a446d126c 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1608,7 +1608,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1608 self->ringbuf = kmalloc(OBOE_RING_LEN << 1, GFP_KERNEL); 1608 self->ringbuf = kmalloc(OBOE_RING_LEN << 1, GFP_KERNEL);
1609 if (!self->ringbuf) 1609 if (!self->ringbuf)
1610 { 1610 {
1611 printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n");
1612 err = -ENOMEM; 1611 err = -ENOMEM;
1613 goto freeregion; 1612 goto freeregion;
1614 } 1613 }
@@ -1647,7 +1646,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1647 1646
1648 if (!ok) 1647 if (!ok)
1649 { 1648 {
1650 printk (KERN_ERR DRIVER_NAME ": can't allocate rx/tx buffers\n");
1651 err = -ENOMEM; 1649 err = -ENOMEM;
1652 goto freebufs; 1650 goto freebufs;
1653 } 1651 }
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index e8882023576b..f9347ea3d381 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -169,10 +169,8 @@ static struct netconsole_target *alloc_param_target(char *target_config)
169 * Note that these targets get their config_item fields zeroed-out. 169 * Note that these targets get their config_item fields zeroed-out.
170 */ 170 */
171 nt = kzalloc(sizeof(*nt), GFP_KERNEL); 171 nt = kzalloc(sizeof(*nt), GFP_KERNEL);
172 if (!nt) { 172 if (!nt)
173 printk(KERN_ERR "netconsole: failed to allocate memory\n");
174 goto fail; 173 goto fail;
175 }
176 174
177 nt->np.name = "netconsole"; 175 nt->np.name = "netconsole";
178 strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); 176 strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
@@ -551,10 +549,8 @@ static struct config_item *make_netconsole_target(struct config_group *group,
551 * Target is disabled at creation (enabled == 0). 549 * Target is disabled at creation (enabled == 0).
552 */ 550 */
553 nt = kzalloc(sizeof(*nt), GFP_KERNEL); 551 nt = kzalloc(sizeof(*nt), GFP_KERNEL);
554 if (!nt) { 552 if (!nt)
555 printk(KERN_ERR "netconsole: failed to allocate memory\n");
556 return ERR_PTR(-ENOMEM); 553 return ERR_PTR(-ENOMEM);
557 }
558 554
559 nt->np.name = "netconsole"; 555 nt->np.name = "netconsole";
560 strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); 556 strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index a9e9ca8a86ed..1a5a316cc968 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1260,10 +1260,8 @@ static void plip_attach (struct parport *port)
1260 1260
1261 sprintf(name, "plip%d", unit); 1261 sprintf(name, "plip%d", unit);
1262 dev = alloc_etherdev(sizeof(struct net_local)); 1262 dev = alloc_etherdev(sizeof(struct net_local));
1263 if (!dev) { 1263 if (!dev)
1264 printk(KERN_ERR "plip: memory squeeze\n");
1265 return; 1264 return;
1266 }
1267 1265
1268 strcpy(dev->name, name); 1266 strcpy(dev->name, name);
1269 1267
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index df884dde2a51..234cd9d87ed9 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -670,10 +670,8 @@ static int __init pptp_init_module(void)
670 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n"); 670 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
671 671
672 callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *)); 672 callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
673 if (!callid_sock) { 673 if (!callid_sock)
674 pr_err("PPTP: cann't allocate memory\n");
675 return -ENOMEM; 674 return -ENOMEM;
676 }
677 675
678 err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP); 676 err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
679 if (err) { 677 if (err) {
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 7145714a5ec9..a57f05726b57 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -497,8 +497,6 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
497 /* Allocate our net_device structure */ 497 /* Allocate our net_device structure */
498 ndev = alloc_etherdev(sizeof(struct rionet_private)); 498 ndev = alloc_etherdev(sizeof(struct rionet_private));
499 if (ndev == NULL) { 499 if (ndev == NULL) {
500 printk(KERN_INFO "%s: could not allocate ethernet device.\n",
501 DRV_NAME);
502 rc = -ENOMEM; 500 rc = -ENOMEM;
503 goto out; 501 goto out;
504 } 502 }
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index ba08341fb92c..69345dfae0fd 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -1296,10 +1296,8 @@ static int __init slip_init(void)
1296 1296
1297 slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev, 1297 slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev,
1298 GFP_KERNEL); 1298 GFP_KERNEL);
1299 if (!slip_devs) { 1299 if (!slip_devs)
1300 printk(KERN_ERR "SLIP: Can't allocate slip devices array.\n");
1301 return -ENOMEM; 1300 return -ENOMEM;
1302 }
1303 1301
1304 /* Fill in our line protocol discipline, and register it */ 1302 /* Fill in our line protocol discipline, and register it */
1305 status = tty_register_ldisc(N_SLIP, &sl_ldisc); 1303 status = tty_register_ldisc(N_SLIP, &sl_ldisc);
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index ef9fdf3652f6..d7c292aa76b1 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -674,15 +674,11 @@ static int xl_open(struct net_device *dev)
674 /* These MUST be on 8 byte boundaries */ 674 /* These MUST be on 8 byte boundaries */
675 xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL); 675 xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL);
676 if (xl_priv->xl_tx_ring == NULL) { 676 if (xl_priv->xl_tx_ring == NULL) {
677 printk(KERN_WARNING "%s: Not enough memory to allocate tx buffers.\n",
678 dev->name);
679 free_irq(dev->irq,dev); 677 free_irq(dev->irq,dev);
680 return -ENOMEM; 678 return -ENOMEM;
681 } 679 }
682 xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL); 680 xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL);
683 if (xl_priv->xl_rx_ring == NULL) { 681 if (xl_priv->xl_rx_ring == NULL) {
684 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n",
685 dev->name);
686 free_irq(dev->irq,dev); 682 free_irq(dev->irq,dev);
687 kfree(xl_priv->xl_tx_ring); 683 kfree(xl_priv->xl_tx_ring);
688 return -ENOMEM; 684 return -ENOMEM;
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 6153cfd696b6..1cdc034f6aec 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -171,7 +171,6 @@ static int __devinit madgemc_probe(struct device *device)
171 171
172 card = kmalloc(sizeof(struct card_info), GFP_KERNEL); 172 card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
173 if (card==NULL) { 173 if (card==NULL) {
174 printk("madgemc: unable to allocate card struct\n");
175 ret = -ENOMEM; 174 ret = -ENOMEM;
176 goto getout1; 175 goto getout1;
177 } 176 }
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 65e9cf3a71fe..102f896bbc58 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1525,10 +1525,8 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
1525 /* Check if adapter is opened, avoiding COMMAND_REJECT 1525 /* Check if adapter is opened, avoiding COMMAND_REJECT
1526 * interrupt by the adapter! 1526 * interrupt by the adapter!
1527 */ 1527 */
1528 if(tp->AdapterOpenFlag == 0) 1528 if (tp->AdapterOpenFlag == 0) {
1529 { 1529 if (tp->CMDqueue & OC_OPEN) {
1530 if(tp->CMDqueue & OC_OPEN)
1531 {
1532 /* Execute OPEN command */ 1530 /* Execute OPEN command */
1533 tp->CMDqueue ^= OC_OPEN; 1531 tp->CMDqueue ^= OC_OPEN;
1534 1532
@@ -1536,21 +1534,17 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
1536 tp->scb.Parm[0] = LOWORD(Addr); 1534 tp->scb.Parm[0] = LOWORD(Addr);
1537 tp->scb.Parm[1] = HIWORD(Addr); 1535 tp->scb.Parm[1] = HIWORD(Addr);
1538 tp->scb.CMD = OPEN; 1536 tp->scb.CMD = OPEN;
1539 } 1537 } else
1540 else
1541 /* No OPEN command queued, but adapter closed. Note: 1538 /* No OPEN command queued, but adapter closed. Note:
1542 * We'll try to re-open the adapter in DriverPoll() 1539 * We'll try to re-open the adapter in DriverPoll()
1543 */ 1540 */
1544 return; /* No adapter command issued */ 1541 return; /* No adapter command issued */
1545 } 1542 } else {
1546 else
1547 {
1548 /* Adapter is open; evaluate command queue: try to execute 1543 /* Adapter is open; evaluate command queue: try to execute
1549 * outstanding commands (depending on priority!) CLOSE 1544 * outstanding commands (depending on priority!) CLOSE
1550 * command queued 1545 * command queued
1551 */ 1546 */
1552 if(tp->CMDqueue & OC_CLOSE) 1547 if (tp->CMDqueue & OC_CLOSE) {
1553 {
1554 tp->CMDqueue ^= OC_CLOSE; 1548 tp->CMDqueue ^= OC_CLOSE;
1555 tp->AdapterOpenFlag = 0; 1549 tp->AdapterOpenFlag = 0;
1556 tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */ 1550 tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */
@@ -1560,109 +1554,70 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
1560 tp->CMDqueue |= OC_OPEN; /* re-open adapter */ 1554 tp->CMDqueue |= OC_OPEN; /* re-open adapter */
1561 else 1555 else
1562 tp->CMDqueue = 0; /* no more commands */ 1556 tp->CMDqueue = 0; /* no more commands */
1563 } 1557 } else if (tp->CMDqueue & OC_RECEIVE) {
1564 else 1558 tp->CMDqueue ^= OC_RECEIVE;
1565 { 1559 Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer);
1566 if(tp->CMDqueue & OC_RECEIVE) 1560 tp->scb.Parm[0] = LOWORD(Addr);
1567 { 1561 tp->scb.Parm[1] = HIWORD(Addr);
1568 tp->CMDqueue ^= OC_RECEIVE; 1562 tp->scb.CMD = RECEIVE;
1569 Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer); 1563 } else if (tp->CMDqueue & OC_TRANSMIT_HALT) {
1570 tp->scb.Parm[0] = LOWORD(Addr); 1564 /* NOTE: TRANSMIT.HALT must be checked
1571 tp->scb.Parm[1] = HIWORD(Addr); 1565 * before TRANSMIT.
1572 tp->scb.CMD = RECEIVE; 1566 */
1573 } 1567 tp->CMDqueue ^= OC_TRANSMIT_HALT;
1574 else 1568 tp->scb.CMD = TRANSMIT_HALT;
1575 { 1569
1576 if(tp->CMDqueue & OC_TRANSMIT_HALT) 1570 /* Parm[0] and Parm[1] are ignored
1577 { 1571 * but should be set to zero!
1578 /* NOTE: TRANSMIT.HALT must be checked 1572 */
1579 * before TRANSMIT. 1573 tp->scb.Parm[0] = 0;
1580 */ 1574 tp->scb.Parm[1] = 0;
1581 tp->CMDqueue ^= OC_TRANSMIT_HALT; 1575 } else if (tp->CMDqueue & OC_TRANSMIT) {
1582 tp->scb.CMD = TRANSMIT_HALT; 1576 /* NOTE: TRANSMIT must be
1583 1577 * checked after TRANSMIT.HALT
1584 /* Parm[0] and Parm[1] are ignored 1578 */
1585 * but should be set to zero! 1579 if (tp->TransmitCommandActive) {
1586 */ 1580 if (!tp->TransmitHaltScheduled) {
1587 tp->scb.Parm[0] = 0; 1581 tp->TransmitHaltScheduled = 1;
1588 tp->scb.Parm[1] = 0; 1582 tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT);
1589 }
1590 else
1591 {
1592 if(tp->CMDqueue & OC_TRANSMIT)
1593 {
1594 /* NOTE: TRANSMIT must be
1595 * checked after TRANSMIT.HALT
1596 */
1597 if(tp->TransmitCommandActive)
1598 {
1599 if(!tp->TransmitHaltScheduled)
1600 {
1601 tp->TransmitHaltScheduled = 1;
1602 tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT) ;
1603 }
1604 tp->TransmitCommandActive = 0;
1605 return;
1606 }
1607
1608 tp->CMDqueue ^= OC_TRANSMIT;
1609 tms380tr_cancel_tx_queue(tp);
1610 Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer);
1611 tp->scb.Parm[0] = LOWORD(Addr);
1612 tp->scb.Parm[1] = HIWORD(Addr);
1613 tp->scb.CMD = TRANSMIT;
1614 tp->TransmitCommandActive = 1;
1615 }
1616 else
1617 {
1618 if(tp->CMDqueue & OC_MODIFY_OPEN_PARMS)
1619 {
1620 tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS;
1621 tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/
1622 tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION;
1623 tp->scb.Parm[1] = 0; /* is ignored but should be zero */
1624 tp->scb.CMD = MODIFY_OPEN_PARMS;
1625 }
1626 else
1627 {
1628 if(tp->CMDqueue & OC_SET_FUNCT_ADDR)
1629 {
1630 tp->CMDqueue ^= OC_SET_FUNCT_ADDR;
1631 tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr);
1632 tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr);
1633 tp->scb.CMD = SET_FUNCT_ADDR;
1634 }
1635 else
1636 {
1637 if(tp->CMDqueue & OC_SET_GROUP_ADDR)
1638 {
1639 tp->CMDqueue ^= OC_SET_GROUP_ADDR;
1640 tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr);
1641 tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr);
1642 tp->scb.CMD = SET_GROUP_ADDR;
1643 }
1644 else
1645 {
1646 if(tp->CMDqueue & OC_READ_ERROR_LOG)
1647 {
1648 tp->CMDqueue ^= OC_READ_ERROR_LOG;
1649 Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer);
1650 tp->scb.Parm[0] = LOWORD(Addr);
1651 tp->scb.Parm[1] = HIWORD(Addr);
1652 tp->scb.CMD = READ_ERROR_LOG;
1653 }
1654 else
1655 {
1656 printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n");
1657 tp->CMDqueue = 0;
1658 return;
1659 }
1660 }
1661 }
1662 }
1663 }
1664 } 1583 }
1584 tp->TransmitCommandActive = 0;
1585 return;
1665 } 1586 }
1587
1588 tp->CMDqueue ^= OC_TRANSMIT;
1589 tms380tr_cancel_tx_queue(tp);
1590 Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer);
1591 tp->scb.Parm[0] = LOWORD(Addr);
1592 tp->scb.Parm[1] = HIWORD(Addr);
1593 tp->scb.CMD = TRANSMIT;
1594 tp->TransmitCommandActive = 1;
1595 } else if (tp->CMDqueue & OC_MODIFY_OPEN_PARMS) {
1596 tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS;
1597 tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/
1598 tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION;
1599 tp->scb.Parm[1] = 0; /* is ignored but should be zero */
1600 tp->scb.CMD = MODIFY_OPEN_PARMS;
1601 } else if (tp->CMDqueue & OC_SET_FUNCT_ADDR) {
1602 tp->CMDqueue ^= OC_SET_FUNCT_ADDR;
1603 tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr);
1604 tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr);
1605 tp->scb.CMD = SET_FUNCT_ADDR;
1606 } else if (tp->CMDqueue & OC_SET_GROUP_ADDR) {
1607 tp->CMDqueue ^= OC_SET_GROUP_ADDR;
1608 tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr);
1609 tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr);
1610 tp->scb.CMD = SET_GROUP_ADDR;
1611 } else if (tp->CMDqueue & OC_READ_ERROR_LOG) {
1612 tp->CMDqueue ^= OC_READ_ERROR_LOG;
1613 Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer);
1614 tp->scb.Parm[0] = LOWORD(Addr);
1615 tp->scb.Parm[1] = HIWORD(Addr);
1616 tp->scb.CMD = READ_ERROR_LOG;
1617 } else {
1618 printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n");
1619 tp->CMDqueue = 0;
1620 return;
1666 } 1621 }
1667 } 1622 }
1668 1623
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 233576127934..4bad899fb38f 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -398,6 +398,27 @@ config USB_NET_KALMIA
398 To compile this driver as a module, choose M here: the 398 To compile this driver as a module, choose M here: the
399 module will be called kalmia. 399 module will be called kalmia.
400 400
401config USB_NET_QMI_WWAN
402 tristate "QMI WWAN driver for Qualcomm MSM based 3G and LTE modems"
403 depends on USB_USBNET
404 help
405 Support WWAN LTE/3G devices based on Qualcomm Mobile Data Modem
406 (MDM) chipsets. Examples of such devices are
407 * Huawei E392/E398
408
409 This driver will only drive the ethernet part of the chips.
410 The devices require additional configuration to be usable.
411 Multiple management interfaces with linux drivers are
412 available:
413
414 * option: AT commands on /dev/ttyUSBx
415 * cdc-wdm: Qualcomm MSM Interface (QMI) protocol on /dev/cdc-wdmx
416
417 A modem manager with support for QMI is recommended.
418
419 To compile this driver as a module, choose M here: the
420 module will be called qmi_wwan.
421
401config USB_HSO 422config USB_HSO
402 tristate "Option USB High Speed Mobile Devices" 423 tristate "Option USB High Speed Mobile Devices"
403 depends on USB && RFKILL 424 depends on USB && RFKILL
@@ -461,4 +482,5 @@ config USB_VL600
461 482
462 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17 483 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
463 484
485
464endmenu 486endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c203fa21f6b1..a2e2d72c52a0 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -29,4 +29,5 @@ obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
29obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o 29obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
30obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o 30obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
31obj-$(CONFIG_USB_VL600) += lg-vl600.o 31obj-$(CONFIG_USB_VL600) += lg-vl600.o
32obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
32 33
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 5d99b8cacd7d..752393092325 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1332,10 +1332,8 @@ static int pegasus_probe(struct usb_interface *intf,
1332 usb_get_dev(dev); 1332 usb_get_dev(dev);
1333 1333
1334 net = alloc_etherdev(sizeof(struct pegasus)); 1334 net = alloc_etherdev(sizeof(struct pegasus));
1335 if (!net) { 1335 if (!net)
1336 dev_err(&intf->dev, "can't allocate %s\n", "device");
1337 goto out; 1336 goto out;
1338 }
1339 1337
1340 pegasus = netdev_priv(net); 1338 pegasus = netdev_priv(net);
1341 pegasus->dev_index = dev_index; 1339 pegasus->dev_index = dev_index;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
new file mode 100644
index 000000000000..739e6de7abcb
--- /dev/null
+++ b/drivers/net/usb/qmi_wwan.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * version 2 as published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/netdevice.h>
11#include <linux/ethtool.h>
12#include <linux/mii.h>
13#include <linux/usb.h>
14#include <linux/usb/cdc.h>
15#include <linux/usb/usbnet.h>
16
17/* The name of the CDC Device Management driver */
18#define DM_DRIVER "cdc_wdm"
19
20/*
21 * This driver supports wwan (3G/LTE/?) devices using a vendor
22 * specific management protocol called Qualcomm MSM Interface (QMI) -
23 * in addition to the more common AT commands over serial interface
24 * management
25 *
26 * QMI is wrapped in CDC, using CDC encapsulated commands on the
27 * control ("master") interface of a two-interface CDC Union
28 * resembling standard CDC ECM. The devices do not use the control
29 * interface for any other CDC messages. Most likely because the
30 * management protocol is used in place of the standard CDC
31 * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
32 *
33 * Handling a protocol like QMI is out of the scope for any driver.
34 * It can be exported as a character device using the cdc-wdm driver,
35 * which will enable userspace applications ("modem managers") to
36 * handle it. This may be required to use the network interface
37 * provided by the driver.
38 *
39 * These devices may alternatively/additionally be configured using AT
40 * commands on any of the serial interfaces driven by the option driver
41 *
42 * This driver binds only to the data ("slave") interface to enable
43 * the cdc-wdm driver to bind to the control interface. It still
44 * parses the CDC functional descriptors on the control interface to
45 * a) verify that this is indeed a handled interface (CDC Union
46 * header lists it as slave)
47 * b) get MAC address and other ethernet config from the CDC Ethernet
48 * header
49 * c) enable user bind requests against the control interface, which
50 * is the common way to bind to CDC Ethernet Control Model type
51 * interfaces
52 * d) provide a hint to the user about which interface is the
53 * corresponding management interface
54 */
55
56static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
57{
58 int status = -1;
59 struct usb_interface *control = NULL;
60 u8 *buf = intf->cur_altsetting->extra;
61 int len = intf->cur_altsetting->extralen;
62 struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
63 struct usb_cdc_union_desc *cdc_union = NULL;
64 struct usb_cdc_ether_desc *cdc_ether = NULL;
65 u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE;
66 u32 found = 0;
67
68 /*
69 * assume a data interface has no additional descriptors and
70 * that the control and data interface are numbered
71 * consecutively - this holds for the Huawei device at least
72 */
73 if (len == 0 && desc->bInterfaceNumber > 0) {
74 control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1);
75 if (!control)
76 goto err;
77
78 buf = control->cur_altsetting->extra;
79 len = control->cur_altsetting->extralen;
80 dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n",
81 dev_name(&control->dev));
82 }
83
84 while (len > 3) {
85 struct usb_descriptor_header *h = (void *)buf;
86
87 /* ignore any misplaced descriptors */
88 if (h->bDescriptorType != USB_DT_CS_INTERFACE)
89 goto next_desc;
90
91 /* buf[2] is CDC descriptor subtype */
92 switch (buf[2]) {
93 case USB_CDC_HEADER_TYPE:
94 if (found & 1 << USB_CDC_HEADER_TYPE) {
95 dev_dbg(&intf->dev, "extra CDC header\n");
96 goto err;
97 }
98 if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
99 dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
100 goto err;
101 }
102 break;
103 case USB_CDC_UNION_TYPE:
104 if (found & 1 << USB_CDC_UNION_TYPE) {
105 dev_dbg(&intf->dev, "extra CDC union\n");
106 goto err;
107 }
108 if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
109 dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
110 goto err;
111 }
112 cdc_union = (struct usb_cdc_union_desc *)buf;
113 break;
114 case USB_CDC_ETHERNET_TYPE:
115 if (found & 1 << USB_CDC_ETHERNET_TYPE) {
116 dev_dbg(&intf->dev, "extra CDC ether\n");
117 goto err;
118 }
119 if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
120 dev_dbg(&intf->dev, "CDC ether len %u\n", h->bLength);
121 goto err;
122 }
123 cdc_ether = (struct usb_cdc_ether_desc *)buf;
124 break;
125 }
126
127 /*
128 * Remember which CDC functional descriptors we've seen. Works
129 * for all types we care about, of which USB_CDC_ETHERNET_TYPE
130 * (0x0f) is the highest numbered
131 */
132 if (buf[2] < 32)
133 found |= 1 << buf[2];
134
135next_desc:
136 len -= h->bLength;
137 buf += h->bLength;
138 }
139
140 /* did we find all the required ones? */
141 if ((found & required) != required) {
142 dev_err(&intf->dev, "CDC functional descriptors missing\n");
143 goto err;
144 }
145
146 /* give the user a helpful hint if trying to bind to the wrong interface */
147 if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) {
148 dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n",
149 dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev));
150 goto err;
151 }
152
153 /* errors aren't fatal - we can live with the dynamic address */
154 if (cdc_ether) {
155 dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
156 usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
157 }
158
159 /* success! point the user to the management interface */
160 if (control)
161 dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n",
162 dev_name(&control->dev));
163
164 /* XXX: add a sysfs symlink somewhere to help management applications find it? */
165
166 /* collect bulk endpoints now that we know intf == "data" interface */
167 status = usbnet_get_endpoints(dev, intf);
168
169err:
170 return status;
171}
172
173/* stolen from cdc_ether.c */
174static int qmi_wwan_manage_power(struct usbnet *dev, int on)
175{
176 dev->intf->needs_remote_wakeup = on;
177 return 0;
178}
179
180static const struct driver_info qmi_wwan_info = {
181 .description = "QMI speaking wwan device",
182 .flags = FLAG_WWAN,
183 .bind = qmi_wwan_bind,
184 .manage_power = qmi_wwan_manage_power,
185};
186
187#define HUAWEI_VENDOR_ID 0x12D1
188
189static const struct usb_device_id products[] = {
190{
191 /* Huawei E392, E398 and possibly others sharing both device id and more... */
192 .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
193 .idVendor = HUAWEI_VENDOR_ID,
194 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
195 .bInterfaceSubClass = 1,
196 .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
197 .driver_info = (unsigned long)&qmi_wwan_info,
198}, {
199}, /* END */
200};
201MODULE_DEVICE_TABLE(usb, products);
202
203static struct usb_driver qmi_wwan_driver = {
204 .name = "qmi_wwan",
205 .id_table = products,
206 .probe = usbnet_probe,
207 .disconnect = usbnet_disconnect,
208 .suspend = usbnet_suspend,
209 .resume = usbnet_resume,
210 .reset_resume = usbnet_resume,
211 .supports_autosuspend = 1,
212};
213
214static int __init qmi_wwan_init(void)
215{
216 return usb_register(&qmi_wwan_driver);
217}
218module_init(qmi_wwan_init);
219
220static void __exit qmi_wwan_exit(void)
221{
222 usb_deregister(&qmi_wwan_driver);
223}
224module_exit(qmi_wwan_exit);
225
226MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
227MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
228MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 0710b4ca9252..6dda2fe5b15b 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -894,10 +894,8 @@ static int rtl8150_probe(struct usb_interface *intf,
894 struct net_device *netdev; 894 struct net_device *netdev;
895 895
896 netdev = alloc_etherdev(sizeof(rtl8150_t)); 896 netdev = alloc_etherdev(sizeof(rtl8150_t));
897 if (!netdev) { 897 if (!netdev)
898 err("Out of memory");
899 return -ENOMEM; 898 return -ENOMEM;
900 }
901 899
902 dev = netdev_priv(netdev); 900 dev = netdev_priv(netdev);
903 901
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index fae0fbd8bc88..b924f46c963c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1334,10 +1334,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1334 1334
1335 // set up our own records 1335 // set up our own records
1336 net = alloc_etherdev(sizeof(*dev)); 1336 net = alloc_etherdev(sizeof(*dev));
1337 if (!net) { 1337 if (!net)
1338 dbg ("can't kmalloc dev");
1339 goto out; 1338 goto out;
1340 }
1341 1339
1342 /* netdev_printk() needs this so do it as early as possible */ 1340 /* netdev_printk() needs this so do it as early as possible */
1343 SET_NETDEV_DEV(net, &udev->dev); 1341 SET_NETDEV_DEV(net, &udev->dev);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index de7fc345148a..e1562e8acba5 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -537,11 +537,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
537 537
538 tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), 538 tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
539 GFP_KERNEL); 539 GFP_KERNEL);
540 if (!tq->buf_info) { 540 if (!tq->buf_info)
541 printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
542 adapter->netdev->name);
543 goto err; 541 goto err;
544 }
545 542
546 return 0; 543 return 0;
547 544
@@ -636,7 +633,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
636 633
637 dev_dbg(&adapter->netdev->dev, 634 dev_dbg(&adapter->netdev->dev,
638 "alloc_rx_buf: %d allocated, next2fill %u, next2comp " 635 "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
639 "%u, uncommited %u\n", num_allocated, ring->next2fill, 636 "%u, uncommitted %u\n", num_allocated, ring->next2fill,
640 ring->next2comp, rq->uncommitted[ring_idx]); 637 ring->next2comp, rq->uncommitted[ring_idx]);
641 638
642 /* so that the device can distinguish a full ring and an empty ring */ 639 /* so that the device can distinguish a full ring and an empty ring */
@@ -816,27 +813,24 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
816 813
817 if (ctx->mss) { /* TSO */ 814 if (ctx->mss) { /* TSO */
818 ctx->eth_ip_hdr_size = skb_transport_offset(skb); 815 ctx->eth_ip_hdr_size = skb_transport_offset(skb);
819 ctx->l4_hdr_size = ((struct tcphdr *) 816 ctx->l4_hdr_size = tcp_hdrlen(skb);
820 skb_transport_header(skb))->doff * 4;
821 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; 817 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
822 } else { 818 } else {
823 if (skb->ip_summed == CHECKSUM_PARTIAL) { 819 if (skb->ip_summed == CHECKSUM_PARTIAL) {
824 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); 820 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
825 821
826 if (ctx->ipv4) { 822 if (ctx->ipv4) {
827 struct iphdr *iph = (struct iphdr *) 823 const struct iphdr *iph = ip_hdr(skb);
828 skb_network_header(skb); 824
829 if (iph->protocol == IPPROTO_TCP) 825 if (iph->protocol == IPPROTO_TCP)
830 ctx->l4_hdr_size = ((struct tcphdr *) 826 ctx->l4_hdr_size = tcp_hdrlen(skb);
831 skb_transport_header(skb))->doff * 4;
832 else if (iph->protocol == IPPROTO_UDP) 827 else if (iph->protocol == IPPROTO_UDP)
833 /* 828 /*
834 * Use tcp header size so that bytes to 829 * Use tcp header size so that bytes to
835 * be copied are more than required by 830 * be copied are more than required by
836 * the device. 831 * the device.
837 */ 832 */
838 ctx->l4_hdr_size = 833 ctx->l4_hdr_size = sizeof(struct tcphdr);
839 sizeof(struct tcphdr);
840 else 834 else
841 ctx->l4_hdr_size = 0; 835 ctx->l4_hdr_size = 0;
842 } else { 836 } else {
@@ -881,14 +875,17 @@ static void
881vmxnet3_prepare_tso(struct sk_buff *skb, 875vmxnet3_prepare_tso(struct sk_buff *skb,
882 struct vmxnet3_tx_ctx *ctx) 876 struct vmxnet3_tx_ctx *ctx)
883{ 877{
884 struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb); 878 struct tcphdr *tcph = tcp_hdr(skb);
879
885 if (ctx->ipv4) { 880 if (ctx->ipv4) {
886 struct iphdr *iph = (struct iphdr *)skb_network_header(skb); 881 struct iphdr *iph = ip_hdr(skb);
882
887 iph->check = 0; 883 iph->check = 0;
888 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 884 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
889 IPPROTO_TCP, 0); 885 IPPROTO_TCP, 0);
890 } else { 886 } else {
891 struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb); 887 struct ipv6hdr *iph = ipv6_hdr(skb);
888
892 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, 889 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
893 IPPROTO_TCP, 0); 890 IPPROTO_TCP, 0);
894 } 891 }
@@ -1519,11 +1516,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1519 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1516 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1520 rq->rx_ring[1].size); 1517 rq->rx_ring[1].size);
1521 bi = kzalloc(sz, GFP_KERNEL); 1518 bi = kzalloc(sz, GFP_KERNEL);
1522 if (!bi) { 1519 if (!bi)
1523 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
1524 adapter->netdev->name);
1525 goto err; 1520 goto err;
1526 } 1521
1527 rq->buf_info[0] = bi; 1522 rq->buf_info[0] = bi;
1528 rq->buf_info[1] = bi + rq->rx_ring[0].size; 1523 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1529 1524
@@ -2923,11 +2918,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2923 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n", 2918 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
2924 num_tx_queues, num_rx_queues); 2919 num_tx_queues, num_rx_queues);
2925 2920
2926 if (!netdev) { 2921 if (!netdev)
2927 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2928 "%s\n", pci_name(pdev));
2929 return -ENOMEM; 2922 return -ENOMEM;
2930 }
2931 2923
2932 pci_set_drvdata(pdev, netdev); 2924 pci_set_drvdata(pdev, netdev);
2933 adapter = netdev_priv(netdev); 2925 adapter = netdev_priv(netdev);
@@ -2964,8 +2956,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2964 2956
2965 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 2957 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2966 if (adapter->pm_conf == NULL) { 2958 if (adapter->pm_conf == NULL) {
2967 printk(KERN_ERR "Failed to allocate memory for %s\n",
2968 pci_name(pdev));
2969 err = -ENOMEM; 2959 err = -ENOMEM;
2970 goto err_alloc_pm; 2960 goto err_alloc_pm;
2971 } 2961 }
@@ -2974,8 +2964,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2974 2964
2975 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL); 2965 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2976 if (adapter->rss_conf == NULL) { 2966 if (adapter->rss_conf == NULL) {
2977 printk(KERN_ERR "Failed to allocate memory for %s\n",
2978 pci_name(pdev));
2979 err = -ENOMEM; 2967 err = -ENOMEM;
2980 goto err_alloc_rss; 2968 goto err_alloc_rss;
2981 } 2969 }
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 54f995f4a5a3..09a50751763b 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -325,10 +325,8 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
325 } 325 }
326 326
327 card = kzalloc(sizeof(card_t), GFP_KERNEL); 327 card = kzalloc(sizeof(card_t), GFP_KERNEL);
328 if (card == NULL) { 328 if (card == NULL)
329 pr_err("unable to allocate memory\n");
330 return -ENOBUFS; 329 return -ENOBUFS;
331 }
332 330
333 card->dev = alloc_hdlcdev(card); 331 card->dev = alloc_hdlcdev(card);
334 if (!card->dev) { 332 if (!card->dev) {
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 058e1697c174..fe8d060d8fff 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -903,10 +903,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
903 int i, ret = -ENOMEM; 903 int i, ret = -ENOMEM;
904 904
905 root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL); 905 root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
906 if (!root) { 906 if (!root)
907 pr_err("can't allocate data\n");
908 goto err_out; 907 goto err_out;
909 }
910 908
911 for (i = 0; i < dev_per_card; i++) { 909 for (i = 0; i < dev_per_card; i++) {
912 root[i].dev = alloc_hdlcdev(root + i); 910 root[i].dev = alloc_hdlcdev(root + i);
@@ -915,10 +913,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
915 } 913 }
916 914
917 ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL); 915 ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
918 if (!ppriv) { 916 if (!ppriv)
919 pr_err("can't allocate private data\n");
920 goto err_free_dev; 917 goto err_free_dev;
921 }
922 918
923 ppriv->root = root; 919 ppriv->root = root;
924 spin_lock_init(&ppriv->lock); 920 spin_lock_init(&ppriv->lock);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index b7f2358d23be..76a8a4a522e9 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -497,7 +497,6 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
497 497
498 data = kmalloc(xc.len, GFP_KERNEL); 498 data = kmalloc(xc.len, GFP_KERNEL);
499 if (!data) { 499 if (!data) {
500 printk(KERN_WARNING "%s: Failed to allocate memory for copy\n", dev->name);
501 ret = -ENOMEM; 500 ret = -ENOMEM;
502 break; 501 break;
503 } 502 }
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5129ad514d26..315bf09d6a20 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -358,10 +358,8 @@ static int __init n2_run(unsigned long io, unsigned long irq,
358 } 358 }
359 359
360 card = kzalloc(sizeof(card_t), GFP_KERNEL); 360 card = kzalloc(sizeof(card_t), GFP_KERNEL);
361 if (card == NULL) { 361 if (card == NULL)
362 pr_err("unable to allocate memory\n");
363 return -ENOBUFS; 362 return -ENOBUFS;
364 }
365 363
366 card->ports[0].dev = alloc_hdlcdev(&card->ports[0]); 364 card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
367 card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); 365 card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index c49c1b3c7aad..5fe246e060d7 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -320,7 +320,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
320 320
321 card = kzalloc(sizeof(card_t), GFP_KERNEL); 321 card = kzalloc(sizeof(card_t), GFP_KERNEL);
322 if (card == NULL) { 322 if (card == NULL) {
323 pr_err("unable to allocate memory\n");
324 pci_release_regions(pdev); 323 pci_release_regions(pdev);
325 pci_disable_device(pdev); 324 pci_disable_device(pdev);
326 return -ENOBUFS; 325 return -ENOBUFS;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 1ce21163c776..9659fcaa34ed 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -299,7 +299,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
299 299
300 card = kzalloc(sizeof(card_t), GFP_KERNEL); 300 card = kzalloc(sizeof(card_t), GFP_KERNEL);
301 if (card == NULL) { 301 if (card == NULL) {
302 pr_err("unable to allocate memory\n");
303 pci_release_regions(pdev); 302 pci_release_regions(pdev);
304 pci_disable_device(pdev); 303 pci_disable_device(pdev);
305 return -ENOBUFS; 304 return -ENOBUFS;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 44b707197258..feb7541b33fb 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -604,7 +604,6 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
604 alloc_size = sizeof(card_t) + ports * sizeof(port_t); 604 alloc_size = sizeof(card_t) + ports * sizeof(port_t);
605 card = kzalloc(alloc_size, GFP_KERNEL); 605 card = kzalloc(alloc_size, GFP_KERNEL);
606 if (card == NULL) { 606 if (card == NULL) {
607 pr_err("%s: unable to allocate memory\n", pci_name(pdev));
608 pci_release_regions(pdev); 607 pci_release_regions(pdev);
609 pci_disable_device(pdev); 608 pci_disable_device(pdev);
610 return -ENOBUFS; 609 return -ENOBUFS;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 8a10bb730d5a..e862369b4a6d 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -786,10 +786,8 @@ static int __init init_x25_asy(void)
786 786
787 x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *), 787 x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
788 GFP_KERNEL); 788 GFP_KERNEL);
789 if (!x25_asy_devs) { 789 if (!x25_asy_devs)
790 pr_warn("Can't allocate x25_asy_ctrls[] array! Uaargh! (-> No X.25 available)\n");
791 return -ENOMEM; 790 return -ENOMEM;
792 }
793 791
794 return tty_register_ldisc(N_X25, &x25_ldisc); 792 return tty_register_ldisc(N_X25, &x25_ldisc);
795} 793}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 1b90ed8795c3..c25226a32ddc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -431,11 +431,8 @@ struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
431 struct htc_target *target; 431 struct htc_target *target;
432 432
433 target = kzalloc(sizeof(struct htc_target), GFP_KERNEL); 433 target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
434 if (!target) { 434 if (!target)
435 printk(KERN_ERR "Unable to allocate memory for"
436 "target device\n");
437 return NULL; 435 return NULL;
438 }
439 436
440 init_completion(&target->target_wait); 437 init_completion(&target->target_wait);
441 init_completion(&target->cmd_wait); 438 init_completion(&target->cmd_wait);
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 7e45ca2e78ef..3010cee7b95a 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1533,10 +1533,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1533 1533
1534 /* Create the network device object. */ 1534 /* Create the network device object. */
1535 dev = alloc_etherdev(sizeof(*priv)); 1535 dev = alloc_etherdev(sizeof(*priv));
1536 if (!dev) { 1536 if (!dev)
1537 printk(KERN_ERR "atmel: Couldn't alloc_etherdev\n");
1538 return NULL; 1537 return NULL;
1539 } 1538
1540 if (dev_alloc_name(dev, dev->name) < 0) { 1539 if (dev_alloc_name(dev, dev->name) < 0) {
1541 printk(KERN_ERR "atmel: Couldn't get name!\n"); 1540 printk(KERN_ERR "atmel: Couldn't get name!\n");
1542 goto err_out_free; 1541 goto err_out_free;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index a8bddd81b4d1..aa15cc4269a1 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -347,11 +347,9 @@ static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
347 return -EINTR; 347 return -EINTR;
348 348
349 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 349 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
350 if (entry == NULL) { 350 if (entry == NULL)
351 printk(KERN_DEBUG "%s: hfa384x_cmd - kmalloc failed\n",
352 dev->name);
353 return -ENOMEM; 351 return -ENOMEM;
354 } 352
355 atomic_set(&entry->usecnt, 1); 353 atomic_set(&entry->usecnt, 1);
356 entry->type = CMD_SLEEP; 354 entry->type = CMD_SLEEP;
357 entry->cmd = cmd; 355 entry->cmd = cmd;
@@ -515,11 +513,9 @@ static int hfa384x_cmd_callback(struct net_device *dev, u16 cmd, u16 param0,
515 } 513 }
516 514
517 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 515 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
518 if (entry == NULL) { 516 if (entry == NULL)
519 printk(KERN_DEBUG "%s: hfa384x_cmd_callback - kmalloc "
520 "failed\n", dev->name);
521 return -ENOMEM; 517 return -ENOMEM;
522 } 518
523 atomic_set(&entry->usecnt, 1); 519 atomic_set(&entry->usecnt, 1);
524 entry->type = CMD_CALLBACK; 520 entry->type = CMD_CALLBACK;
525 entry->cmd = cmd; 521 entry->cmd = cmd;
@@ -2978,11 +2974,9 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set)
2978 local = iface->local; 2974 local = iface->local;
2979 2975
2980 new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); 2976 new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
2981 if (new_entry == NULL) { 2977 if (new_entry == NULL)
2982 printk(KERN_DEBUG "%s: prism2_set_tim: kmalloc failed\n",
2983 local->dev->name);
2984 return -ENOMEM; 2978 return -ENOMEM;
2985 } 2979
2986 new_entry->aid = aid; 2980 new_entry->aid = aid;
2987 new_entry->set = set; 2981 new_entry->set = set;
2988 2982
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index a0e5c21d3657..e847737ccc9d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -3464,11 +3464,8 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
3464 priv->msg_buffers = 3464 priv->msg_buffers =
3465 kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet), 3465 kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
3466 GFP_KERNEL); 3466 GFP_KERNEL);
3467 if (!priv->msg_buffers) { 3467 if (!priv->msg_buffers)
3468 printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg "
3469 "buffers.\n", priv->net_dev->name);
3470 return -ENOMEM; 3468 return -ENOMEM;
3471 }
3472 3469
3473 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { 3470 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
3474 v = pci_alloc_consistent(priv->pci_dev, 3471 v = pci_alloc_consistent(priv->pci_dev,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index d5ef696298ee..3adb24021a28 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -150,10 +150,9 @@ struct net_device *alloc_libipw(int sizeof_priv, int monitor)
150 LIBIPW_DEBUG_INFO("Initializing...\n"); 150 LIBIPW_DEBUG_INFO("Initializing...\n");
151 151
152 dev = alloc_etherdev(sizeof(struct libipw_device) + sizeof_priv); 152 dev = alloc_etherdev(sizeof(struct libipw_device) + sizeof_priv);
153 if (!dev) { 153 if (!dev)
154 LIBIPW_ERROR("Unable to allocate network device.\n");
155 goto failed; 154 goto failed;
156 } 155
157 ieee = netdev_priv(dev); 156 ieee = netdev_priv(dev);
158 157
159 ieee->dev = dev; 158 ieee->dev = dev;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 3f7bf4d912b6..234ee88dec95 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -815,10 +815,9 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
815 lbs_deb_enter(LBS_DEB_CS); 815 lbs_deb_enter(LBS_DEB_CS);
816 816
817 card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL); 817 card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL);
818 if (!card) { 818 if (!card)
819 pr_err("error in kzalloc\n");
820 goto out; 819 goto out;
821 } 820
822 card->p_dev = p_dev; 821 card->p_dev = p_dev;
823 p_dev->priv = card; 822 p_dev->priv = card;
824 823
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index b5fbbc7947df..74da5f1ea243 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -261,10 +261,8 @@ static int if_usb_probe(struct usb_interface *intf,
261 udev = interface_to_usbdev(intf); 261 udev = interface_to_usbdev(intf);
262 262
263 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL); 263 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
264 if (!cardp) { 264 if (!cardp)
265 pr_err("Out of memory allocating private data\n");
266 goto error; 265 goto error;
267 }
268 266
269 setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp); 267 setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
270 init_waitqueue_head(&cardp->fw_wq); 268 init_waitqueue_head(&cardp->fw_wq);
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index aff8b5743af0..7ced130f4f9e 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -153,10 +153,8 @@ static int if_usb_probe(struct usb_interface *intf,
153 udev = interface_to_usbdev(intf); 153 udev = interface_to_usbdev(intf);
154 154
155 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL); 155 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
156 if (!cardp) { 156 if (!cardp)
157 pr_err("Out of memory allocating private data.\n");
158 goto error; 157 goto error;
159 }
160 158
161 setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp); 159 setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
162 init_waitqueue_head(&cardp->fw_wq); 160 init_waitqueue_head(&cardp->fw_wq);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 405350940a45..f4fbad95d3e3 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -86,10 +86,8 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
86 pdev->vendor, pdev->device, pdev->revision); 86 pdev->vendor, pdev->device, pdev->revision);
87 87
88 card = kzalloc(sizeof(struct pcie_service_card), GFP_KERNEL); 88 card = kzalloc(sizeof(struct pcie_service_card), GFP_KERNEL);
89 if (!card) { 89 if (!card)
90 pr_err("%s: failed to alloc memory\n", __func__);
91 return -ENOMEM; 90 return -ENOMEM;
92 }
93 91
94 card->dev = pdev; 92 card->dev = pdev;
95 93
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d39d8457f252..835902750231 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -70,10 +70,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
70 func->vendor, func->device, func->class, func->num); 70 func->vendor, func->device, func->class, func->num);
71 71
72 card = kzalloc(sizeof(struct sdio_mmc_card), GFP_KERNEL); 72 card = kzalloc(sizeof(struct sdio_mmc_card), GFP_KERNEL);
73 if (!card) { 73 if (!card)
74 pr_err("%s: failed to alloc memory\n", __func__);
75 return -ENOMEM; 74 return -ENOMEM;
76 }
77 75
78 card->func = func; 76 card->func = func;
79 77
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 9fb77d0319f5..dd6c64ac406e 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -941,11 +941,9 @@ void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw)
941 941
942 /* Add desc and skb to rx queue */ 942 /* Add desc and skb to rx queue */
943 rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC); 943 rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
944 if (!rx_data) { 944 if (!rx_data)
945 printk(KERN_WARNING "%s: Can't allocate RX packet\n",
946 dev->name);
947 goto drop; 945 goto drop;
948 } 946
949 rx_data->desc = desc; 947 rx_data->desc = desc;
950 rx_data->skb = skb; 948 rx_data->skb = skb;
951 list_add_tail(&rx_data->list, &priv->rx_list); 949 list_add_tail(&rx_data->list, &priv->rx_list);
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index a5224f6160e4..851fa10241e1 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -192,11 +192,9 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
192 192
193 err = -ENOMEM; 193 err = -ENOMEM;
194 p = buf.mem = kmalloc(frag_len, GFP_KERNEL); 194 p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
195 if (!buf.mem) { 195 if (!buf.mem)
196 printk(KERN_DEBUG "%s: cannot allocate mgmt frame\n",
197 ndev->name);
198 goto error; 196 goto error;
199 } 197
200 buf.size = frag_len; 198 buf.size = frag_len;
201 199
202 /* create the header directly in the fragment data area */ 200 /* create the header directly in the fragment data area */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 78723cf59491..36140ccf2abd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -186,11 +186,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
186 186
187 /* for firmware buf */ 187 /* for firmware buf */
188 rtlpriv->rtlhal.pfirmware = vzalloc(sizeof(struct rt_firmware)); 188 rtlpriv->rtlhal.pfirmware = vzalloc(sizeof(struct rt_firmware));
189 if (!rtlpriv->rtlhal.pfirmware) { 189 if (!rtlpriv->rtlhal.pfirmware)
190 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
191 ("Can't alloc buffer for fw.\n"));
192 return 1; 190 return 1;
193 }
194 191
195 pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" 192 pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
196 "Loading firmware %s\n", rtlpriv->cfg->fw_name); 193 "Loading firmware %s\n", rtlpriv->cfg->fw_name);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 59effac15f36..2596401308a8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1639,10 +1639,8 @@ static int __init netback_init(void)
1639 1639
1640 xen_netbk_group_nr = num_online_cpus(); 1640 xen_netbk_group_nr = num_online_cpus();
1641 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr); 1641 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1642 if (!xen_netbk) { 1642 if (!xen_netbk)
1643 printk(KERN_ALERT "%s: out of memory\n", __func__);
1644 return -ENOMEM; 1643 return -ENOMEM;
1645 }
1646 1644
1647 for (group = 0; group < xen_netbk_group_nr; group++) { 1645 for (group = 0; group < xen_netbk_group_nr; group++) {
1648 struct xen_netbk *netbk = &xen_netbk[group]; 1646 struct xen_netbk *netbk = &xen_netbk[group];
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 698b905058dd..b16175032327 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -489,6 +489,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
489 int frags = skb_shinfo(skb)->nr_frags; 489 int frags = skb_shinfo(skb)->nr_frags;
490 unsigned int offset = offset_in_page(data); 490 unsigned int offset = offset_in_page(data);
491 unsigned int len = skb_headlen(skb); 491 unsigned int len = skb_headlen(skb);
492 unsigned long flags;
492 493
493 frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); 494 frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
494 if (unlikely(frags > MAX_SKB_FRAGS + 1)) { 495 if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
@@ -498,12 +499,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
498 goto drop; 499 goto drop;
499 } 500 }
500 501
501 spin_lock_irq(&np->tx_lock); 502 spin_lock_irqsave(&np->tx_lock, flags);
502 503
503 if (unlikely(!netif_carrier_ok(dev) || 504 if (unlikely(!netif_carrier_ok(dev) ||
504 (frags > 1 && !xennet_can_sg(dev)) || 505 (frags > 1 && !xennet_can_sg(dev)) ||
505 netif_needs_gso(skb, netif_skb_features(skb)))) { 506 netif_needs_gso(skb, netif_skb_features(skb)))) {
506 spin_unlock_irq(&np->tx_lock); 507 spin_unlock_irqrestore(&np->tx_lock, flags);
507 goto drop; 508 goto drop;
508 } 509 }
509 510
@@ -574,7 +575,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
574 if (!netfront_tx_slot_available(np)) 575 if (!netfront_tx_slot_available(np))
575 netif_stop_queue(dev); 576 netif_stop_queue(dev);
576 577
577 spin_unlock_irq(&np->tx_lock); 578 spin_unlock_irqrestore(&np->tx_lock, flags);
578 579
579 return NETDEV_TX_OK; 580 return NETDEV_TX_OK;
580 581
@@ -1228,6 +1229,33 @@ static int xennet_set_features(struct net_device *dev,
1228 return 0; 1229 return 0;
1229} 1230}
1230 1231
1232static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1233{
1234 struct net_device *dev = dev_id;
1235 struct netfront_info *np = netdev_priv(dev);
1236 unsigned long flags;
1237
1238 spin_lock_irqsave(&np->tx_lock, flags);
1239
1240 if (likely(netif_carrier_ok(dev))) {
1241 xennet_tx_buf_gc(dev);
1242 /* Under tx_lock: protects access to rx shared-ring indexes. */
1243 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1244 napi_schedule(&np->napi);
1245 }
1246
1247 spin_unlock_irqrestore(&np->tx_lock, flags);
1248
1249 return IRQ_HANDLED;
1250}
1251
1252#ifdef CONFIG_NET_POLL_CONTROLLER
1253static void xennet_poll_controller(struct net_device *dev)
1254{
1255 xennet_interrupt(0, dev);
1256}
1257#endif
1258
1231static const struct net_device_ops xennet_netdev_ops = { 1259static const struct net_device_ops xennet_netdev_ops = {
1232 .ndo_open = xennet_open, 1260 .ndo_open = xennet_open,
1233 .ndo_uninit = xennet_uninit, 1261 .ndo_uninit = xennet_uninit,
@@ -1239,6 +1267,9 @@ static const struct net_device_ops xennet_netdev_ops = {
1239 .ndo_validate_addr = eth_validate_addr, 1267 .ndo_validate_addr = eth_validate_addr,
1240 .ndo_fix_features = xennet_fix_features, 1268 .ndo_fix_features = xennet_fix_features,
1241 .ndo_set_features = xennet_set_features, 1269 .ndo_set_features = xennet_set_features,
1270#ifdef CONFIG_NET_POLL_CONTROLLER
1271 .ndo_poll_controller = xennet_poll_controller,
1272#endif
1242}; 1273};
1243 1274
1244static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) 1275static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
@@ -1248,11 +1279,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
1248 struct netfront_info *np; 1279 struct netfront_info *np;
1249 1280
1250 netdev = alloc_etherdev(sizeof(struct netfront_info)); 1281 netdev = alloc_etherdev(sizeof(struct netfront_info));
1251 if (!netdev) { 1282 if (!netdev)
1252 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
1253 __func__);
1254 return ERR_PTR(-ENOMEM); 1283 return ERR_PTR(-ENOMEM);
1255 }
1256 1284
1257 np = netdev_priv(netdev); 1285 np = netdev_priv(netdev);
1258 np->xbdev = dev; 1286 np->xbdev = dev;
@@ -1448,26 +1476,6 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1448 return 0; 1476 return 0;
1449} 1477}
1450 1478
1451static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1452{
1453 struct net_device *dev = dev_id;
1454 struct netfront_info *np = netdev_priv(dev);
1455 unsigned long flags;
1456
1457 spin_lock_irqsave(&np->tx_lock, flags);
1458
1459 if (likely(netif_carrier_ok(dev))) {
1460 xennet_tx_buf_gc(dev);
1461 /* Under tx_lock: protects access to rx shared-ring indexes. */
1462 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1463 napi_schedule(&np->napi);
1464 }
1465
1466 spin_unlock_irqrestore(&np->tx_lock, flags);
1467
1468 return IRQ_HANDLED;
1469}
1470
1471static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) 1479static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1472{ 1480{
1473 struct xen_netif_tx_sring *txs; 1481 struct xen_netif_tx_sring *txs;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 9648e4e68337..25cd3799a76c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -28,6 +28,8 @@
28 28
29#include <net/ip.h> 29#include <net/ip.h>
30#include <net/arp.h> 30#include <net/arp.h>
31#include <net/route.h>
32#include <net/ip6_fib.h>
31#include <net/ip6_checksum.h> 33#include <net/ip6_checksum.h>
32#include <net/iucv/af_iucv.h> 34#include <net/iucv/af_iucv.h>
33 35
@@ -2832,7 +2834,6 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
2832static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 2834static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2833 struct sk_buff *skb, int ipv, int cast_type) 2835 struct sk_buff *skb, int ipv, int cast_type)
2834{ 2836{
2835 struct neighbour *n = NULL;
2836 struct dst_entry *dst; 2837 struct dst_entry *dst;
2837 2838
2838 memset(hdr, 0, sizeof(struct qeth_hdr)); 2839 memset(hdr, 0, sizeof(struct qeth_hdr));
@@ -2855,33 +2856,29 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2855 2856
2856 rcu_read_lock(); 2857 rcu_read_lock();
2857 dst = skb_dst(skb); 2858 dst = skb_dst(skb);
2858 if (dst)
2859 n = dst_get_neighbour_noref(dst);
2860 if (ipv == 4) { 2859 if (ipv == 4) {
2860 struct rtable *rt = (struct rtable *) dst;
2861 __be32 *pkey = &ip_hdr(skb)->daddr;
2862
2863 if (rt->rt_gateway)
2864 pkey = &rt->rt_gateway;
2865
2861 /* IPv4 */ 2866 /* IPv4 */
2862 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type); 2867 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
2863 memset(hdr->hdr.l3.dest_addr, 0, 12); 2868 memset(hdr->hdr.l3.dest_addr, 0, 12);
2864 if (n) { 2869 *((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
2865 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2866 *((u32 *) n->primary_key);
2867 } else {
2868 /* fill in destination address used in ip header */
2869 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2870 ip_hdr(skb)->daddr;
2871 }
2872 } else if (ipv == 6) { 2870 } else if (ipv == 6) {
2871 struct rt6_info *rt = (struct rt6_info *) dst;
2872 struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
2873
2874 if (!ipv6_addr_any(&rt->rt6i_gateway))
2875 pkey = &rt->rt6i_gateway;
2876
2873 /* IPv6 */ 2877 /* IPv6 */
2874 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type); 2878 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
2875 if (card->info.type == QETH_CARD_TYPE_IQD) 2879 if (card->info.type == QETH_CARD_TYPE_IQD)
2876 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU; 2880 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
2877 if (n) { 2881 memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
2878 memcpy(hdr->hdr.l3.dest_addr,
2879 n->primary_key, 16);
2880 } else {
2881 /* fill in destination address used in ip header */
2882 memcpy(hdr->hdr.l3.dest_addr,
2883 &ipv6_hdr(skb)->daddr, 16);
2884 }
2885 } else { 2882 } else {
2886 /* passthrough */ 2883 /* passthrough */
2887 if ((skb->dev->type == ARPHRD_IEEE802_TR) && 2884 if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 52e48959cfa1..a390e9d54827 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -237,22 +237,8 @@ struct netlink_notify {
237 int protocol; 237 int protocol;
238}; 238};
239 239
240static __inline__ struct nlmsghdr * 240struct nlmsghdr *
241__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) 241__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags);
242{
243 struct nlmsghdr *nlh;
244 int size = NLMSG_LENGTH(len);
245
246 nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
247 nlh->nlmsg_type = type;
248 nlh->nlmsg_len = size;
249 nlh->nlmsg_flags = flags;
250 nlh->nlmsg_pid = pid;
251 nlh->nlmsg_seq = seq;
252 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
253 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
254 return nlh;
255}
256 242
257#define NLMSG_NEW(skb, pid, seq, type, len, flags) \ 243#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
258({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \ 244({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index c1241c428179..8ee8af4e6da9 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -232,6 +232,7 @@ enum
232 LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ 232 LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */
233 LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */ 233 LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */
234 LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */ 234 LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
235 LINUX_MIB_TCPRETRANSFAIL, /* TCPRetransFail */
235 __LINUX_MIB_MAX 236 __LINUX_MIB_MAX
236}; 237};
237 238
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 46a85c9e1f25..115389e9b945 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -463,7 +463,7 @@ struct tcp_sock {
463 const struct tcp_sock_af_ops *af_specific; 463 const struct tcp_sock_af_ops *af_specific;
464 464
465/* TCP MD5 Signature Option information */ 465/* TCP MD5 Signature Option information */
466 struct tcp_md5sig_info *md5sig_info; 466 struct tcp_md5sig_info __rcu *md5sig_info;
467#endif 467#endif
468 468
469 /* When the cookie options are generated and exchanged, then this 469 /* When the cookie options are generated and exchanged, then this
@@ -486,8 +486,7 @@ struct tcp_timewait_sock {
486 u32 tw_ts_recent; 486 u32 tw_ts_recent;
487 long tw_ts_recent_stamp; 487 long tw_ts_recent_stamp;
488#ifdef CONFIG_TCP_MD5SIG 488#ifdef CONFIG_TCP_MD5SIG
489 u16 tw_md5_keylen; 489 struct tcp_md5sig_key *tw_md5_key;
490 u8 tw_md5_key[TCP_MD5SIG_MAXKEYLEN];
491#endif 490#endif
492 /* Few sockets in timewait have cookies; in that case, then this 491 /* Few sockets in timewait have cookies; in that case, then this
493 * object holds a reference to them (tw_cookie_values->kref). 492 * object holds a reference to them (tw_cookie_values->kref).
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f68dce2d8d88..757a17638b1b 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -160,7 +160,6 @@ extern void addrconf_prefix_rcv(struct net_device *dev,
160extern int ipv6_sock_ac_join(struct sock *sk,int ifindex, const struct in6_addr *addr); 160extern int ipv6_sock_ac_join(struct sock *sk,int ifindex, const struct in6_addr *addr);
161extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex, const struct in6_addr *addr); 161extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex, const struct in6_addr *addr);
162extern void ipv6_sock_ac_close(struct sock *sk); 162extern void ipv6_sock_ac_close(struct sock *sk);
163extern int inet6_ac_check(struct sock *sk, const struct in6_addr *addr, int ifindex);
164 163
165extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); 164extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
166extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); 165extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 8d552519ff67..6db8ecf52aa2 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -138,6 +138,7 @@ struct cfhsi {
138 u8 *rx_ptr; 138 u8 *rx_ptr;
139 u8 *tx_buf; 139 u8 *tx_buf;
140 u8 *rx_buf; 140 u8 *rx_buf;
141 u8 *rx_flip_buf;
141 spinlock_t lock; 142 spinlock_t lock;
142 int flow_off_sent; 143 int flow_off_sent;
143 u32 q_low_mark; 144 u32 q_low_mark;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 7db32995ccd3..ccb68880abf5 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -131,35 +131,8 @@ extern void genl_unregister_mc_group(struct genl_family *family,
131extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, 131extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
132 u32 group, struct nlmsghdr *nlh, gfp_t flags); 132 u32 group, struct nlmsghdr *nlh, gfp_t flags);
133 133
134/** 134void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
135 * genlmsg_put - Add generic netlink header to netlink message 135 struct genl_family *family, int flags, u8 cmd);
136 * @skb: socket buffer holding the message
137 * @pid: netlink pid the message is addressed to
138 * @seq: sequence number (usually the one of the sender)
139 * @family: generic netlink family
140 * @flags netlink message flags
141 * @cmd: generic netlink command
142 *
143 * Returns pointer to user specific header
144 */
145static inline void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
146 struct genl_family *family, int flags, u8 cmd)
147{
148 struct nlmsghdr *nlh;
149 struct genlmsghdr *hdr;
150
151 nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
152 family->hdrsize, flags);
153 if (nlh == NULL)
154 return NULL;
155
156 hdr = nlmsg_data(nlh);
157 hdr->cmd = cmd;
158 hdr->version = family->version;
159 hdr->reserved = 0;
160
161 return (char *) hdr + GENL_HDRLEN;
162}
163 136
164/** 137/**
165 * genlmsg_nlhdr - Obtain netlink header from user specified header 138 * genlmsg_nlhdr - Obtain netlink header from user specified header
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index e3133c23980e..6f9c25a76cd1 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -133,7 +133,6 @@ extern void ndisc_send_rs(struct net_device *dev,
133 const struct in6_addr *daddr); 133 const struct in6_addr *daddr);
134 134
135extern void ndisc_send_redirect(struct sk_buff *skb, 135extern void ndisc_send_redirect(struct sk_buff *skb,
136 struct neighbour *neigh,
137 const struct in6_addr *target); 136 const struct in6_addr *target);
138 137
139extern int ndisc_mc_map(const struct in6_addr *addr, char *buf, 138extern int ndisc_mc_map(const struct in6_addr *addr, char *buf,
diff --git a/include/net/netlink.h b/include/net/netlink.h
index cb1f3504687f..f394fe5d7641 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -441,41 +441,6 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
441 nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \ 441 nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \
442 nlmsg_attrlen(nlh, hdrlen), rem) 442 nlmsg_attrlen(nlh, hdrlen), rem)
443 443
444#if 0
445/* FIXME: Enable once all users have been converted */
446
447/**
448 * __nlmsg_put - Add a new netlink message to an skb
449 * @skb: socket buffer to store message in
450 * @pid: netlink process id
451 * @seq: sequence number of message
452 * @type: message type
453 * @payload: length of message payload
454 * @flags: message flags
455 *
456 * The caller is responsible to ensure that the skb provides enough
457 * tailroom for both the netlink header and payload.
458 */
459static inline struct nlmsghdr *__nlmsg_put(struct sk_buff *skb, u32 pid,
460 u32 seq, int type, int payload,
461 int flags)
462{
463 struct nlmsghdr *nlh;
464
465 nlh = (struct nlmsghdr *) skb_put(skb, nlmsg_total_size(payload));
466 nlh->nlmsg_type = type;
467 nlh->nlmsg_len = nlmsg_msg_size(payload);
468 nlh->nlmsg_flags = flags;
469 nlh->nlmsg_pid = pid;
470 nlh->nlmsg_seq = seq;
471
472 memset((unsigned char *) nlmsg_data(nlh) + payload, 0,
473 nlmsg_padlen(payload));
474
475 return nlh;
476}
477#endif
478
479/** 444/**
480 * nlmsg_put - Add a new netlink message to an skb 445 * nlmsg_put - Add a new netlink message to an skb
481 * @skb: socket buffer to store message in 446 * @skb: socket buffer to store message in
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 42c29bfbcee3..6b2acfce4cfd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1138,35 +1138,27 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1138/* MD5 Signature */ 1138/* MD5 Signature */
1139struct crypto_hash; 1139struct crypto_hash;
1140 1140
1141union tcp_md5_addr {
1142 struct in_addr a4;
1143#if IS_ENABLED(CONFIG_IPV6)
1144 struct in6_addr a6;
1145#endif
1146};
1147
1141/* - key database */ 1148/* - key database */
1142struct tcp_md5sig_key { 1149struct tcp_md5sig_key {
1143 u8 *key; 1150 struct hlist_node node;
1144 u8 keylen; 1151 u8 keylen;
1145}; 1152 u8 family; /* AF_INET or AF_INET6 */
1146 1153 union tcp_md5_addr addr;
1147struct tcp4_md5sig_key { 1154 u8 key[TCP_MD5SIG_MAXKEYLEN];
1148 struct tcp_md5sig_key base; 1155 struct rcu_head rcu;
1149 __be32 addr;
1150};
1151
1152struct tcp6_md5sig_key {
1153 struct tcp_md5sig_key base;
1154#if 0
1155 u32 scope_id; /* XXX */
1156#endif
1157 struct in6_addr addr;
1158}; 1156};
1159 1157
1160/* - sock block */ 1158/* - sock block */
1161struct tcp_md5sig_info { 1159struct tcp_md5sig_info {
1162 struct tcp4_md5sig_key *keys4; 1160 struct hlist_head head;
1163#if IS_ENABLED(CONFIG_IPV6) 1161 struct rcu_head rcu;
1164 struct tcp6_md5sig_key *keys6;
1165 u32 entries6;
1166 u32 alloced6;
1167#endif
1168 u32 entries4;
1169 u32 alloced4;
1170}; 1162};
1171 1163
1172/* - pseudo header */ 1164/* - pseudo header */
@@ -1203,19 +1195,25 @@ extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1203 const struct sock *sk, 1195 const struct sock *sk,
1204 const struct request_sock *req, 1196 const struct request_sock *req,
1205 const struct sk_buff *skb); 1197 const struct sk_buff *skb);
1206extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk, 1198extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1207 struct sock *addr_sk); 1199 int family, const u8 *newkey,
1208extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey, 1200 u8 newkeylen, gfp_t gfp);
1209 u8 newkeylen); 1201extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1210extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr); 1202 int family);
1203extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
1204 struct sock *addr_sk);
1211 1205
1212#ifdef CONFIG_TCP_MD5SIG 1206#ifdef CONFIG_TCP_MD5SIG
1213#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_keylen ? \ 1207extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1214 &(struct tcp_md5sig_key) { \ 1208 const union tcp_md5_addr *addr, int family);
1215 .key = (twsk)->tw_md5_key, \ 1209#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
1216 .keylen = (twsk)->tw_md5_keylen, \
1217 } : NULL)
1218#else 1210#else
1211static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1212 const union tcp_md5_addr *addr,
1213 int family)
1214{
1215 return NULL;
1216}
1219#define tcp_twsk_md5_key(twsk) NULL 1217#define tcp_twsk_md5_key(twsk) NULL
1220#endif 1218#endif
1221 1219
@@ -1470,10 +1468,6 @@ struct tcp_sock_af_ops {
1470 const struct sock *sk, 1468 const struct sock *sk,
1471 const struct request_sock *req, 1469 const struct request_sock *req,
1472 const struct sk_buff *skb); 1470 const struct sk_buff *skb);
1473 int (*md5_add) (struct sock *sk,
1474 struct sock *addr_sk,
1475 u8 *newkey,
1476 u8 len);
1477 int (*md5_parse) (struct sock *sk, 1471 int (*md5_parse) (struct sock *sk,
1478 char __user *optval, 1472 char __user *optval,
1479 int optlen); 1473 int optlen);
diff --git a/net/atm/clip.c b/net/atm/clip.c
index c12c2582457c..ef95a30306fa 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -328,6 +328,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
328 struct atmarp_entry *entry; 328 struct atmarp_entry *entry;
329 struct neighbour *n; 329 struct neighbour *n;
330 struct atm_vcc *vcc; 330 struct atm_vcc *vcc;
331 struct rtable *rt;
332 __be32 *daddr;
331 int old; 333 int old;
332 unsigned long flags; 334 unsigned long flags;
333 335
@@ -338,7 +340,12 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
338 dev->stats.tx_dropped++; 340 dev->stats.tx_dropped++;
339 return NETDEV_TX_OK; 341 return NETDEV_TX_OK;
340 } 342 }
341 n = dst_get_neighbour_noref(dst); 343 rt = (struct rtable *) dst;
344 if (rt->rt_gateway)
345 daddr = &rt->rt_gateway;
346 else
347 daddr = &ip_hdr(skb)->daddr;
348 n = dst_neigh_lookup(dst, daddr);
342 if (!n) { 349 if (!n) {
343 pr_err("NO NEIGHBOUR !\n"); 350 pr_err("NO NEIGHBOUR !\n");
344 dev_kfree_skb(skb); 351 dev_kfree_skb(skb);
@@ -358,7 +365,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
358 dev_kfree_skb(skb); 365 dev_kfree_skb(skb);
359 dev->stats.tx_dropped++; 366 dev->stats.tx_dropped++;
360 } 367 }
361 return NETDEV_TX_OK; 368 goto out_release_neigh;
362 } 369 }
363 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); 370 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
364 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; 371 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
@@ -377,14 +384,14 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
377 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ 384 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
378 if (old) { 385 if (old) {
379 pr_warning("XOFF->XOFF transition\n"); 386 pr_warning("XOFF->XOFF transition\n");
380 return NETDEV_TX_OK; 387 goto out_release_neigh;
381 } 388 }
382 dev->stats.tx_packets++; 389 dev->stats.tx_packets++;
383 dev->stats.tx_bytes += skb->len; 390 dev->stats.tx_bytes += skb->len;
384 vcc->send(vcc, skb); 391 vcc->send(vcc, skb);
385 if (atm_may_send(vcc, 0)) { 392 if (atm_may_send(vcc, 0)) {
386 entry->vccs->xoff = 0; 393 entry->vccs->xoff = 0;
387 return NETDEV_TX_OK; 394 goto out_release_neigh;
388 } 395 }
389 spin_lock_irqsave(&clip_priv->xoff_lock, flags); 396 spin_lock_irqsave(&clip_priv->xoff_lock, flags);
390 netif_stop_queue(dev); /* XOFF -> throttle immediately */ 397 netif_stop_queue(dev); /* XOFF -> throttle immediately */
@@ -396,6 +403,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
396 of the brief netif_stop_queue. If this isn't true or if it 403 of the brief netif_stop_queue. If this isn't true or if it
397 changes, use netif_wake_queue instead. */ 404 changes, use netif_wake_queue instead. */
398 spin_unlock_irqrestore(&clip_priv->xoff_lock, flags); 405 spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
406out_release_neigh:
407 neigh_release(n);
399 return NETDEV_TX_OK; 408 return NETDEV_TX_OK;
400} 409}
401 410
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a97d97a3a512..5016fa57b623 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -43,34 +43,9 @@ enum caif_states {
43#define TX_FLOW_ON_BIT 1 43#define TX_FLOW_ON_BIT 1
44#define RX_FLOW_ON_BIT 2 44#define RX_FLOW_ON_BIT 2
45 45
46static struct dentry *debugfsdir;
47
48#ifdef CONFIG_DEBUG_FS
49struct debug_fs_counter {
50 atomic_t caif_nr_socks;
51 atomic_t caif_sock_create;
52 atomic_t num_connect_req;
53 atomic_t num_connect_resp;
54 atomic_t num_connect_fail_resp;
55 atomic_t num_disconnect;
56 atomic_t num_remote_shutdown_ind;
57 atomic_t num_tx_flow_off_ind;
58 atomic_t num_tx_flow_on_ind;
59 atomic_t num_rx_flow_off;
60 atomic_t num_rx_flow_on;
61};
62static struct debug_fs_counter cnt;
63#define dbfs_atomic_inc(v) atomic_inc_return(v)
64#define dbfs_atomic_dec(v) atomic_dec_return(v)
65#else
66#define dbfs_atomic_inc(v) 0
67#define dbfs_atomic_dec(v) 0
68#endif
69
70struct caifsock { 46struct caifsock {
71 struct sock sk; /* must be first member */ 47 struct sock sk; /* must be first member */
72 struct cflayer layer; 48 struct cflayer layer;
73 char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
74 u32 flow_state; 49 u32 flow_state;
75 struct caif_connect_request conn_req; 50 struct caif_connect_request conn_req;
76 struct mutex readlock; 51 struct mutex readlock;
@@ -161,7 +136,6 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
161 atomic_read(&cf_sk->sk.sk_rmem_alloc), 136 atomic_read(&cf_sk->sk.sk_rmem_alloc),
162 sk_rcvbuf_lowwater(cf_sk)); 137 sk_rcvbuf_lowwater(cf_sk));
163 set_rx_flow_off(cf_sk); 138 set_rx_flow_off(cf_sk);
164 dbfs_atomic_inc(&cnt.num_rx_flow_off);
165 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 139 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
166 } 140 }
167 141
@@ -172,7 +146,6 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
172 set_rx_flow_off(cf_sk); 146 set_rx_flow_off(cf_sk);
173 if (net_ratelimit()) 147 if (net_ratelimit())
174 pr_debug("sending flow OFF due to rmem_schedule\n"); 148 pr_debug("sending flow OFF due to rmem_schedule\n");
175 dbfs_atomic_inc(&cnt.num_rx_flow_off);
176 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 149 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
177 } 150 }
178 skb->dev = NULL; 151 skb->dev = NULL;
@@ -233,14 +206,12 @@ static void caif_ctrl_cb(struct cflayer *layr,
233 switch (flow) { 206 switch (flow) {
234 case CAIF_CTRLCMD_FLOW_ON_IND: 207 case CAIF_CTRLCMD_FLOW_ON_IND:
235 /* OK from modem to start sending again */ 208 /* OK from modem to start sending again */
236 dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
237 set_tx_flow_on(cf_sk); 209 set_tx_flow_on(cf_sk);
238 cf_sk->sk.sk_state_change(&cf_sk->sk); 210 cf_sk->sk.sk_state_change(&cf_sk->sk);
239 break; 211 break;
240 212
241 case CAIF_CTRLCMD_FLOW_OFF_IND: 213 case CAIF_CTRLCMD_FLOW_OFF_IND:
242 /* Modem asks us to shut up */ 214 /* Modem asks us to shut up */
243 dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
244 set_tx_flow_off(cf_sk); 215 set_tx_flow_off(cf_sk);
245 cf_sk->sk.sk_state_change(&cf_sk->sk); 216 cf_sk->sk.sk_state_change(&cf_sk->sk);
246 break; 217 break;
@@ -249,7 +220,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
249 /* We're now connected */ 220 /* We're now connected */
250 caif_client_register_refcnt(&cf_sk->layer, 221 caif_client_register_refcnt(&cf_sk->layer,
251 cfsk_hold, cfsk_put); 222 cfsk_hold, cfsk_put);
252 dbfs_atomic_inc(&cnt.num_connect_resp);
253 cf_sk->sk.sk_state = CAIF_CONNECTED; 223 cf_sk->sk.sk_state = CAIF_CONNECTED;
254 set_tx_flow_on(cf_sk); 224 set_tx_flow_on(cf_sk);
255 cf_sk->sk.sk_state_change(&cf_sk->sk); 225 cf_sk->sk.sk_state_change(&cf_sk->sk);
@@ -263,7 +233,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
263 233
264 case CAIF_CTRLCMD_INIT_FAIL_RSP: 234 case CAIF_CTRLCMD_INIT_FAIL_RSP:
265 /* Connect request failed */ 235 /* Connect request failed */
266 dbfs_atomic_inc(&cnt.num_connect_fail_resp);
267 cf_sk->sk.sk_err = ECONNREFUSED; 236 cf_sk->sk.sk_err = ECONNREFUSED;
268 cf_sk->sk.sk_state = CAIF_DISCONNECTED; 237 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
269 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; 238 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
@@ -277,7 +246,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
277 246
278 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: 247 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
279 /* Modem has closed this connection, or device is down. */ 248 /* Modem has closed this connection, or device is down. */
280 dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
281 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; 249 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
282 cf_sk->sk.sk_err = ECONNRESET; 250 cf_sk->sk.sk_err = ECONNRESET;
283 set_rx_flow_on(cf_sk); 251 set_rx_flow_on(cf_sk);
@@ -297,7 +265,6 @@ static void caif_check_flow_release(struct sock *sk)
297 return; 265 return;
298 266
299 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { 267 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
300 dbfs_atomic_inc(&cnt.num_rx_flow_on);
301 set_rx_flow_on(cf_sk); 268 set_rx_flow_on(cf_sk);
302 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); 269 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
303 } 270 }
@@ -856,7 +823,6 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
856 /*ifindex = id of the interface.*/ 823 /*ifindex = id of the interface.*/
857 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; 824 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
858 825
859 dbfs_atomic_inc(&cnt.num_connect_req);
860 cf_sk->layer.receive = caif_sktrecv_cb; 826 cf_sk->layer.receive = caif_sktrecv_cb;
861 827
862 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, 828 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
@@ -945,8 +911,6 @@ static int caif_release(struct socket *sock)
945 spin_unlock_bh(&sk->sk_receive_queue.lock); 911 spin_unlock_bh(&sk->sk_receive_queue.lock);
946 sock->sk = NULL; 912 sock->sk = NULL;
947 913
948 dbfs_atomic_inc(&cnt.num_disconnect);
949
950 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir)); 914 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
951 if (cf_sk->debugfs_socket_dir != NULL) 915 if (cf_sk->debugfs_socket_dir != NULL)
952 debugfs_remove_recursive(cf_sk->debugfs_socket_dir); 916 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
@@ -1054,14 +1018,12 @@ static void caif_sock_destructor(struct sock *sk)
1054 return; 1018 return;
1055 } 1019 }
1056 sk_stream_kill_queues(&cf_sk->sk); 1020 sk_stream_kill_queues(&cf_sk->sk);
1057 dbfs_atomic_dec(&cnt.caif_nr_socks);
1058 caif_free_client(&cf_sk->layer); 1021 caif_free_client(&cf_sk->layer);
1059} 1022}
1060 1023
1061static int caif_create(struct net *net, struct socket *sock, int protocol, 1024static int caif_create(struct net *net, struct socket *sock, int protocol,
1062 int kern) 1025 int kern)
1063{ 1026{
1064 int num;
1065 struct sock *sk = NULL; 1027 struct sock *sk = NULL;
1066 struct caifsock *cf_sk = NULL; 1028 struct caifsock *cf_sk = NULL;
1067 static struct proto prot = {.name = "PF_CAIF", 1029 static struct proto prot = {.name = "PF_CAIF",
@@ -1122,34 +1084,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1122 cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL; 1084 cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
1123 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; 1085 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1124 cf_sk->conn_req.protocol = protocol; 1086 cf_sk->conn_req.protocol = protocol;
1125 /* Increase the number of sockets created. */
1126 dbfs_atomic_inc(&cnt.caif_nr_socks);
1127 num = dbfs_atomic_inc(&cnt.caif_sock_create);
1128#ifdef CONFIG_DEBUG_FS
1129 if (!IS_ERR(debugfsdir)) {
1130
1131 /* Fill in some information concerning the misc socket. */
1132 snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", num);
1133
1134 cf_sk->debugfs_socket_dir =
1135 debugfs_create_dir(cf_sk->name, debugfsdir);
1136
1137 debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
1138 cf_sk->debugfs_socket_dir,
1139 (u32 *) &cf_sk->sk.sk_state);
1140 debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
1141 cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
1142 debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
1143 cf_sk->debugfs_socket_dir,
1144 (u32 *) &cf_sk->sk.sk_rmem_alloc);
1145 debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
1146 cf_sk->debugfs_socket_dir,
1147 (u32 *) &cf_sk->sk.sk_wmem_alloc);
1148 debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
1149 cf_sk->debugfs_socket_dir,
1150 (u32 *) &cf_sk->layer.id);
1151 }
1152#endif
1153 release_sock(&cf_sk->sk); 1087 release_sock(&cf_sk->sk);
1154 return 0; 1088 return 0;
1155} 1089}
@@ -1161,7 +1095,7 @@ static struct net_proto_family caif_family_ops = {
1161 .owner = THIS_MODULE, 1095 .owner = THIS_MODULE,
1162}; 1096};
1163 1097
1164static int af_caif_init(void) 1098static int __init caif_sktinit_module(void)
1165{ 1099{
1166 int err = sock_register(&caif_family_ops); 1100 int err = sock_register(&caif_family_ops);
1167 if (!err) 1101 if (!err)
@@ -1169,54 +1103,9 @@ static int af_caif_init(void)
1169 return 0; 1103 return 0;
1170} 1104}
1171 1105
1172static int __init caif_sktinit_module(void)
1173{
1174#ifdef CONFIG_DEBUG_FS
1175 debugfsdir = debugfs_create_dir("caif_sk", NULL);
1176 if (!IS_ERR(debugfsdir)) {
1177 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
1178 debugfsdir,
1179 (u32 *) &cnt.caif_nr_socks);
1180 debugfs_create_u32("num_create", S_IRUSR | S_IWUSR,
1181 debugfsdir,
1182 (u32 *) &cnt.caif_sock_create);
1183 debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
1184 debugfsdir,
1185 (u32 *) &cnt.num_connect_req);
1186 debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
1187 debugfsdir,
1188 (u32 *) &cnt.num_connect_resp);
1189 debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
1190 debugfsdir,
1191 (u32 *) &cnt.num_connect_fail_resp);
1192 debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
1193 debugfsdir,
1194 (u32 *) &cnt.num_disconnect);
1195 debugfs_create_u32("num_remote_shutdown_ind",
1196 S_IRUSR | S_IWUSR, debugfsdir,
1197 (u32 *) &cnt.num_remote_shutdown_ind);
1198 debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
1199 debugfsdir,
1200 (u32 *) &cnt.num_tx_flow_off_ind);
1201 debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
1202 debugfsdir,
1203 (u32 *) &cnt.num_tx_flow_on_ind);
1204 debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
1205 debugfsdir,
1206 (u32 *) &cnt.num_rx_flow_off);
1207 debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
1208 debugfsdir,
1209 (u32 *) &cnt.num_rx_flow_on);
1210 }
1211#endif
1212 return af_caif_init();
1213}
1214
1215static void __exit caif_sktexit_module(void) 1106static void __exit caif_sktexit_module(void)
1216{ 1107{
1217 sock_unregister(PF_CAIF); 1108 sock_unregister(PF_CAIF);
1218 if (debugfsdir != NULL)
1219 debugfs_remove_recursive(debugfsdir);
1220} 1109}
1221module_init(caif_sktinit_module); 1110module_init(caif_sktinit_module);
1222module_exit(caif_sktexit_module); 1111module_exit(caif_sktexit_module);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 865690948bbc..a751d9b263ed 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -74,7 +74,6 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
74 struct sk_buff *skb; 74 struct sk_buff *skb;
75 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 75 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
76 int pktlen; 76 int pktlen;
77 int err = 0;
78 const u8 *ip_version; 77 const u8 *ip_version;
79 u8 buf; 78 u8 buf;
80 79
@@ -95,8 +94,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
95 94
96 /* check the version of IP */ 95 /* check the version of IP */
97 ip_version = skb_header_pointer(skb, 0, 1, &buf); 96 ip_version = skb_header_pointer(skb, 0, 1, &buf);
98 if (!ip_version) 97
99 return -EINVAL;
100 switch (*ip_version >> 4) { 98 switch (*ip_version >> 4) {
101 case 4: 99 case 4:
102 skb->protocol = htons(ETH_P_IP); 100 skb->protocol = htons(ETH_P_IP);
@@ -105,6 +103,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
105 skb->protocol = htons(ETH_P_IPV6); 103 skb->protocol = htons(ETH_P_IPV6);
106 break; 104 break;
107 default: 105 default:
106 priv->netdev->stats.rx_errors++;
108 return -EINVAL; 107 return -EINVAL;
109 } 108 }
110 109
@@ -123,7 +122,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
123 priv->netdev->stats.rx_packets++; 122 priv->netdev->stats.rx_packets++;
124 priv->netdev->stats.rx_bytes += pktlen; 123 priv->netdev->stats.rx_bytes += pktlen;
125 124
126 return err; 125 return 0;
127} 126}
128 127
129static int delete_device(struct chnl_net *dev) 128static int delete_device(struct chnl_net *dev)
@@ -221,11 +220,13 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
221 220
222 if (skb->len > priv->netdev->mtu) { 221 if (skb->len > priv->netdev->mtu) {
223 pr_warn("Size of skb exceeded MTU\n"); 222 pr_warn("Size of skb exceeded MTU\n");
223 dev->stats.tx_errors++;
224 return -ENOSPC; 224 return -ENOSPC;
225 } 225 }
226 226
227 if (!priv->flowenabled) { 227 if (!priv->flowenabled) {
228 pr_debug("dropping packets flow off\n"); 228 pr_debug("dropping packets flow off\n");
229 dev->stats.tx_dropped++;
229 return NETDEV_TX_BUSY; 230 return NETDEV_TX_BUSY;
230 } 231 }
231 232
@@ -240,8 +241,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
240 /* Send the packet down the stack. */ 241 /* Send the packet down the stack. */
241 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); 242 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
242 if (result) { 243 if (result) {
243 if (result == -EAGAIN) 244 dev->stats.tx_dropped++;
244 result = NETDEV_TX_BUSY;
245 return result; 245 return result;
246 } 246 }
247 247
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..f1249472e90e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -446,7 +446,7 @@ void __dev_remove_pack(struct packet_type *pt)
446 } 446 }
447 } 447 }
448 448
449 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); 449 pr_warn("dev_remove_pack: %p not found\n", pt);
450out: 450out:
451 spin_unlock(&ptype_lock); 451 spin_unlock(&ptype_lock);
452} 452}
@@ -1039,8 +1039,7 @@ rollback:
1039 memcpy(dev->name, oldname, IFNAMSIZ); 1039 memcpy(dev->name, oldname, IFNAMSIZ);
1040 goto rollback; 1040 goto rollback;
1041 } else { 1041 } else {
1042 printk(KERN_ERR 1042 pr_err("%s: name change rollback failed: %d\n",
1043 "%s: name change rollback failed: %d.\n",
1044 dev->name, ret); 1043 dev->name, ret);
1045 } 1044 }
1046 } 1045 }
@@ -1139,9 +1138,8 @@ void dev_load(struct net *net, const char *name)
1139 no_module = request_module("netdev-%s", name); 1138 no_module = request_module("netdev-%s", name);
1140 if (no_module && capable(CAP_SYS_MODULE)) { 1139 if (no_module && capable(CAP_SYS_MODULE)) {
1141 if (!request_module("%s", name)) 1140 if (!request_module("%s", name))
1142 pr_err("Loading kernel module for a network device " 1141 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1143"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " 1142 name);
1144"instead\n", name);
1145 } 1143 }
1146} 1144}
1147EXPORT_SYMBOL(dev_load); 1145EXPORT_SYMBOL(dev_load);
@@ -1655,10 +1653,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1655 if (skb_network_header(skb2) < skb2->data || 1653 if (skb_network_header(skb2) < skb2->data ||
1656 skb2->network_header > skb2->tail) { 1654 skb2->network_header > skb2->tail) {
1657 if (net_ratelimit()) 1655 if (net_ratelimit())
1658 printk(KERN_CRIT "protocol %04x is " 1656 pr_crit("protocol %04x is buggy, dev %s\n",
1659 "buggy, dev %s\n", 1657 ntohs(skb2->protocol),
1660 ntohs(skb2->protocol), 1658 dev->name);
1661 dev->name);
1662 skb_reset_network_header(skb2); 1659 skb_reset_network_header(skb2);
1663 } 1660 }
1664 1661
@@ -1691,9 +1688,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1691 1688
1692 /* If TC0 is invalidated disable TC mapping */ 1689 /* If TC0 is invalidated disable TC mapping */
1693 if (tc->offset + tc->count > txq) { 1690 if (tc->offset + tc->count > txq) {
1694 pr_warning("Number of in use tx queues changed " 1691 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1695 "invalidating tc mappings. Priority "
1696 "traffic classification disabled!\n");
1697 dev->num_tc = 0; 1692 dev->num_tc = 0;
1698 return; 1693 return;
1699 } 1694 }
@@ -1704,11 +1699,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1704 1699
1705 tc = &dev->tc_to_txq[q]; 1700 tc = &dev->tc_to_txq[q];
1706 if (tc->offset + tc->count > txq) { 1701 if (tc->offset + tc->count > txq) {
1707 pr_warning("Number of in use tx queues " 1702 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1708 "changed. Priority %i to tc " 1703 i, q);
1709 "mapping %i is no longer valid "
1710 "setting map to 0\n",
1711 i, q);
1712 netdev_set_prio_tc_map(dev, i, 0); 1704 netdev_set_prio_tc_map(dev, i, 0);
1713 } 1705 }
1714 } 1706 }
@@ -2014,8 +2006,7 @@ EXPORT_SYMBOL(skb_gso_segment);
2014void netdev_rx_csum_fault(struct net_device *dev) 2006void netdev_rx_csum_fault(struct net_device *dev)
2015{ 2007{
2016 if (net_ratelimit()) { 2008 if (net_ratelimit()) {
2017 printk(KERN_ERR "%s: hw csum failure.\n", 2009 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2018 dev ? dev->name : "<unknown>");
2019 dump_stack(); 2010 dump_stack();
2020 } 2011 }
2021} 2012}
@@ -2332,9 +2323,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2332{ 2323{
2333 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2324 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2334 if (net_ratelimit()) { 2325 if (net_ratelimit()) {
2335 pr_warning("%s selects TX queue %d, but " 2326 pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
2336 "real number of TX queues is %d\n", 2327 dev->name, queue_index,
2337 dev->name, queue_index, dev->real_num_tx_queues); 2328 dev->real_num_tx_queues);
2338 } 2329 }
2339 return 0; 2330 return 0;
2340 } 2331 }
@@ -2578,16 +2569,16 @@ int dev_queue_xmit(struct sk_buff *skb)
2578 } 2569 }
2579 HARD_TX_UNLOCK(dev, txq); 2570 HARD_TX_UNLOCK(dev, txq);
2580 if (net_ratelimit()) 2571 if (net_ratelimit())
2581 printk(KERN_CRIT "Virtual device %s asks to " 2572 pr_crit("Virtual device %s asks to queue packet!\n",
2582 "queue packet!\n", dev->name); 2573 dev->name);
2583 } else { 2574 } else {
2584 /* Recursion is detected! It is possible, 2575 /* Recursion is detected! It is possible,
2585 * unfortunately 2576 * unfortunately
2586 */ 2577 */
2587recursion_alert: 2578recursion_alert:
2588 if (net_ratelimit()) 2579 if (net_ratelimit())
2589 printk(KERN_CRIT "Dead loop on virtual device " 2580 pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
2590 "%s, fix it urgently!\n", dev->name); 2581 dev->name);
2591 } 2582 }
2592 } 2583 }
2593 2584
@@ -3069,8 +3060,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3069 3060
3070 if (unlikely(MAX_RED_LOOP < ttl++)) { 3061 if (unlikely(MAX_RED_LOOP < ttl++)) {
3071 if (net_ratelimit()) 3062 if (net_ratelimit())
3072 pr_warning( "Redir loop detected Dropping packet (%d->%d)\n", 3063 pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
3073 skb->skb_iif, dev->ifindex); 3064 skb->skb_iif, dev->ifindex);
3074 return TC_ACT_SHOT; 3065 return TC_ACT_SHOT;
3075 } 3066 }
3076 3067
@@ -4491,16 +4482,15 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
4491 dev->flags &= ~IFF_PROMISC; 4482 dev->flags &= ~IFF_PROMISC;
4492 else { 4483 else {
4493 dev->promiscuity -= inc; 4484 dev->promiscuity -= inc;
4494 printk(KERN_WARNING "%s: promiscuity touches roof, " 4485 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4495 "set promiscuity failed, promiscuity feature " 4486 dev->name);
4496 "of device might be broken.\n", dev->name);
4497 return -EOVERFLOW; 4487 return -EOVERFLOW;
4498 } 4488 }
4499 } 4489 }
4500 if (dev->flags != old_flags) { 4490 if (dev->flags != old_flags) {
4501 printk(KERN_INFO "device %s %s promiscuous mode\n", 4491 pr_info("device %s %s promiscuous mode\n",
4502 dev->name, (dev->flags & IFF_PROMISC) ? "entered" : 4492 dev->name,
4503 "left"); 4493 dev->flags & IFF_PROMISC ? "entered" : "left");
4504 if (audit_enabled) { 4494 if (audit_enabled) {
4505 current_uid_gid(&uid, &gid); 4495 current_uid_gid(&uid, &gid);
4506 audit_log(current->audit_context, GFP_ATOMIC, 4496 audit_log(current->audit_context, GFP_ATOMIC,
@@ -4573,9 +4563,8 @@ int dev_set_allmulti(struct net_device *dev, int inc)
4573 dev->flags &= ~IFF_ALLMULTI; 4563 dev->flags &= ~IFF_ALLMULTI;
4574 else { 4564 else {
4575 dev->allmulti -= inc; 4565 dev->allmulti -= inc;
4576 printk(KERN_WARNING "%s: allmulti touches roof, " 4566 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4577 "set allmulti failed, allmulti feature of " 4567 dev->name);
4578 "device might be broken.\n", dev->name);
4579 return -EOVERFLOW; 4568 return -EOVERFLOW;
4580 } 4569 }
4581 } 4570 }
@@ -5232,8 +5221,8 @@ static void rollback_registered_many(struct list_head *head)
5232 * devices and proceed with the remaining. 5221 * devices and proceed with the remaining.
5233 */ 5222 */
5234 if (dev->reg_state == NETREG_UNINITIALIZED) { 5223 if (dev->reg_state == NETREG_UNINITIALIZED) {
5235 pr_debug("unregister_netdevice: device %s/%p never " 5224 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5236 "was registered\n", dev->name, dev); 5225 dev->name, dev);
5237 5226
5238 WARN_ON(1); 5227 WARN_ON(1);
5239 list_del(&dev->unreg_list); 5228 list_del(&dev->unreg_list);
@@ -5465,7 +5454,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
5465 5454
5466 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5455 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5467 if (!rx) { 5456 if (!rx) {
5468 pr_err("netdev: Unable to allocate %u rx queues.\n", count); 5457 pr_err("netdev: Unable to allocate %u rx queues\n", count);
5469 return -ENOMEM; 5458 return -ENOMEM;
5470 } 5459 }
5471 dev->_rx = rx; 5460 dev->_rx = rx;
@@ -5499,8 +5488,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
5499 5488
5500 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5489 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5501 if (!tx) { 5490 if (!tx) {
5502 pr_err("netdev: Unable to allocate %u tx queues.\n", 5491 pr_err("netdev: Unable to allocate %u tx queues\n", count);
5503 count);
5504 return -ENOMEM; 5492 return -ENOMEM;
5505 } 5493 }
5506 dev->_tx = tx; 5494 dev->_tx = tx;
@@ -5759,10 +5747,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
5759 refcnt = netdev_refcnt_read(dev); 5747 refcnt = netdev_refcnt_read(dev);
5760 5748
5761 if (time_after(jiffies, warning_time + 10 * HZ)) { 5749 if (time_after(jiffies, warning_time + 10 * HZ)) {
5762 printk(KERN_EMERG "unregister_netdevice: " 5750 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5763 "waiting for %s to become free. Usage " 5751 dev->name, refcnt);
5764 "count = %d\n",
5765 dev->name, refcnt);
5766 warning_time = jiffies; 5752 warning_time = jiffies;
5767 } 5753 }
5768 } 5754 }
@@ -5813,7 +5799,7 @@ void netdev_run_todo(void)
5813 list_del(&dev->todo_list); 5799 list_del(&dev->todo_list);
5814 5800
5815 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5801 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5816 printk(KERN_ERR "network todo '%s' but state %d\n", 5802 pr_err("network todo '%s' but state %d\n",
5817 dev->name, dev->reg_state); 5803 dev->name, dev->reg_state);
5818 dump_stack(); 5804 dump_stack();
5819 continue; 5805 continue;
@@ -5929,15 +5915,13 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5929 BUG_ON(strlen(name) >= sizeof(dev->name)); 5915 BUG_ON(strlen(name) >= sizeof(dev->name));
5930 5916
5931 if (txqs < 1) { 5917 if (txqs < 1) {
5932 pr_err("alloc_netdev: Unable to allocate device " 5918 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5933 "with zero queues.\n");
5934 return NULL; 5919 return NULL;
5935 } 5920 }
5936 5921
5937#ifdef CONFIG_RPS 5922#ifdef CONFIG_RPS
5938 if (rxqs < 1) { 5923 if (rxqs < 1) {
5939 pr_err("alloc_netdev: Unable to allocate device " 5924 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
5940 "with zero RX queues.\n");
5941 return NULL; 5925 return NULL;
5942 } 5926 }
5943#endif 5927#endif
@@ -5953,7 +5937,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5953 5937
5954 p = kzalloc(alloc_size, GFP_KERNEL); 5938 p = kzalloc(alloc_size, GFP_KERNEL);
5955 if (!p) { 5939 if (!p) {
5956 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n"); 5940 pr_err("alloc_netdev: Unable to allocate device\n");
5957 return NULL; 5941 return NULL;
5958 } 5942 }
5959 5943
@@ -6486,8 +6470,8 @@ static void __net_exit default_device_exit(struct net *net)
6486 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 6470 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6487 err = dev_change_net_namespace(dev, &init_net, fb_name); 6471 err = dev_change_net_namespace(dev, &init_net, fb_name);
6488 if (err) { 6472 if (err) {
6489 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n", 6473 pr_emerg("%s: failed to move %s to init_net: %d\n",
6490 __func__, dev->name, err); 6474 __func__, dev->name, err);
6491 BUG(); 6475 BUG();
6492 } 6476 }
6493 } 6477 }
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e287346e0934..f98ec444133a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2165,6 +2165,35 @@ nla_put_failure:
2165 return -EMSGSIZE; 2165 return -EMSGSIZE;
2166} 2166}
2167 2167
2168static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2169 u32 pid, u32 seq, int type, unsigned int flags,
2170 struct neigh_table *tbl)
2171{
2172 struct nlmsghdr *nlh;
2173 struct ndmsg *ndm;
2174
2175 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2176 if (nlh == NULL)
2177 return -EMSGSIZE;
2178
2179 ndm = nlmsg_data(nlh);
2180 ndm->ndm_family = tbl->family;
2181 ndm->ndm_pad1 = 0;
2182 ndm->ndm_pad2 = 0;
2183 ndm->ndm_flags = pn->flags | NTF_PROXY;
2184 ndm->ndm_type = NDA_DST;
2185 ndm->ndm_ifindex = pn->dev->ifindex;
2186 ndm->ndm_state = NUD_NONE;
2187
2188 NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
2189
2190 return nlmsg_end(skb, nlh);
2191
2192nla_put_failure:
2193 nlmsg_cancel(skb, nlh);
2194 return -EMSGSIZE;
2195}
2196
2168static void neigh_update_notify(struct neighbour *neigh) 2197static void neigh_update_notify(struct neighbour *neigh)
2169{ 2198{
2170 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 2199 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
@@ -2214,23 +2243,78 @@ out:
2214 return rc; 2243 return rc;
2215} 2244}
2216 2245
2246static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2247 struct netlink_callback *cb)
2248{
2249 struct pneigh_entry *n;
2250 struct net *net = sock_net(skb->sk);
2251 int rc, h, s_h = cb->args[3];
2252 int idx, s_idx = idx = cb->args[4];
2253
2254 read_lock_bh(&tbl->lock);
2255
2256 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
2257 if (h < s_h)
2258 continue;
2259 if (h > s_h)
2260 s_idx = 0;
2261 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2262 if (dev_net(n->dev) != net)
2263 continue;
2264 if (idx < s_idx)
2265 goto next;
2266 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2267 cb->nlh->nlmsg_seq,
2268 RTM_NEWNEIGH,
2269 NLM_F_MULTI, tbl) <= 0) {
2270 read_unlock_bh(&tbl->lock);
2271 rc = -1;
2272 goto out;
2273 }
2274 next:
2275 idx++;
2276 }
2277 }
2278
2279 read_unlock_bh(&tbl->lock);
2280 rc = skb->len;
2281out:
2282 cb->args[3] = h;
2283 cb->args[4] = idx;
2284 return rc;
2285
2286}
2287
2217static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2288static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2218{ 2289{
2219 struct neigh_table *tbl; 2290 struct neigh_table *tbl;
2220 int t, family, s_t; 2291 int t, family, s_t;
2292 int proxy = 0;
2293 int err = 0;
2221 2294
2222 read_lock(&neigh_tbl_lock); 2295 read_lock(&neigh_tbl_lock);
2223 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; 2296 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2297
2298 /* check for full ndmsg structure presence, family member is
2299 * the same for both structures
2300 */
2301 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2302 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2303 proxy = 1;
2304
2224 s_t = cb->args[0]; 2305 s_t = cb->args[0];
2225 2306
2226 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) { 2307 for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
2308 tbl = tbl->next, t++) {
2227 if (t < s_t || (family && tbl->family != family)) 2309 if (t < s_t || (family && tbl->family != family))
2228 continue; 2310 continue;
2229 if (t > s_t) 2311 if (t > s_t)
2230 memset(&cb->args[1], 0, sizeof(cb->args) - 2312 memset(&cb->args[1], 0, sizeof(cb->args) -
2231 sizeof(cb->args[0])); 2313 sizeof(cb->args[0]));
2232 if (neigh_dump_table(tbl, skb, cb) < 0) 2314 if (proxy)
2233 break; 2315 err = pneigh_dump_table(tbl, skb, cb);
2316 else
2317 err = neigh_dump_table(tbl, skb, cb);
2234 } 2318 }
2235 read_unlock(&neigh_tbl_lock); 2319 read_unlock(&neigh_tbl_lock);
2236 2320
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b08298669..4ce473ea5dc0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -9,6 +9,8 @@
9 * Copyright (C) 2002 Red Hat, Inc. 9 * Copyright (C) 2002 Red Hat, Inc.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
13#include <linux/netdevice.h> 15#include <linux/netdevice.h>
14#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
@@ -45,9 +47,11 @@ static atomic_t trapped;
45#define NETPOLL_RX_ENABLED 1 47#define NETPOLL_RX_ENABLED 1
46#define NETPOLL_RX_DROP 2 48#define NETPOLL_RX_DROP 2
47 49
48#define MAX_SKB_SIZE \ 50#define MAX_SKB_SIZE \
49 (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ 51 (sizeof(struct ethhdr) + \
50 sizeof(struct iphdr) + sizeof(struct ethhdr)) 52 sizeof(struct iphdr) + \
53 sizeof(struct udphdr) + \
54 MAX_UDP_CHUNK)
51 55
52static void zap_completion_queue(void); 56static void zap_completion_queue(void);
53static void arp_reply(struct sk_buff *skb); 57static void arp_reply(struct sk_buff *skb);
@@ -55,6 +59,13 @@ static void arp_reply(struct sk_buff *skb);
55static unsigned int carrier_timeout = 4; 59static unsigned int carrier_timeout = 4;
56module_param(carrier_timeout, uint, 0644); 60module_param(carrier_timeout, uint, 0644);
57 61
62#define np_info(np, fmt, ...) \
63 pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
64#define np_err(np, fmt, ...) \
65 pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
66#define np_notice(np, fmt, ...) \
67 pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
68
58static void queue_process(struct work_struct *work) 69static void queue_process(struct work_struct *work)
59{ 70{
60 struct netpoll_info *npinfo = 71 struct netpoll_info *npinfo =
@@ -627,18 +638,12 @@ out:
627 638
628void netpoll_print_options(struct netpoll *np) 639void netpoll_print_options(struct netpoll *np)
629{ 640{
630 printk(KERN_INFO "%s: local port %d\n", 641 np_info(np, "local port %d\n", np->local_port);
631 np->name, np->local_port); 642 np_info(np, "local IP %pI4\n", &np->local_ip);
632 printk(KERN_INFO "%s: local IP %pI4\n", 643 np_info(np, "interface '%s'\n", np->dev_name);
633 np->name, &np->local_ip); 644 np_info(np, "remote port %d\n", np->remote_port);
634 printk(KERN_INFO "%s: interface '%s'\n", 645 np_info(np, "remote IP %pI4\n", &np->remote_ip);
635 np->name, np->dev_name); 646 np_info(np, "remote ethernet address %pM\n", np->remote_mac);
636 printk(KERN_INFO "%s: remote port %d\n",
637 np->name, np->remote_port);
638 printk(KERN_INFO "%s: remote IP %pI4\n",
639 np->name, &np->remote_ip);
640 printk(KERN_INFO "%s: remote ethernet address %pM\n",
641 np->name, np->remote_mac);
642} 647}
643EXPORT_SYMBOL(netpoll_print_options); 648EXPORT_SYMBOL(netpoll_print_options);
644 649
@@ -680,8 +685,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
680 goto parse_failed; 685 goto parse_failed;
681 *delim = 0; 686 *delim = 0;
682 if (*cur == ' ' || *cur == '\t') 687 if (*cur == ' ' || *cur == '\t')
683 printk(KERN_INFO "%s: warning: whitespace" 688 np_info(np, "warning: whitespace is not allowed\n");
684 "is not allowed\n", np->name);
685 np->remote_port = simple_strtol(cur, NULL, 10); 689 np->remote_port = simple_strtol(cur, NULL, 10);
686 cur = delim; 690 cur = delim;
687 } 691 }
@@ -705,8 +709,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
705 return 0; 709 return 0;
706 710
707 parse_failed: 711 parse_failed:
708 printk(KERN_INFO "%s: couldn't parse config at '%s'!\n", 712 np_info(np, "couldn't parse config at '%s'!\n", cur);
709 np->name, cur);
710 return -1; 713 return -1;
711} 714}
712EXPORT_SYMBOL(netpoll_parse_options); 715EXPORT_SYMBOL(netpoll_parse_options);
@@ -721,8 +724,8 @@ int __netpoll_setup(struct netpoll *np)
721 724
722 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || 725 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
723 !ndev->netdev_ops->ndo_poll_controller) { 726 !ndev->netdev_ops->ndo_poll_controller) {
724 printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", 727 np_err(np, "%s doesn't support polling, aborting\n",
725 np->name, np->dev_name); 728 np->dev_name);
726 err = -ENOTSUPP; 729 err = -ENOTSUPP;
727 goto out; 730 goto out;
728 } 731 }
@@ -785,14 +788,12 @@ int netpoll_setup(struct netpoll *np)
785 if (np->dev_name) 788 if (np->dev_name)
786 ndev = dev_get_by_name(&init_net, np->dev_name); 789 ndev = dev_get_by_name(&init_net, np->dev_name);
787 if (!ndev) { 790 if (!ndev) {
788 printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", 791 np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
789 np->name, np->dev_name);
790 return -ENODEV; 792 return -ENODEV;
791 } 793 }
792 794
793 if (ndev->master) { 795 if (ndev->master) {
794 printk(KERN_ERR "%s: %s is a slave device, aborting.\n", 796 np_err(np, "%s is a slave device, aborting\n", np->dev_name);
795 np->name, np->dev_name);
796 err = -EBUSY; 797 err = -EBUSY;
797 goto put; 798 goto put;
798 } 799 }
@@ -800,16 +801,14 @@ int netpoll_setup(struct netpoll *np)
800 if (!netif_running(ndev)) { 801 if (!netif_running(ndev)) {
801 unsigned long atmost, atleast; 802 unsigned long atmost, atleast;
802 803
803 printk(KERN_INFO "%s: device %s not up yet, forcing it\n", 804 np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
804 np->name, np->dev_name);
805 805
806 rtnl_lock(); 806 rtnl_lock();
807 err = dev_open(ndev); 807 err = dev_open(ndev);
808 rtnl_unlock(); 808 rtnl_unlock();
809 809
810 if (err) { 810 if (err) {
811 printk(KERN_ERR "%s: failed to open %s\n", 811 np_err(np, "failed to open %s\n", ndev->name);
812 np->name, ndev->name);
813 goto put; 812 goto put;
814 } 813 }
815 814
@@ -817,9 +816,7 @@ int netpoll_setup(struct netpoll *np)
817 atmost = jiffies + carrier_timeout * HZ; 816 atmost = jiffies + carrier_timeout * HZ;
818 while (!netif_carrier_ok(ndev)) { 817 while (!netif_carrier_ok(ndev)) {
819 if (time_after(jiffies, atmost)) { 818 if (time_after(jiffies, atmost)) {
820 printk(KERN_NOTICE 819 np_notice(np, "timeout waiting for carrier\n");
821 "%s: timeout waiting for carrier\n",
822 np->name);
823 break; 820 break;
824 } 821 }
825 msleep(1); 822 msleep(1);
@@ -831,9 +828,7 @@ int netpoll_setup(struct netpoll *np)
831 */ 828 */
832 829
833 if (time_before(jiffies, atleast)) { 830 if (time_before(jiffies, atleast)) {
834 printk(KERN_NOTICE "%s: carrier detect appears" 831 np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
835 " untrustworthy, waiting 4 seconds\n",
836 np->name);
837 msleep(4000); 832 msleep(4000);
838 } 833 }
839 } 834 }
@@ -844,15 +839,15 @@ int netpoll_setup(struct netpoll *np)
844 839
845 if (!in_dev || !in_dev->ifa_list) { 840 if (!in_dev || !in_dev->ifa_list) {
846 rcu_read_unlock(); 841 rcu_read_unlock();
847 printk(KERN_ERR "%s: no IP address for %s, aborting\n", 842 np_err(np, "no IP address for %s, aborting\n",
848 np->name, np->dev_name); 843 np->dev_name);
849 err = -EDESTADDRREQ; 844 err = -EDESTADDRREQ;
850 goto put; 845 goto put;
851 } 846 }
852 847
853 np->local_ip = in_dev->ifa_list->ifa_local; 848 np->local_ip = in_dev->ifa_list->ifa_local;
854 rcu_read_unlock(); 849 rcu_read_unlock();
855 printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip); 850 np_info(np, "local IP %pI4\n", &np->local_ip);
856 } 851 }
857 852
858 np->dev = ndev; 853 np->dev = ndev;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index befe426491ba..ee7013f24fca 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -205,17 +205,23 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
205 struct neighbour *neigh = dst_get_neighbour_noref(dst); 205 struct neighbour *neigh = dst_get_neighbour_noref(dst);
206 struct net_device *dev = neigh->dev; 206 struct net_device *dev = neigh->dev;
207 char mac_addr[ETH_ALEN]; 207 char mac_addr[ETH_ALEN];
208 unsigned int seq;
209 int err;
208 210
209 dn_dn2eth(mac_addr, rt->rt_local_src); 211 dn_dn2eth(mac_addr, rt->rt_local_src);
210 if (dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, 212 do {
211 mac_addr, skb->len) >= 0) 213 seq = read_seqbegin(&neigh->ha_lock);
212 return dev_queue_xmit(skb); 214 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
213 215 neigh->ha, mac_addr, skb->len);
214 if (net_ratelimit()) 216 } while (read_seqretry(&neigh->ha_lock, seq));
215 printk(KERN_DEBUG "dn_neigh_output_packet: oops, can't send packet\n"); 217
216 218 if (err >= 0)
217 kfree_skb(skb); 219 err = dev_queue_xmit(skb);
218 return -EINVAL; 220 else {
221 kfree_skb(skb);
222 err = -EINVAL;
223 }
224 return err;
219} 225}
220 226
221static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) 227static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6b3ca5ba4450..0286d78c589c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -730,15 +730,16 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
730 730
731 if (skb->protocol == htons(ETH_P_IP)) { 731 if (skb->protocol == htons(ETH_P_IP)) {
732 rt = skb_rtable(skb); 732 rt = skb_rtable(skb);
733 if ((dst = rt->rt_gateway) == 0) 733 dst = rt->rt_gateway;
734 goto tx_error_icmp;
735 } 734 }
736#if IS_ENABLED(CONFIG_IPV6) 735#if IS_ENABLED(CONFIG_IPV6)
737 else if (skb->protocol == htons(ETH_P_IPV6)) { 736 else if (skb->protocol == htons(ETH_P_IPV6)) {
738 struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
739 const struct in6_addr *addr6; 737 const struct in6_addr *addr6;
738 struct neighbour *neigh;
739 bool do_tx_error_icmp;
740 int addr_type; 740 int addr_type;
741 741
742 neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
742 if (neigh == NULL) 743 if (neigh == NULL)
743 goto tx_error; 744 goto tx_error;
744 745
@@ -751,9 +752,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
751 } 752 }
752 753
753 if ((addr_type & IPV6_ADDR_COMPATv4) == 0) 754 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
755 do_tx_error_icmp = true;
756 else {
757 do_tx_error_icmp = false;
758 dst = addr6->s6_addr32[3];
759 }
760 neigh_release(neigh);
761 if (do_tx_error_icmp)
754 goto tx_error_icmp; 762 goto tx_error_icmp;
755
756 dst = addr6->s6_addr32[3];
757 } 763 }
758#endif 764#endif
759 else 765 else
@@ -914,9 +920,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
914 __IPTUNNEL_XMIT(tstats, &dev->stats); 920 __IPTUNNEL_XMIT(tstats, &dev->stats);
915 return NETDEV_TX_OK; 921 return NETDEV_TX_OK;
916 922
923#if IS_ENABLED(CONFIG_IPV6)
917tx_error_icmp: 924tx_error_icmp:
918 dst_link_failure(skb); 925 dst_link_failure(skb);
919 926#endif
920tx_error: 927tx_error:
921 dev->stats.tx_errors++; 928 dev->stats.tx_errors++;
922 dev_kfree_skb(skb); 929 dev_kfree_skb(skb);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 22a199315309..f84ebff5cdb0 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -454,8 +454,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
454 dev->stats.tx_fifo_errors++; 454 dev->stats.tx_fifo_errors++;
455 goto tx_error; 455 goto tx_error;
456 } 456 }
457 if ((dst = rt->rt_gateway) == 0) 457 dst = rt->rt_gateway;
458 goto tx_error_icmp;
459 } 458 }
460 459
461 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, 460 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6afc807ee2ad..02d61079f08b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -256,6 +256,7 @@ static const struct snmp_mib snmp4_net_list[] = {
256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
257 SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), 257 SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), 258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
259 SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
259 SNMP_MIB_SENTINEL 260 SNMP_MIB_SENTINEL
260}; 261};
261 262
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bcacf54e5418..4eeb8ce856e2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1117,10 +1117,15 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
1117 static const __be32 inaddr_any = 0; 1117 static const __be32 inaddr_any = 0;
1118 struct net_device *dev = dst->dev; 1118 struct net_device *dev = dst->dev;
1119 const __be32 *pkey = daddr; 1119 const __be32 *pkey = daddr;
1120 const struct rtable *rt;
1120 struct neighbour *n; 1121 struct neighbour *n;
1121 1122
1123 rt = (const struct rtable *) dst;
1124
1122 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) 1125 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1123 pkey = &inaddr_any; 1126 pkey = &inaddr_any;
1127 else if (rt->rt_gateway)
1128 pkey = (const __be32 *) &rt->rt_gateway;
1124 1129
1125 n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey); 1130 n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey);
1126 if (n) 1131 if (n)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 337ba4cca052..90e47931e217 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -90,16 +90,8 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
90 90
91 91
92#ifdef CONFIG_TCP_MD5SIG 92#ifdef CONFIG_TCP_MD5SIG
93static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, 93static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 addr);
95static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
96 __be32 daddr, __be32 saddr, const struct tcphdr *th); 94 __be32 daddr, __be32 saddr, const struct tcphdr *th);
97#else
98static inline
99struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
100{
101 return NULL;
102}
103#endif 95#endif
104 96
105struct inet_hashinfo tcp_hashinfo; 97struct inet_hashinfo tcp_hashinfo;
@@ -601,6 +593,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
601 struct ip_reply_arg arg; 593 struct ip_reply_arg arg;
602#ifdef CONFIG_TCP_MD5SIG 594#ifdef CONFIG_TCP_MD5SIG
603 struct tcp_md5sig_key *key; 595 struct tcp_md5sig_key *key;
596 const __u8 *hash_location = NULL;
597 unsigned char newhash[16];
598 int genhash;
599 struct sock *sk1 = NULL;
604#endif 600#endif
605 struct net *net; 601 struct net *net;
606 602
@@ -631,7 +627,36 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
631 arg.iov[0].iov_len = sizeof(rep.th); 627 arg.iov[0].iov_len = sizeof(rep.th);
632 628
633#ifdef CONFIG_TCP_MD5SIG 629#ifdef CONFIG_TCP_MD5SIG
634 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL; 630 hash_location = tcp_parse_md5sig_option(th);
631 if (!sk && hash_location) {
632 /*
633 * active side is lost. Try to find listening socket through
634 * source port, and then find md5 key through listening socket.
635 * we are not loose security here:
636 * Incoming packet is checked with md5 hash with finding key,
637 * no RST generated if md5 hash doesn't match.
638 */
639 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
640 &tcp_hashinfo, ip_hdr(skb)->daddr,
641 ntohs(th->source), inet_iif(skb));
642 /* don't send rst if it can't find key */
643 if (!sk1)
644 return;
645 rcu_read_lock();
646 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 &ip_hdr(skb)->saddr, AF_INET);
648 if (!key)
649 goto release_sk1;
650
651 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
652 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 goto release_sk1;
654 } else {
655 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 &ip_hdr(skb)->saddr,
657 AF_INET) : NULL;
658 }
659
635 if (key) { 660 if (key) {
636 rep.opt[0] = htonl((TCPOPT_NOP << 24) | 661 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
637 (TCPOPT_NOP << 16) | 662 (TCPOPT_NOP << 16) |
@@ -659,6 +684,14 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
659 684
660 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 685 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
661 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 686 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
687
688#ifdef CONFIG_TCP_MD5SIG
689release_sk1:
690 if (sk1) {
691 rcu_read_unlock();
692 sock_put(sk1);
693 }
694#endif
662} 695}
663 696
664/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states 697/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
@@ -759,7 +792,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
759 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 792 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
760 req->ts_recent, 793 req->ts_recent,
761 0, 794 0,
762 tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr), 795 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
796 AF_INET),
763 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, 797 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
764 ip_hdr(skb)->tos); 798 ip_hdr(skb)->tos);
765} 799}
@@ -876,153 +910,137 @@ static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
876 */ 910 */
877 911
878/* Find the Key structure for an address. */ 912/* Find the Key structure for an address. */
879static struct tcp_md5sig_key * 913struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
880 tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) 914 const union tcp_md5_addr *addr,
915 int family)
881{ 916{
882 struct tcp_sock *tp = tcp_sk(sk); 917 struct tcp_sock *tp = tcp_sk(sk);
883 int i; 918 struct tcp_md5sig_key *key;
884 919 struct hlist_node *pos;
885 if (!tp->md5sig_info || !tp->md5sig_info->entries4) 920 unsigned int size = sizeof(struct in_addr);
921 struct tcp_md5sig_info *md5sig;
922
923 /* caller either holds rcu_read_lock() or socket lock */
924 md5sig = rcu_dereference_check(tp->md5sig_info,
925 sock_owned_by_user(sk));
926 if (!md5sig)
886 return NULL; 927 return NULL;
887 for (i = 0; i < tp->md5sig_info->entries4; i++) { 928#if IS_ENABLED(CONFIG_IPV6)
888 if (tp->md5sig_info->keys4[i].addr == addr) 929 if (family == AF_INET6)
889 return &tp->md5sig_info->keys4[i].base; 930 size = sizeof(struct in6_addr);
931#endif
932 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
933 if (key->family != family)
934 continue;
935 if (!memcmp(&key->addr, addr, size))
936 return key;
890 } 937 }
891 return NULL; 938 return NULL;
892} 939}
940EXPORT_SYMBOL(tcp_md5_do_lookup);
893 941
894struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, 942struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
895 struct sock *addr_sk) 943 struct sock *addr_sk)
896{ 944{
897 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr); 945 union tcp_md5_addr *addr;
946
947 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
948 return tcp_md5_do_lookup(sk, addr, AF_INET);
898} 949}
899EXPORT_SYMBOL(tcp_v4_md5_lookup); 950EXPORT_SYMBOL(tcp_v4_md5_lookup);
900 951
901static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, 952static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
902 struct request_sock *req) 953 struct request_sock *req)
903{ 954{
904 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr); 955 union tcp_md5_addr *addr;
956
957 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
958 return tcp_md5_do_lookup(sk, addr, AF_INET);
905} 959}
906 960
907/* This can be called on a newly created socket, from other files */ 961/* This can be called on a newly created socket, from other files */
908int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, 962int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
909 u8 *newkey, u8 newkeylen) 963 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
910{ 964{
911 /* Add Key to the list */ 965 /* Add Key to the list */
912 struct tcp_md5sig_key *key; 966 struct tcp_md5sig_key *key;
913 struct tcp_sock *tp = tcp_sk(sk); 967 struct tcp_sock *tp = tcp_sk(sk);
914 struct tcp4_md5sig_key *keys; 968 struct tcp_md5sig_info *md5sig;
915 969
916 key = tcp_v4_md5_do_lookup(sk, addr); 970 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
917 if (key) { 971 if (key) {
918 /* Pre-existing entry - just update that one. */ 972 /* Pre-existing entry - just update that one. */
919 kfree(key->key); 973 memcpy(key->key, newkey, newkeylen);
920 key->key = newkey;
921 key->keylen = newkeylen; 974 key->keylen = newkeylen;
922 } else { 975 return 0;
923 struct tcp_md5sig_info *md5sig; 976 }
924
925 if (!tp->md5sig_info) {
926 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
927 GFP_ATOMIC);
928 if (!tp->md5sig_info) {
929 kfree(newkey);
930 return -ENOMEM;
931 }
932 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
933 }
934 977
935 md5sig = tp->md5sig_info; 978 md5sig = rcu_dereference_protected(tp->md5sig_info,
936 if (md5sig->entries4 == 0 && 979 sock_owned_by_user(sk));
937 tcp_alloc_md5sig_pool(sk) == NULL) { 980 if (!md5sig) {
938 kfree(newkey); 981 md5sig = kmalloc(sizeof(*md5sig), gfp);
982 if (!md5sig)
939 return -ENOMEM; 983 return -ENOMEM;
940 }
941
942 if (md5sig->alloced4 == md5sig->entries4) {
943 keys = kmalloc((sizeof(*keys) *
944 (md5sig->entries4 + 1)), GFP_ATOMIC);
945 if (!keys) {
946 kfree(newkey);
947 if (md5sig->entries4 == 0)
948 tcp_free_md5sig_pool();
949 return -ENOMEM;
950 }
951 984
952 if (md5sig->entries4) 985 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
953 memcpy(keys, md5sig->keys4, 986 INIT_HLIST_HEAD(&md5sig->head);
954 sizeof(*keys) * md5sig->entries4); 987 rcu_assign_pointer(tp->md5sig_info, md5sig);
988 }
955 989
956 /* Free old key list, and reference new one */ 990 key = sock_kmalloc(sk, sizeof(*key), gfp);
957 kfree(md5sig->keys4); 991 if (!key)
958 md5sig->keys4 = keys; 992 return -ENOMEM;
959 md5sig->alloced4++; 993 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
960 } 994 sock_kfree_s(sk, key, sizeof(*key));
961 md5sig->entries4++; 995 return -ENOMEM;
962 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
963 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
964 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
965 } 996 }
966 return 0;
967}
968EXPORT_SYMBOL(tcp_v4_md5_do_add);
969 997
970static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, 998 memcpy(key->key, newkey, newkeylen);
971 u8 *newkey, u8 newkeylen) 999 key->keylen = newkeylen;
972{ 1000 key->family = family;
973 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr, 1001 memcpy(&key->addr, addr,
974 newkey, newkeylen); 1002 (family == AF_INET6) ? sizeof(struct in6_addr) :
1003 sizeof(struct in_addr));
1004 hlist_add_head_rcu(&key->node, &md5sig->head);
1005 return 0;
975} 1006}
1007EXPORT_SYMBOL(tcp_md5_do_add);
976 1008
977int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) 1009int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
978{ 1010{
979 struct tcp_sock *tp = tcp_sk(sk); 1011 struct tcp_sock *tp = tcp_sk(sk);
980 int i; 1012 struct tcp_md5sig_key *key;
981 1013 struct tcp_md5sig_info *md5sig;
982 for (i = 0; i < tp->md5sig_info->entries4; i++) { 1014
983 if (tp->md5sig_info->keys4[i].addr == addr) { 1015 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
984 /* Free the key */ 1016 if (!key)
985 kfree(tp->md5sig_info->keys4[i].base.key); 1017 return -ENOENT;
986 tp->md5sig_info->entries4--; 1018 hlist_del_rcu(&key->node);
987 1019 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
988 if (tp->md5sig_info->entries4 == 0) { 1020 kfree_rcu(key, rcu);
989 kfree(tp->md5sig_info->keys4); 1021 md5sig = rcu_dereference_protected(tp->md5sig_info,
990 tp->md5sig_info->keys4 = NULL; 1022 sock_owned_by_user(sk));
991 tp->md5sig_info->alloced4 = 0; 1023 if (hlist_empty(&md5sig->head))
992 tcp_free_md5sig_pool(); 1024 tcp_free_md5sig_pool();
993 } else if (tp->md5sig_info->entries4 != i) { 1025 return 0;
994 /* Need to do some manipulation */
995 memmove(&tp->md5sig_info->keys4[i],
996 &tp->md5sig_info->keys4[i+1],
997 (tp->md5sig_info->entries4 - i) *
998 sizeof(struct tcp4_md5sig_key));
999 }
1000 return 0;
1001 }
1002 }
1003 return -ENOENT;
1004} 1026}
1005EXPORT_SYMBOL(tcp_v4_md5_do_del); 1027EXPORT_SYMBOL(tcp_md5_do_del);
1006 1028
1007static void tcp_v4_clear_md5_list(struct sock *sk) 1029void tcp_clear_md5_list(struct sock *sk)
1008{ 1030{
1009 struct tcp_sock *tp = tcp_sk(sk); 1031 struct tcp_sock *tp = tcp_sk(sk);
1032 struct tcp_md5sig_key *key;
1033 struct hlist_node *pos, *n;
1034 struct tcp_md5sig_info *md5sig;
1010 1035
1011 /* Free each key, then the set of key keys, 1036 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1012 * the crypto element, and then decrement our 1037
1013 * hold on the last resort crypto. 1038 if (!hlist_empty(&md5sig->head))
1014 */
1015 if (tp->md5sig_info->entries4) {
1016 int i;
1017 for (i = 0; i < tp->md5sig_info->entries4; i++)
1018 kfree(tp->md5sig_info->keys4[i].base.key);
1019 tp->md5sig_info->entries4 = 0;
1020 tcp_free_md5sig_pool(); 1039 tcp_free_md5sig_pool();
1021 } 1040 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1022 if (tp->md5sig_info->keys4) { 1041 hlist_del_rcu(&key->node);
1023 kfree(tp->md5sig_info->keys4); 1042 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1024 tp->md5sig_info->keys4 = NULL; 1043 kfree_rcu(key, rcu);
1025 tp->md5sig_info->alloced4 = 0;
1026 } 1044 }
1027} 1045}
1028 1046
@@ -1031,7 +1049,6 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1031{ 1049{
1032 struct tcp_md5sig cmd; 1050 struct tcp_md5sig cmd;
1033 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; 1051 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1034 u8 *newkey;
1035 1052
1036 if (optlen < sizeof(cmd)) 1053 if (optlen < sizeof(cmd))
1037 return -EINVAL; 1054 return -EINVAL;
@@ -1042,32 +1059,16 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1042 if (sin->sin_family != AF_INET) 1059 if (sin->sin_family != AF_INET)
1043 return -EINVAL; 1060 return -EINVAL;
1044 1061
1045 if (!cmd.tcpm_key || !cmd.tcpm_keylen) { 1062 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1046 if (!tcp_sk(sk)->md5sig_info) 1063 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1047 return -ENOENT; 1064 AF_INET);
1048 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
1049 }
1050 1065
1051 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 1066 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1052 return -EINVAL; 1067 return -EINVAL;
1053 1068
1054 if (!tcp_sk(sk)->md5sig_info) { 1069 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1055 struct tcp_sock *tp = tcp_sk(sk); 1070 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1056 struct tcp_md5sig_info *p; 1071 GFP_KERNEL);
1057
1058 p = kzalloc(sizeof(*p), sk->sk_allocation);
1059 if (!p)
1060 return -EINVAL;
1061
1062 tp->md5sig_info = p;
1063 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1064 }
1065
1066 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
1067 if (!newkey)
1068 return -ENOMEM;
1069 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
1070 newkey, cmd.tcpm_keylen);
1071} 1072}
1072 1073
1073static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 1074static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
@@ -1093,7 +1094,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1093 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); 1094 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1094} 1095}
1095 1096
1096static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 1097static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1097 __be32 daddr, __be32 saddr, const struct tcphdr *th) 1098 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1098{ 1099{
1099 struct tcp_md5sig_pool *hp; 1100 struct tcp_md5sig_pool *hp;
@@ -1193,7 +1194,8 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1193 int genhash; 1194 int genhash;
1194 unsigned char newhash[16]; 1195 unsigned char newhash[16];
1195 1196
1196 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); 1197 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1198 AF_INET);
1197 hash_location = tcp_parse_md5sig_option(th); 1199 hash_location = tcp_parse_md5sig_option(th);
1198 1200
1199 /* We've parsed the options - do we have a hash? */ 1201 /* We've parsed the options - do we have a hash? */
@@ -1481,7 +1483,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1481 1483
1482#ifdef CONFIG_TCP_MD5SIG 1484#ifdef CONFIG_TCP_MD5SIG
1483 /* Copy over the MD5 key from the original socket */ 1485 /* Copy over the MD5 key from the original socket */
1484 key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr); 1486 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1487 AF_INET);
1485 if (key != NULL) { 1488 if (key != NULL) {
1486 /* 1489 /*
1487 * We're using one, so create a matching key 1490 * We're using one, so create a matching key
@@ -1489,10 +1492,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1489 * memory, then we end up not copying the key 1492 * memory, then we end up not copying the key
1490 * across. Shucks. 1493 * across. Shucks.
1491 */ 1494 */
1492 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); 1495 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1493 if (newkey != NULL) 1496 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1494 tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
1495 newkey, key->keylen);
1496 sk_nocaps_add(newsk, NETIF_F_GSO_MASK); 1497 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1497 } 1498 }
1498#endif 1499#endif
@@ -1853,7 +1854,6 @@ EXPORT_SYMBOL(ipv4_specific);
1853static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { 1854static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1854 .md5_lookup = tcp_v4_md5_lookup, 1855 .md5_lookup = tcp_v4_md5_lookup,
1855 .calc_md5_hash = tcp_v4_md5_hash_skb, 1856 .calc_md5_hash = tcp_v4_md5_hash_skb,
1856 .md5_add = tcp_v4_md5_add_func,
1857 .md5_parse = tcp_v4_parse_md5_keys, 1857 .md5_parse = tcp_v4_parse_md5_keys,
1858}; 1858};
1859#endif 1859#endif
@@ -1942,8 +1942,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
1942#ifdef CONFIG_TCP_MD5SIG 1942#ifdef CONFIG_TCP_MD5SIG
1943 /* Clean up the MD5 key list, if any */ 1943 /* Clean up the MD5 key list, if any */
1944 if (tp->md5sig_info) { 1944 if (tp->md5sig_info) {
1945 tcp_v4_clear_md5_list(sk); 1945 tcp_clear_md5_list(sk);
1946 kfree(tp->md5sig_info); 1946 kfree_rcu(tp->md5sig_info, rcu);
1947 tp->md5sig_info = NULL; 1947 tp->md5sig_info = NULL;
1948 } 1948 }
1949#endif 1949#endif
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 550e755747e0..3cabafb5cdd1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -359,13 +359,11 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
359 */ 359 */
360 do { 360 do {
361 struct tcp_md5sig_key *key; 361 struct tcp_md5sig_key *key;
362 memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key)); 362 tcptw->tw_md5_key = NULL;
363 tcptw->tw_md5_keylen = 0;
364 key = tp->af_specific->md5_lookup(sk, sk); 363 key = tp->af_specific->md5_lookup(sk, sk);
365 if (key != NULL) { 364 if (key != NULL) {
366 memcpy(&tcptw->tw_md5_key, key->key, key->keylen); 365 tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
367 tcptw->tw_md5_keylen = key->keylen; 366 if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
368 if (tcp_alloc_md5sig_pool(sk) == NULL)
369 BUG(); 367 BUG();
370 } 368 }
371 } while (0); 369 } while (0);
@@ -405,8 +403,10 @@ void tcp_twsk_destructor(struct sock *sk)
405{ 403{
406#ifdef CONFIG_TCP_MD5SIG 404#ifdef CONFIG_TCP_MD5SIG
407 struct tcp_timewait_sock *twsk = tcp_twsk(sk); 405 struct tcp_timewait_sock *twsk = tcp_twsk(sk);
408 if (twsk->tw_md5_keylen) 406 if (twsk->tw_md5_key) {
409 tcp_free_md5sig_pool(); 407 tcp_free_md5sig_pool();
408 kfree_rcu(twsk->tw_md5_key, rcu);
409 }
410#endif 410#endif
411} 411}
412EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 412EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4ff3b6dc74fc..364784a91939 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2306,8 +2306,10 @@ begin_fwd:
2306 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2306 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2307 continue; 2307 continue;
2308 2308
2309 if (tcp_retransmit_skb(sk, skb)) 2309 if (tcp_retransmit_skb(sk, skb)) {
2310 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2310 return; 2311 return;
2312 }
2311 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2313 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2312 2314
2313 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) 2315 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 59402b4637f9..db00d27ffb16 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -211,35 +211,6 @@ void ipv6_sock_ac_close(struct sock *sk)
211 rcu_read_unlock(); 211 rcu_read_unlock();
212} 212}
213 213
214#if 0
215/* The function is not used, which is funny. Apparently, author
216 * supposed to use it to filter out datagrams inside udp/raw but forgot.
217 *
218 * It is OK, anycasts are not special comparing to delivery to unicasts.
219 */
220
221int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex)
222{
223 struct ipv6_ac_socklist *pac;
224 struct ipv6_pinfo *np = inet6_sk(sk);
225 int found;
226
227 found = 0;
228 read_lock(&ipv6_sk_ac_lock);
229 for (pac=np->ipv6_ac_list; pac; pac=pac->acl_next) {
230 if (ifindex && pac->acl_ifindex != ifindex)
231 continue;
232 found = ipv6_addr_equal(&pac->acl_addr, addr);
233 if (found)
234 break;
235 }
236 read_unlock(&ipv6_sk_ac_lock);
237
238 return found;
239}
240
241#endif
242
243static void aca_put(struct ifacaddr6 *ac) 214static void aca_put(struct ifacaddr6 *ac)
244{ 215{
245 if (atomic_dec_and_test(&ac->aca_refcnt)) { 216 if (atomic_dec_and_test(&ac->aca_refcnt)) {
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b82bcde53f7a..5b27fbcae346 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1552,11 +1552,20 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1552 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) { 1552 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
1553 RT6_TRACE("aging clone %p\n", rt); 1553 RT6_TRACE("aging clone %p\n", rt);
1554 return -1; 1554 return -1;
1555 } else if ((rt->rt6i_flags & RTF_GATEWAY) && 1555 } else if (rt->rt6i_flags & RTF_GATEWAY) {
1556 (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) { 1556 struct neighbour *neigh;
1557 RT6_TRACE("purging route %p via non-router but gateway\n", 1557 __u8 neigh_flags = 0;
1558 rt); 1558
1559 return -1; 1559 neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
1560 if (neigh) {
1561 neigh_flags = neigh->flags;
1562 neigh_release(neigh);
1563 }
1564 if (neigh_flags & NTF_ROUTER) {
1565 RT6_TRACE("purging route %p via non-router but gateway\n",
1566 rt);
1567 return -1;
1568 }
1560 } 1569 }
1561 gc_args.more++; 1570 gc_args.more++;
1562 } 1571 }
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d97e07183ce9..7a98fc2a5d97 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -388,7 +388,6 @@ int ip6_forward(struct sk_buff *skb)
388 struct ipv6hdr *hdr = ipv6_hdr(skb); 388 struct ipv6hdr *hdr = ipv6_hdr(skb);
389 struct inet6_skb_parm *opt = IP6CB(skb); 389 struct inet6_skb_parm *opt = IP6CB(skb);
390 struct net *net = dev_net(dst->dev); 390 struct net *net = dev_net(dst->dev);
391 struct neighbour *n;
392 u32 mtu; 391 u32 mtu;
393 392
394 if (net->ipv6.devconf_all->forwarding == 0) 393 if (net->ipv6.devconf_all->forwarding == 0)
@@ -463,8 +462,7 @@ int ip6_forward(struct sk_buff *skb)
463 send redirects to source routed frames. 462 send redirects to source routed frames.
464 We don't send redirects to frames decapsulated from IPsec. 463 We don't send redirects to frames decapsulated from IPsec.
465 */ 464 */
466 n = dst_get_neighbour_noref(dst); 465 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
467 if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
468 struct in6_addr *target = NULL; 466 struct in6_addr *target = NULL;
469 struct rt6_info *rt; 467 struct rt6_info *rt;
470 468
@@ -474,8 +472,8 @@ int ip6_forward(struct sk_buff *skb)
474 */ 472 */
475 473
476 rt = (struct rt6_info *) dst; 474 rt = (struct rt6_info *) dst;
477 if ((rt->rt6i_flags & RTF_GATEWAY)) 475 if (rt->rt6i_flags & RTF_GATEWAY)
478 target = (struct in6_addr*)&n->primary_key; 476 target = &rt->rt6i_gateway;
479 else 477 else
480 target = &hdr->daddr; 478 target = &hdr->daddr;
481 479
@@ -486,7 +484,7 @@ int ip6_forward(struct sk_buff *skb)
486 and by source (inside ndisc_send_redirect) 484 and by source (inside ndisc_send_redirect)
487 */ 485 */
488 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) 486 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
489 ndisc_send_redirect(skb, n, target); 487 ndisc_send_redirect(skb, target);
490 } else { 488 } else {
491 int addrtype = ipv6_addr_type(&hdr->saddr); 489 int addrtype = ipv6_addr_type(&hdr->saddr);
492 490
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d8f02ef88e59..8d817018c188 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1223,11 +1223,17 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1223 1223
1224 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev); 1224 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
1225 1225
1226 if (rt) 1226 if (rt) {
1227 neigh = dst_get_neighbour_noref(&rt->dst); 1227 neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
1228 1228 if (!neigh) {
1229 ND_PRINTK0(KERN_ERR
1230 "ICMPv6 RA: %s() got default router without neighbour.\n",
1231 __func__);
1232 dst_release(&rt->dst);
1233 return;
1234 }
1235 }
1229 if (rt && lifetime == 0) { 1236 if (rt && lifetime == 0) {
1230 neigh_clone(neigh);
1231 ip6_del_rt(rt); 1237 ip6_del_rt(rt);
1232 rt = NULL; 1238 rt = NULL;
1233 } 1239 }
@@ -1244,7 +1250,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1244 return; 1250 return;
1245 } 1251 }
1246 1252
1247 neigh = dst_get_neighbour_noref(&rt->dst); 1253 neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
1248 if (neigh == NULL) { 1254 if (neigh == NULL) {
1249 ND_PRINTK0(KERN_ERR 1255 ND_PRINTK0(KERN_ERR
1250 "ICMPv6 RA: %s() got default router without neighbour.\n", 1256 "ICMPv6 RA: %s() got default router without neighbour.\n",
@@ -1411,7 +1417,7 @@ skip_routeinfo:
1411out: 1417out:
1412 if (rt) 1418 if (rt)
1413 dst_release(&rt->dst); 1419 dst_release(&rt->dst);
1414 else if (neigh) 1420 if (neigh)
1415 neigh_release(neigh); 1421 neigh_release(neigh);
1416} 1422}
1417 1423
@@ -1506,8 +1512,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1506 } 1512 }
1507} 1513}
1508 1514
1509void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, 1515void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1510 const struct in6_addr *target)
1511{ 1516{
1512 struct net_device *dev = skb->dev; 1517 struct net_device *dev = skb->dev;
1513 struct net *net = dev_net(dev); 1518 struct net *net = dev_net(dev);
@@ -1565,6 +1570,13 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1565 goto release; 1570 goto release;
1566 1571
1567 if (dev->addr_len) { 1572 if (dev->addr_len) {
1573 struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
1574 if (!neigh) {
1575 ND_PRINTK2(KERN_WARNING
1576 "ICMPv6 Redirect: no neigh for target address\n");
1577 goto release;
1578 }
1579
1568 read_lock_bh(&neigh->lock); 1580 read_lock_bh(&neigh->lock);
1569 if (neigh->nud_state & NUD_VALID) { 1581 if (neigh->nud_state & NUD_VALID) {
1570 memcpy(ha_buf, neigh->ha, dev->addr_len); 1582 memcpy(ha_buf, neigh->ha, dev->addr_len);
@@ -1573,6 +1585,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1573 len += ndisc_opt_addr_space(dev); 1585 len += ndisc_opt_addr_space(dev);
1574 } else 1586 } else
1575 read_unlock_bh(&neigh->lock); 1587 read_unlock_bh(&neigh->lock);
1588
1589 neigh_release(neigh);
1576 } 1590 }
1577 1591
1578 rd_len = min_t(unsigned int, 1592 rd_len = min_t(unsigned int,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b69fae76a6f1..9447bd69873a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -336,12 +336,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
336 } 336 }
337 337
338found: 338found:
339 /* RFC5722, Section 4: 339 /* RFC5722, Section 4, amended by Errata ID : 3089
340 * When reassembling an IPv6 datagram, if 340 * When reassembling an IPv6 datagram, if
341 * one or more its constituent fragments is determined to be an 341 * one or more its constituent fragments is determined to be an
342 * overlapping fragment, the entire datagram (and any constituent 342 * overlapping fragment, the entire datagram (and any constituent
343 * fragments, including those not yet received) MUST be silently 343 * fragments) MUST be silently discarded.
344 * discarded.
345 */ 344 */
346 345
347 /* Check for overlap with preceding fragment. */ 346 /* Check for overlap with preceding fragment. */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8c2e3ab58f2a..92be12bb8d23 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -121,9 +121,22 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
121 return p; 121 return p;
122} 122}
123 123
124static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr)
125{
126 struct in6_addr *p = &rt->rt6i_gateway;
127
128 if (!ipv6_addr_any(p))
129 return (const void *) p;
130 return daddr;
131}
132
124static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr) 133static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
125{ 134{
126 struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); 135 struct rt6_info *rt = (struct rt6_info *) dst;
136 struct neighbour *n;
137
138 daddr = choose_neigh_daddr(rt, daddr);
139 n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
127 if (n) 140 if (n)
128 return n; 141 return n;
129 return neigh_create(&nd_tbl, daddr, dst->dev); 142 return neigh_create(&nd_tbl, daddr, dst->dev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 133768e52912..c4ffd1743528 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -680,9 +680,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
680 /* ISATAP (RFC4214) - must come before 6to4 */ 680 /* ISATAP (RFC4214) - must come before 6to4 */
681 if (dev->priv_flags & IFF_ISATAP) { 681 if (dev->priv_flags & IFF_ISATAP) {
682 struct neighbour *neigh = NULL; 682 struct neighbour *neigh = NULL;
683 bool do_tx_error = false;
683 684
684 if (skb_dst(skb)) 685 if (skb_dst(skb))
685 neigh = dst_get_neighbour_noref(skb_dst(skb)); 686 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
686 687
687 if (neigh == NULL) { 688 if (neigh == NULL) {
688 if (net_ratelimit()) 689 if (net_ratelimit())
@@ -697,6 +698,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
697 ipv6_addr_is_isatap(addr6)) 698 ipv6_addr_is_isatap(addr6))
698 dst = addr6->s6_addr32[3]; 699 dst = addr6->s6_addr32[3];
699 else 700 else
701 do_tx_error = true;
702
703 neigh_release(neigh);
704 if (do_tx_error)
700 goto tx_error; 705 goto tx_error;
701 } 706 }
702 707
@@ -705,9 +710,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
705 710
706 if (!dst) { 711 if (!dst) {
707 struct neighbour *neigh = NULL; 712 struct neighbour *neigh = NULL;
713 bool do_tx_error = false;
708 714
709 if (skb_dst(skb)) 715 if (skb_dst(skb))
710 neigh = dst_get_neighbour_noref(skb_dst(skb)); 716 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
711 717
712 if (neigh == NULL) { 718 if (neigh == NULL) {
713 if (net_ratelimit()) 719 if (net_ratelimit())
@@ -723,10 +729,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
723 addr_type = ipv6_addr_type(addr6); 729 addr_type = ipv6_addr_type(addr6);
724 } 730 }
725 731
726 if ((addr_type & IPV6_ADDR_COMPATv4) == 0) 732 if ((addr_type & IPV6_ADDR_COMPATv4) != 0)
727 goto tx_error_icmp; 733 dst = addr6->s6_addr32[3];
734 else
735 do_tx_error = true;
728 736
729 dst = addr6->s6_addr32[3]; 737 neigh_release(neigh);
738 if (do_tx_error)
739 goto tx_error;
730 } 740 }
731 741
732 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, 742 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3edd05ae4388..d16414cb3421 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -540,19 +540,7 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
541 const struct in6_addr *addr) 541 const struct in6_addr *addr)
542{ 542{
543 struct tcp_sock *tp = tcp_sk(sk); 543 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
544 int i;
545
546 BUG_ON(tp == NULL);
547
548 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
549 return NULL;
550
551 for (i = 0; i < tp->md5sig_info->entries6; i++) {
552 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
553 return &tp->md5sig_info->keys6[i].base;
554 }
555 return NULL;
556} 544}
557 545
558static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, 546static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
@@ -567,136 +555,11 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
567 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); 555 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
568} 556}
569 557
570static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
571 char *newkey, u8 newkeylen)
572{
573 /* Add key to the list */
574 struct tcp_md5sig_key *key;
575 struct tcp_sock *tp = tcp_sk(sk);
576 struct tcp6_md5sig_key *keys;
577
578 key = tcp_v6_md5_do_lookup(sk, peer);
579 if (key) {
580 /* modify existing entry - just update that one */
581 kfree(key->key);
582 key->key = newkey;
583 key->keylen = newkeylen;
584 } else {
585 /* reallocate new list if current one is full. */
586 if (!tp->md5sig_info) {
587 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
588 if (!tp->md5sig_info) {
589 kfree(newkey);
590 return -ENOMEM;
591 }
592 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
593 }
594 if (tp->md5sig_info->entries6 == 0 &&
595 tcp_alloc_md5sig_pool(sk) == NULL) {
596 kfree(newkey);
597 return -ENOMEM;
598 }
599 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
600 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
601 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
602
603 if (!keys) {
604 kfree(newkey);
605 if (tp->md5sig_info->entries6 == 0)
606 tcp_free_md5sig_pool();
607 return -ENOMEM;
608 }
609
610 if (tp->md5sig_info->entries6)
611 memmove(keys, tp->md5sig_info->keys6,
612 (sizeof (tp->md5sig_info->keys6[0]) *
613 tp->md5sig_info->entries6));
614
615 kfree(tp->md5sig_info->keys6);
616 tp->md5sig_info->keys6 = keys;
617 tp->md5sig_info->alloced6++;
618 }
619
620 tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
621 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
622 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
623
624 tp->md5sig_info->entries6++;
625 }
626 return 0;
627}
628
629static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
630 u8 *newkey, __u8 newkeylen)
631{
632 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
633 newkey, newkeylen);
634}
635
636static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
637{
638 struct tcp_sock *tp = tcp_sk(sk);
639 int i;
640
641 for (i = 0; i < tp->md5sig_info->entries6; i++) {
642 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
643 /* Free the key */
644 kfree(tp->md5sig_info->keys6[i].base.key);
645 tp->md5sig_info->entries6--;
646
647 if (tp->md5sig_info->entries6 == 0) {
648 kfree(tp->md5sig_info->keys6);
649 tp->md5sig_info->keys6 = NULL;
650 tp->md5sig_info->alloced6 = 0;
651 tcp_free_md5sig_pool();
652 } else {
653 /* shrink the database */
654 if (tp->md5sig_info->entries6 != i)
655 memmove(&tp->md5sig_info->keys6[i],
656 &tp->md5sig_info->keys6[i+1],
657 (tp->md5sig_info->entries6 - i)
658 * sizeof (tp->md5sig_info->keys6[0]));
659 }
660 return 0;
661 }
662 }
663 return -ENOENT;
664}
665
666static void tcp_v6_clear_md5_list (struct sock *sk)
667{
668 struct tcp_sock *tp = tcp_sk(sk);
669 int i;
670
671 if (tp->md5sig_info->entries6) {
672 for (i = 0; i < tp->md5sig_info->entries6; i++)
673 kfree(tp->md5sig_info->keys6[i].base.key);
674 tp->md5sig_info->entries6 = 0;
675 tcp_free_md5sig_pool();
676 }
677
678 kfree(tp->md5sig_info->keys6);
679 tp->md5sig_info->keys6 = NULL;
680 tp->md5sig_info->alloced6 = 0;
681
682 if (tp->md5sig_info->entries4) {
683 for (i = 0; i < tp->md5sig_info->entries4; i++)
684 kfree(tp->md5sig_info->keys4[i].base.key);
685 tp->md5sig_info->entries4 = 0;
686 tcp_free_md5sig_pool();
687 }
688
689 kfree(tp->md5sig_info->keys4);
690 tp->md5sig_info->keys4 = NULL;
691 tp->md5sig_info->alloced4 = 0;
692}
693
694static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, 558static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
695 int optlen) 559 int optlen)
696{ 560{
697 struct tcp_md5sig cmd; 561 struct tcp_md5sig cmd;
698 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; 562 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
699 u8 *newkey;
700 563
701 if (optlen < sizeof(cmd)) 564 if (optlen < sizeof(cmd))
702 return -EINVAL; 565 return -EINVAL;
@@ -708,36 +571,22 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
708 return -EINVAL; 571 return -EINVAL;
709 572
710 if (!cmd.tcpm_keylen) { 573 if (!cmd.tcpm_keylen) {
711 if (!tcp_sk(sk)->md5sig_info)
712 return -ENOENT;
713 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) 574 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
714 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]); 575 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
715 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr); 576 AF_INET);
577 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
578 AF_INET6);
716 } 579 }
717 580
718 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 581 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
719 return -EINVAL; 582 return -EINVAL;
720 583
721 if (!tcp_sk(sk)->md5sig_info) { 584 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
722 struct tcp_sock *tp = tcp_sk(sk); 585 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
723 struct tcp_md5sig_info *p; 586 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
724
725 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
726 if (!p)
727 return -ENOMEM;
728 587
729 tp->md5sig_info = p; 588 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
730 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 589 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
731 }
732
733 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
734 if (!newkey)
735 return -ENOMEM;
736 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
737 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
738 newkey, cmd.tcpm_keylen);
739 }
740 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
741} 590}
742 591
743static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 592static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
@@ -1074,6 +923,13 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1074 const struct tcphdr *th = tcp_hdr(skb); 923 const struct tcphdr *th = tcp_hdr(skb);
1075 u32 seq = 0, ack_seq = 0; 924 u32 seq = 0, ack_seq = 0;
1076 struct tcp_md5sig_key *key = NULL; 925 struct tcp_md5sig_key *key = NULL;
926#ifdef CONFIG_TCP_MD5SIG
927 const __u8 *hash_location = NULL;
928 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
929 unsigned char newhash[16];
930 int genhash;
931 struct sock *sk1 = NULL;
932#endif
1077 933
1078 if (th->rst) 934 if (th->rst)
1079 return; 935 return;
@@ -1082,8 +938,32 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1082 return; 938 return;
1083 939
1084#ifdef CONFIG_TCP_MD5SIG 940#ifdef CONFIG_TCP_MD5SIG
1085 if (sk) 941 hash_location = tcp_parse_md5sig_option(th);
1086 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr); 942 if (!sk && hash_location) {
943 /*
944 * active side is lost. Try to find listening socket through
945 * source port, and then find md5 key through listening socket.
946 * we are not loose security here:
947 * Incoming packet is checked with md5 hash with finding key,
948 * no RST generated if md5 hash doesn't match.
949 */
950 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
951 &tcp_hashinfo, &ipv6h->daddr,
952 ntohs(th->source), inet6_iif(skb));
953 if (!sk1)
954 return;
955
956 rcu_read_lock();
957 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
958 if (!key)
959 goto release_sk1;
960
961 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
962 if (genhash || memcmp(hash_location, newhash, 16) != 0)
963 goto release_sk1;
964 } else {
965 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
966 }
1087#endif 967#endif
1088 968
1089 if (th->ack) 969 if (th->ack)
@@ -1093,6 +973,14 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1093 (th->doff << 2); 973 (th->doff << 2);
1094 974
1095 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0); 975 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
976
977#ifdef CONFIG_TCP_MD5SIG
978release_sk1:
979 if (sk1) {
980 rcu_read_unlock();
981 sock_put(sk1);
982 }
983#endif
1096} 984}
1097 985
1098static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, 986static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
@@ -1510,10 +1398,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1510 * memory, then we end up not copying the key 1398 * memory, then we end up not copying the key
1511 * across. Shucks. 1399 * across. Shucks.
1512 */ 1400 */
1513 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); 1401 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1514 if (newkey != NULL) 1402 AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1515 tcp_v6_md5_do_add(newsk, &newnp->daddr,
1516 newkey, key->keylen);
1517 } 1403 }
1518#endif 1404#endif
1519 1405
@@ -1898,7 +1784,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
1898static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { 1784static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1899 .md5_lookup = tcp_v6_md5_lookup, 1785 .md5_lookup = tcp_v6_md5_lookup,
1900 .calc_md5_hash = tcp_v6_md5_hash_skb, 1786 .calc_md5_hash = tcp_v6_md5_hash_skb,
1901 .md5_add = tcp_v6_md5_add_func,
1902 .md5_parse = tcp_v6_parse_md5_keys, 1787 .md5_parse = tcp_v6_parse_md5_keys,
1903}; 1788};
1904#endif 1789#endif
@@ -1930,7 +1815,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
1930static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { 1815static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1931 .md5_lookup = tcp_v4_md5_lookup, 1816 .md5_lookup = tcp_v4_md5_lookup,
1932 .calc_md5_hash = tcp_v4_md5_hash_skb, 1817 .calc_md5_hash = tcp_v4_md5_hash_skb,
1933 .md5_add = tcp_v6_md5_add_func,
1934 .md5_parse = tcp_v6_parse_md5_keys, 1818 .md5_parse = tcp_v6_parse_md5_keys,
1935}; 1819};
1936#endif 1820#endif
@@ -2004,11 +1888,6 @@ static int tcp_v6_init_sock(struct sock *sk)
2004 1888
2005static void tcp_v6_destroy_sock(struct sock *sk) 1889static void tcp_v6_destroy_sock(struct sock *sk)
2006{ 1890{
2007#ifdef CONFIG_TCP_MD5SIG
2008 /* Clean up the MD5 key list */
2009 if (tcp_sk(sk)->md5sig_info)
2010 tcp_v6_clear_md5_list(sk);
2011#endif
2012 tcp_v4_destroy_sock(sk); 1891 tcp_v4_destroy_sock(sk);
2013 inet6_destroy_sock(sk); 1892 inet6_destroy_sock(sk);
2014} 1893}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 4eeff89c1aaa..8755a3079d0f 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -146,7 +146,7 @@ static int __xfrm6_output(struct sk_buff *skb)
146 return -EMSGSIZE; 146 return -EMSGSIZE;
147 } 147 }
148 148
149 if ((x && x->props.mode == XFRM_MODE_TUNNEL) && 149 if (x->props.mode == XFRM_MODE_TUNNEL &&
150 ((skb->len > mtu && !skb_is_gso(skb)) || 150 ((skb->len > mtu && !skb_is_gso(skb)) ||
151 dst_allfrag(skb_dst(skb)))) { 151 dst_allfrag(skb_dst(skb)))) {
152 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); 152 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 629b06182f3f..4d751e3d4b4b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1645,6 +1645,24 @@ static void netlink_destroy_callback(struct netlink_callback *cb)
1645 kfree(cb); 1645 kfree(cb);
1646} 1646}
1647 1647
1648struct nlmsghdr *
1649__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1650{
1651 struct nlmsghdr *nlh;
1652 int size = NLMSG_LENGTH(len);
1653
1654 nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
1655 nlh->nlmsg_type = type;
1656 nlh->nlmsg_len = size;
1657 nlh->nlmsg_flags = flags;
1658 nlh->nlmsg_pid = pid;
1659 nlh->nlmsg_seq = seq;
1660 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
1661 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
1662 return nlh;
1663}
1664EXPORT_SYMBOL(__nlmsg_put);
1665
1648/* 1666/*
1649 * It looks a bit ugly. 1667 * It looks a bit ugly.
1650 * It would be better to create kernel thread. 1668 * It would be better to create kernel thread.
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index c29d2568c9e0..a1154717219e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -498,6 +498,37 @@ int genl_unregister_family(struct genl_family *family)
498} 498}
499EXPORT_SYMBOL(genl_unregister_family); 499EXPORT_SYMBOL(genl_unregister_family);
500 500
501/**
502 * genlmsg_put - Add generic netlink header to netlink message
503 * @skb: socket buffer holding the message
504 * @pid: netlink pid the message is addressed to
505 * @seq: sequence number (usually the one of the sender)
506 * @family: generic netlink family
507 * @flags netlink message flags
508 * @cmd: generic netlink command
509 *
510 * Returns pointer to user specific header
511 */
512void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
513 struct genl_family *family, int flags, u8 cmd)
514{
515 struct nlmsghdr *nlh;
516 struct genlmsghdr *hdr;
517
518 nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
519 family->hdrsize, flags);
520 if (nlh == NULL)
521 return NULL;
522
523 hdr = nlmsg_data(nlh);
524 hdr->cmd = cmd;
525 hdr->version = family->version;
526 hdr->reserved = 0;
527
528 return (char *) hdr + GENL_HDRLEN;
529}
530EXPORT_SYMBOL(genlmsg_put);
531
501static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 532static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
502{ 533{
503 struct genl_ops *ops; 534 struct genl_ops *ops;