aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/arcnet/arc-rimi.c8
-rw-r--r--drivers/net/bonding/bond_main.c40
-rw-r--r--drivers/net/caif/caif_hsi.c359
-rw-r--r--drivers/net/caif/caif_shmcore.c4
-rw-r--r--drivers/net/can/dev.c31
-rw-r--r--drivers/net/can/pch_can.c12
-rw-r--r--drivers/net/can/sja1000/ems_pci.c14
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c13
-rw-r--r--drivers/net/can/sja1000/peak_pci.c12
-rw-r--r--drivers/net/can/sja1000/plx_pci.c13
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/dummy.c6
-rw-r--r--drivers/net/ethernet/8390/Kconfig1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c1
-rw-r--r--drivers/net/ethernet/8390/etherh.c1
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c54
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c20
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h58
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c7
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h435
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c645
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c17
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c181
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h20
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c44
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c183
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h268
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h219
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c745
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c783
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c114
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c271
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h15
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c61
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c142
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c316
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c6
-rw-r--r--drivers/net/ethernet/cadence/macb.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c22
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c34
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c301
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c27
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c443
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c280
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c416
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h19
-rw-r--r--drivers/net/ethernet/dlink/sundance.c12
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h23
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c17
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h36
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c245
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c20
-rw-r--r--drivers/net/ethernet/fealnx.c14
-rw-r--r--drivers/net/ethernet/freescale/fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c13
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c30
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/Kconfig13
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c26
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c9
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c71
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c41
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c81
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c23
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h21
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c182
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c381
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c141
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h5
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c1
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c1
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c62
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c21
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c67
-rw-r--r--drivers/net/ethernet/neterion/s2io.c14
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h15
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c5
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c107
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c11
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c32
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c76
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c31
-rw-r--r--drivers/net/ethernet/realtek/8139too.c136
-rw-r--r--drivers/net/ethernet/realtek/r8169.c698
-rw-r--r--drivers/net/ethernet/renesas/Kconfig7
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c114
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h5
-rw-r--r--drivers/net/ethernet/s6gmac.c2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c34
-rw-r--r--drivers/net/ethernet/sis/sis190.c26
-rw-r--r--drivers/net/ethernet/sis/sis900.c375
-rw-r--r--drivers/net/ethernet/smsc/epic100.c403
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c18
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c143
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c18
-rw-r--r--drivers/net/ethernet/sun/sungem.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c18
-rw-r--r--drivers/net/ethernet/sun/sunhme.h1
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c13
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c5
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/tile/tilepro.c77
-rw-r--r--drivers/net/ethernet/via/via-rhine.c12
-rw-r--r--drivers/net/ethernet/via/via-velocity.c9
-rw-r--r--drivers/net/ethernet/wiznet/Kconfig73
-rw-r--r--drivers/net/ethernet/wiznet/Makefile2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c808
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c720
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c6
-rw-r--r--drivers/net/ethernet/xscale/Kconfig6
-rw-r--r--drivers/net/ethernet/xscale/Makefile1
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Kconfig6
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Makefile3
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.c136
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.h22
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/enp2611.c232
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c212
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h115
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc408
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode130
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc272
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode98
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.c437
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.h29
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h57
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.c351
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.h29
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c29
-rw-r--r--drivers/net/hippi/rrunner.c83
-rw-r--r--drivers/net/hyperv/netvsc.c41
-rw-r--r--drivers/net/hyperv/netvsc_drv.c44
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/macvlan.c76
-rw-r--r--drivers/net/phy/bcm63xx.c5
-rw-r--r--drivers/net/phy/davicom.c7
-rw-r--r--drivers/net/phy/dp83640.c31
-rw-r--r--drivers/net/phy/icplus.c12
-rw-r--r--drivers/net/phy/marvell.c18
-rw-r--r--drivers/net/phy/spi_ks8995.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c15
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/team/Kconfig11
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c523
-rw-r--r--drivers/net/team/team_mode_activebackup.c20
-rw-r--r--drivers/net/team/team_mode_loadbalance.c174
-rw-r--r--drivers/net/team/team_mode_roundrobin.c2
-rw-r--r--drivers/net/tokenring/3c359.c14
-rw-r--r--drivers/net/tokenring/Kconfig6
-rw-r--r--drivers/net/tokenring/lanstreamer.c10
-rw-r--r--drivers/net/tokenring/olympic.c14
-rw-r--r--drivers/net/tokenring/tms380tr.c6
-rw-r--r--drivers/net/tokenring/tmspci.c14
-rw-r--r--drivers/net/usb/qmi_wwan.c30
-rw-r--r--drivers/net/usb/smsc75xx.c1
-rw-r--r--drivers/net/usb/usbnet.c1
-rw-r--r--drivers/net/virtio_net.c69
-rw-r--r--drivers/net/wan/dscc4.c13
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c15
-rw-r--r--drivers/net/wimax/i2400m/Kconfig3
-rw-r--r--drivers/net/wimax/i2400m/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c7
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c10
-rw-r--r--drivers/net/wireless/b43/sdio.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c8
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c135
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c71
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig2
-rw-r--r--drivers/net/wireless/libertas/cfg.c9
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c24
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h18
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c9
-rw-r--r--drivers/net/xen-netfront.c2
254 files changed, 10024 insertions, 7586 deletions
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 25197b698dd6..b8b4c7ba884f 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -89,16 +89,16 @@ static int __init arcrimi_probe(struct net_device *dev)
89 BUGLVL(D_NORMAL) printk(VERSION); 89 BUGLVL(D_NORMAL) printk(VERSION);
90 BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); 90 BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n");
91 91
92 BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n", 92 BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n",
93 dev->dev_addr[0], dev->mem_start, dev->irq); 93 dev->dev_addr[0], dev->mem_start, dev->irq);
94 94
95 if (dev->mem_start <= 0 || dev->irq <= 0) { 95 if (dev->mem_start <= 0 || dev->irq <= 0) {
96 BUGMSG(D_NORMAL, "No autoprobe for RIM I; you " 96 BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you "
97 "must specify the shmem and irq!\n"); 97 "must specify the shmem and irq!\n");
98 return -ENODEV; 98 return -ENODEV;
99 } 99 }
100 if (dev->dev_addr[0] == 0) { 100 if (dev->dev_addr[0] == 0) {
101 BUGMSG(D_NORMAL, "You need to specify your card's station " 101 BUGLVL(D_NORMAL) printk("You need to specify your card's station "
102 "ID!\n"); 102 "ID!\n");
103 return -ENODEV; 103 return -ENODEV;
104 } 104 }
@@ -109,7 +109,7 @@ static int __init arcrimi_probe(struct net_device *dev)
109 * will be taken. 109 * will be taken.
110 */ 110 */
111 if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { 111 if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) {
112 BUGMSG(D_NORMAL, "Card memory already allocated\n"); 112 BUGLVL(D_NORMAL) printk("Card memory already allocated\n");
113 return -ENODEV; 113 return -ENODEV;
114 } 114 }
115 return arcrimi_found(dev); 115 return arcrimi_found(dev);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62d2409bb293..44e6a64eecdd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1726,7 +1726,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1726 1726
1727 read_lock(&bond->lock); 1727 read_lock(&bond->lock);
1728 1728
1729 new_slave->last_arp_rx = jiffies; 1729 new_slave->last_arp_rx = jiffies -
1730 (msecs_to_jiffies(bond->params.arp_interval) + 1);
1730 1731
1731 if (bond->params.miimon && !bond->params.use_carrier) { 1732 if (bond->params.miimon && !bond->params.use_carrier) {
1732 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 1733 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1751,22 +1752,30 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1751 } 1752 }
1752 1753
1753 /* check for initial state */ 1754 /* check for initial state */
1754 if (!bond->params.miimon || 1755 if (bond->params.miimon) {
1755 (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) { 1756 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1756 if (bond->params.updelay) { 1757 if (bond->params.updelay) {
1757 pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n"); 1758 new_slave->link = BOND_LINK_BACK;
1758 new_slave->link = BOND_LINK_BACK; 1759 new_slave->delay = bond->params.updelay;
1759 new_slave->delay = bond->params.updelay; 1760 } else {
1761 new_slave->link = BOND_LINK_UP;
1762 }
1760 } else { 1763 } else {
1761 pr_debug("Initial state of slave_dev is BOND_LINK_UP\n"); 1764 new_slave->link = BOND_LINK_DOWN;
1762 new_slave->link = BOND_LINK_UP;
1763 } 1765 }
1764 new_slave->jiffies = jiffies; 1766 } else if (bond->params.arp_interval) {
1767 new_slave->link = (netif_carrier_ok(slave_dev) ?
1768 BOND_LINK_UP : BOND_LINK_DOWN);
1765 } else { 1769 } else {
1766 pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n"); 1770 new_slave->link = BOND_LINK_UP;
1767 new_slave->link = BOND_LINK_DOWN;
1768 } 1771 }
1769 1772
1773 if (new_slave->link != BOND_LINK_DOWN)
1774 new_slave->jiffies = jiffies;
1775 pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
1776 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1777 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1778
1770 bond_update_speed_duplex(new_slave); 1779 bond_update_speed_duplex(new_slave);
1771 1780
1772 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1781 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
@@ -4820,12 +4829,9 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
4820 return 0; 4829 return 0;
4821} 4830}
4822 4831
4823static int bond_get_tx_queues(struct net *net, struct nlattr *tb[], 4832static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
4824 unsigned int *num_queues,
4825 unsigned int *real_num_queues)
4826{ 4833{
4827 *num_queues = tx_queues; 4834 return tx_queues;
4828 return 0;
4829} 4835}
4830 4836
4831static struct rtnl_link_ops bond_link_ops __read_mostly = { 4837static struct rtnl_link_ops bond_link_ops __read_mostly = {
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 9a66e2a910ae..1520814c77c7 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -6,6 +6,8 @@
6 * License terms: GNU General Public License (GPL) version 2. 6 * License terms: GNU General Public License (GPL) version 2.
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME fmt
10
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/module.h> 12#include <linux/module.h>
11#include <linux/device.h> 13#include <linux/device.h>
@@ -19,6 +21,7 @@
19#include <linux/if_arp.h> 21#include <linux/if_arp.h>
20#include <linux/timer.h> 22#include <linux/timer.h>
21#include <linux/rtnetlink.h> 23#include <linux/rtnetlink.h>
24#include <linux/pkt_sched.h>
22#include <net/caif/caif_layer.h> 25#include <net/caif/caif_layer.h>
23#include <net/caif/caif_hsi.h> 26#include <net/caif/caif_hsi.h>
24 27
@@ -34,6 +37,10 @@ static int inactivity_timeout = 1000;
34module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR); 37module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
35MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms."); 38MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
36 39
40static int aggregation_timeout = 1;
41module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
42MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
43
37/* 44/*
38 * HSI padding options. 45 * HSI padding options.
39 * Warning: must be a base of 2 (& operation used) and can not be zero ! 46 * Warning: must be a base of 2 (& operation used) and can not be zero !
@@ -86,24 +93,84 @@ static void cfhsi_inactivity_tout(unsigned long arg)
86 queue_work(cfhsi->wq, &cfhsi->wake_down_work); 93 queue_work(cfhsi->wq, &cfhsi->wake_down_work);
87} 94}
88 95
96static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
97 const struct sk_buff *skb,
98 int direction)
99{
100 struct caif_payload_info *info;
101 int hpad, tpad, len;
102
103 info = (struct caif_payload_info *)&skb->cb;
104 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
105 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
106 len = skb->len + hpad + tpad;
107
108 if (direction > 0)
109 cfhsi->aggregation_len += len;
110 else if (direction < 0)
111 cfhsi->aggregation_len -= len;
112}
113
114static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
115{
116 int i;
117
118 if (cfhsi->aggregation_timeout < 0)
119 return true;
120
121 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
122 if (cfhsi->qhead[i].qlen)
123 return true;
124 }
125
126 /* TODO: Use aggregation_len instead */
127 if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
128 return true;
129
130 return false;
131}
132
133static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
134{
135 struct sk_buff *skb;
136 int i;
137
138 for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
139 skb = skb_dequeue(&cfhsi->qhead[i]);
140 if (skb)
141 break;
142 }
143
144 return skb;
145}
146
147static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
148{
149 int i, len = 0;
150 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
151 len += skb_queue_len(&cfhsi->qhead[i]);
152 return len;
153}
154
89static void cfhsi_abort_tx(struct cfhsi *cfhsi) 155static void cfhsi_abort_tx(struct cfhsi *cfhsi)
90{ 156{
91 struct sk_buff *skb; 157 struct sk_buff *skb;
92 158
93 for (;;) { 159 for (;;) {
94 spin_lock_bh(&cfhsi->lock); 160 spin_lock_bh(&cfhsi->lock);
95 skb = skb_dequeue(&cfhsi->qhead); 161 skb = cfhsi_dequeue(cfhsi);
96 if (!skb) 162 if (!skb)
97 break; 163 break;
98 164
99 cfhsi->ndev->stats.tx_errors++; 165 cfhsi->ndev->stats.tx_errors++;
100 cfhsi->ndev->stats.tx_dropped++; 166 cfhsi->ndev->stats.tx_dropped++;
167 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
101 spin_unlock_bh(&cfhsi->lock); 168 spin_unlock_bh(&cfhsi->lock);
102 kfree_skb(skb); 169 kfree_skb(skb);
103 } 170 }
104 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 171 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
105 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 172 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
106 mod_timer(&cfhsi->timer, 173 mod_timer(&cfhsi->inactivity_timer,
107 jiffies + cfhsi->inactivity_timeout); 174 jiffies + cfhsi->inactivity_timeout);
108 spin_unlock_bh(&cfhsi->lock); 175 spin_unlock_bh(&cfhsi->lock);
109} 176}
@@ -169,7 +236,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
169 struct sk_buff *skb; 236 struct sk_buff *skb;
170 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; 237 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
171 238
172 skb = skb_dequeue(&cfhsi->qhead); 239 skb = cfhsi_dequeue(cfhsi);
173 if (!skb) 240 if (!skb)
174 return 0; 241 return 0;
175 242
@@ -196,11 +263,16 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
196 pemb += hpad; 263 pemb += hpad;
197 264
198 /* Update network statistics. */ 265 /* Update network statistics. */
266 spin_lock_bh(&cfhsi->lock);
199 cfhsi->ndev->stats.tx_packets++; 267 cfhsi->ndev->stats.tx_packets++;
200 cfhsi->ndev->stats.tx_bytes += skb->len; 268 cfhsi->ndev->stats.tx_bytes += skb->len;
269 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
270 spin_unlock_bh(&cfhsi->lock);
201 271
202 /* Copy in embedded CAIF frame. */ 272 /* Copy in embedded CAIF frame. */
203 skb_copy_bits(skb, 0, pemb, skb->len); 273 skb_copy_bits(skb, 0, pemb, skb->len);
274
275 /* Consume the SKB */
204 consume_skb(skb); 276 consume_skb(skb);
205 skb = NULL; 277 skb = NULL;
206 } 278 }
@@ -214,7 +286,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
214 int tpad = 0; 286 int tpad = 0;
215 287
216 if (!skb) 288 if (!skb)
217 skb = skb_dequeue(&cfhsi->qhead); 289 skb = cfhsi_dequeue(cfhsi);
218 290
219 if (!skb) 291 if (!skb)
220 break; 292 break;
@@ -233,8 +305,11 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
233 pfrm += hpad; 305 pfrm += hpad;
234 306
235 /* Update network statistics. */ 307 /* Update network statistics. */
308 spin_lock_bh(&cfhsi->lock);
236 cfhsi->ndev->stats.tx_packets++; 309 cfhsi->ndev->stats.tx_packets++;
237 cfhsi->ndev->stats.tx_bytes += skb->len; 310 cfhsi->ndev->stats.tx_bytes += skb->len;
311 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
312 spin_unlock_bh(&cfhsi->lock);
238 313
239 /* Copy in CAIF frame. */ 314 /* Copy in CAIF frame. */
240 skb_copy_bits(skb, 0, pfrm, skb->len); 315 skb_copy_bits(skb, 0, pfrm, skb->len);
@@ -244,6 +319,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
244 319
245 /* Update frame pointer. */ 320 /* Update frame pointer. */
246 pfrm += skb->len + tpad; 321 pfrm += skb->len + tpad;
322
323 /* Consume the SKB */
247 consume_skb(skb); 324 consume_skb(skb);
248 skb = NULL; 325 skb = NULL;
249 326
@@ -258,8 +335,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
258 } 335 }
259 336
260 /* Check if we can piggy-back another descriptor. */ 337 /* Check if we can piggy-back another descriptor. */
261 skb = skb_peek(&cfhsi->qhead); 338 if (cfhsi_can_send_aggregate(cfhsi))
262 if (skb)
263 desc->header |= CFHSI_PIGGY_DESC; 339 desc->header |= CFHSI_PIGGY_DESC;
264 else 340 else
265 desc->header &= ~CFHSI_PIGGY_DESC; 341 desc->header &= ~CFHSI_PIGGY_DESC;
@@ -267,61 +343,71 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
267 return CFHSI_DESC_SZ + pld_len; 343 return CFHSI_DESC_SZ + pld_len;
268} 344}
269 345
270static void cfhsi_tx_done(struct cfhsi *cfhsi) 346static void cfhsi_start_tx(struct cfhsi *cfhsi)
271{ 347{
272 struct cfhsi_desc *desc = NULL; 348 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
273 int len = 0; 349 int len, res;
274 int res;
275 350
276 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 351 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
277 352
278 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 353 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
279 return; 354 return;
280 355
281 desc = (struct cfhsi_desc *)cfhsi->tx_buf;
282
283 do { 356 do {
284 /*
285 * Send flow on if flow off has been previously signalled
286 * and number of packets is below low water mark.
287 */
288 spin_lock_bh(&cfhsi->lock);
289 if (cfhsi->flow_off_sent &&
290 cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
291 cfhsi->cfdev.flowctrl) {
292
293 cfhsi->flow_off_sent = 0;
294 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
295 }
296 spin_unlock_bh(&cfhsi->lock);
297
298 /* Create HSI frame. */ 357 /* Create HSI frame. */
299 do { 358 len = cfhsi_tx_frm(desc, cfhsi);
300 len = cfhsi_tx_frm(desc, cfhsi); 359 if (!len) {
301 if (!len) { 360 spin_lock_bh(&cfhsi->lock);
302 spin_lock_bh(&cfhsi->lock); 361 if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
303 if (unlikely(skb_peek(&cfhsi->qhead))) {
304 spin_unlock_bh(&cfhsi->lock);
305 continue;
306 }
307 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
308 /* Start inactivity timer. */
309 mod_timer(&cfhsi->timer,
310 jiffies + cfhsi->inactivity_timeout);
311 spin_unlock_bh(&cfhsi->lock); 362 spin_unlock_bh(&cfhsi->lock);
312 goto done; 363 res = -EAGAIN;
364 continue;
313 } 365 }
314 } while (!len); 366 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
367 /* Start inactivity timer. */
368 mod_timer(&cfhsi->inactivity_timer,
369 jiffies + cfhsi->inactivity_timeout);
370 spin_unlock_bh(&cfhsi->lock);
371 break;
372 }
315 373
316 /* Set up new transfer. */ 374 /* Set up new transfer. */
317 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 375 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
318 if (WARN_ON(res < 0)) { 376 if (WARN_ON(res < 0))
319 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 377 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
320 __func__, res); 378 __func__, res);
321 }
322 } while (res < 0); 379 } while (res < 0);
380}
381
382static void cfhsi_tx_done(struct cfhsi *cfhsi)
383{
384 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
385
386 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
387 return;
388
389 /*
390 * Send flow on if flow off has been previously signalled
391 * and number of packets is below low water mark.
392 */
393 spin_lock_bh(&cfhsi->lock);
394 if (cfhsi->flow_off_sent &&
395 cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
396 cfhsi->cfdev.flowctrl) {
397
398 cfhsi->flow_off_sent = 0;
399 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
400 }
401
402 if (cfhsi_can_send_aggregate(cfhsi)) {
403 spin_unlock_bh(&cfhsi->lock);
404 cfhsi_start_tx(cfhsi);
405 } else {
406 mod_timer(&cfhsi->aggregation_timer,
407 jiffies + cfhsi->aggregation_timeout);
408 spin_unlock_bh(&cfhsi->lock);
409 }
323 410
324done:
325 return; 411 return;
326} 412}
327 413
@@ -560,7 +646,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
560 646
561 /* Update inactivity timer if pending. */ 647 /* Update inactivity timer if pending. */
562 spin_lock_bh(&cfhsi->lock); 648 spin_lock_bh(&cfhsi->lock);
563 mod_timer_pending(&cfhsi->timer, 649 mod_timer_pending(&cfhsi->inactivity_timer,
564 jiffies + cfhsi->inactivity_timeout); 650 jiffies + cfhsi->inactivity_timeout);
565 spin_unlock_bh(&cfhsi->lock); 651 spin_unlock_bh(&cfhsi->lock);
566 652
@@ -744,14 +830,14 @@ static void cfhsi_wake_up(struct work_struct *work)
744 size_t fifo_occupancy = 0; 830 size_t fifo_occupancy = 0;
745 831
746 /* Wakeup timeout */ 832 /* Wakeup timeout */
747 dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", 833 dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
748 __func__); 834 __func__);
749 835
750 /* Check FIFO to check if modem has sent something. */ 836 /* Check FIFO to check if modem has sent something. */
751 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 837 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
752 &fifo_occupancy)); 838 &fifo_occupancy));
753 839
754 dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", 840 dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
755 __func__, (unsigned) fifo_occupancy); 841 __func__, (unsigned) fifo_occupancy);
756 842
757 /* Check if we misssed the interrupt. */ 843 /* Check if we misssed the interrupt. */
@@ -793,12 +879,12 @@ wake_ack:
793 879
794 spin_lock_bh(&cfhsi->lock); 880 spin_lock_bh(&cfhsi->lock);
795 881
796 /* Resume transmit if queue is not empty. */ 882 /* Resume transmit if queues are not empty. */
797 if (!skb_peek(&cfhsi->qhead)) { 883 if (!cfhsi_tx_queue_len(cfhsi)) {
798 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", 884 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
799 __func__); 885 __func__);
800 /* Start inactivity timer. */ 886 /* Start inactivity timer. */
801 mod_timer(&cfhsi->timer, 887 mod_timer(&cfhsi->inactivity_timer,
802 jiffies + cfhsi->inactivity_timeout); 888 jiffies + cfhsi->inactivity_timeout);
803 spin_unlock_bh(&cfhsi->lock); 889 spin_unlock_bh(&cfhsi->lock);
804 return; 890 return;
@@ -934,20 +1020,53 @@ static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
934 wake_up_interruptible(&cfhsi->wake_down_wait); 1020 wake_up_interruptible(&cfhsi->wake_down_wait);
935} 1021}
936 1022
1023static void cfhsi_aggregation_tout(unsigned long arg)
1024{
1025 struct cfhsi *cfhsi = (struct cfhsi *)arg;
1026
1027 dev_dbg(&cfhsi->ndev->dev, "%s.\n",
1028 __func__);
1029
1030 cfhsi_start_tx(cfhsi);
1031}
1032
937static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) 1033static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
938{ 1034{
939 struct cfhsi *cfhsi = NULL; 1035 struct cfhsi *cfhsi = NULL;
940 int start_xfer = 0; 1036 int start_xfer = 0;
941 int timer_active; 1037 int timer_active;
1038 int prio;
942 1039
943 if (!dev) 1040 if (!dev)
944 return -EINVAL; 1041 return -EINVAL;
945 1042
946 cfhsi = netdev_priv(dev); 1043 cfhsi = netdev_priv(dev);
947 1044
1045 switch (skb->priority) {
1046 case TC_PRIO_BESTEFFORT:
1047 case TC_PRIO_FILLER:
1048 case TC_PRIO_BULK:
1049 prio = CFHSI_PRIO_BEBK;
1050 break;
1051 case TC_PRIO_INTERACTIVE_BULK:
1052 prio = CFHSI_PRIO_VI;
1053 break;
1054 case TC_PRIO_INTERACTIVE:
1055 prio = CFHSI_PRIO_VO;
1056 break;
1057 case TC_PRIO_CONTROL:
1058 default:
1059 prio = CFHSI_PRIO_CTL;
1060 break;
1061 }
1062
948 spin_lock_bh(&cfhsi->lock); 1063 spin_lock_bh(&cfhsi->lock);
949 1064
950 skb_queue_tail(&cfhsi->qhead, skb); 1065 /* Update aggregation statistics */
1066 cfhsi_update_aggregation_stats(cfhsi, skb, 1);
1067
1068 /* Queue the SKB */
1069 skb_queue_tail(&cfhsi->qhead[prio], skb);
951 1070
952 /* Sanity check; xmit should not be called after unregister_netdev */ 1071 /* Sanity check; xmit should not be called after unregister_netdev */
953 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { 1072 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
@@ -958,7 +1077,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
958 1077
959 /* Send flow off if number of packets is above high water mark. */ 1078 /* Send flow off if number of packets is above high water mark. */
960 if (!cfhsi->flow_off_sent && 1079 if (!cfhsi->flow_off_sent &&
961 cfhsi->qhead.qlen > cfhsi->q_high_mark && 1080 cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
962 cfhsi->cfdev.flowctrl) { 1081 cfhsi->cfdev.flowctrl) {
963 cfhsi->flow_off_sent = 1; 1082 cfhsi->flow_off_sent = 1;
964 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); 1083 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -970,12 +1089,18 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
970 } 1089 }
971 1090
972 if (!start_xfer) { 1091 if (!start_xfer) {
1092 /* Send aggregate if it is possible */
1093 bool aggregate_ready =
1094 cfhsi_can_send_aggregate(cfhsi) &&
1095 del_timer(&cfhsi->aggregation_timer) > 0;
973 spin_unlock_bh(&cfhsi->lock); 1096 spin_unlock_bh(&cfhsi->lock);
1097 if (aggregate_ready)
1098 cfhsi_start_tx(cfhsi);
974 return 0; 1099 return 0;
975 } 1100 }
976 1101
977 /* Delete inactivity timer if started. */ 1102 /* Delete inactivity timer if started. */
978 timer_active = del_timer_sync(&cfhsi->timer); 1103 timer_active = del_timer_sync(&cfhsi->inactivity_timer);
979 1104
980 spin_unlock_bh(&cfhsi->lock); 1105 spin_unlock_bh(&cfhsi->lock);
981 1106
@@ -1004,28 +1129,11 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1004 return 0; 1129 return 0;
1005} 1130}
1006 1131
1007static int cfhsi_open(struct net_device *dev) 1132static const struct net_device_ops cfhsi_ops;
1008{
1009 netif_wake_queue(dev);
1010
1011 return 0;
1012}
1013
1014static int cfhsi_close(struct net_device *dev)
1015{
1016 netif_stop_queue(dev);
1017
1018 return 0;
1019}
1020
1021static const struct net_device_ops cfhsi_ops = {
1022 .ndo_open = cfhsi_open,
1023 .ndo_stop = cfhsi_close,
1024 .ndo_start_xmit = cfhsi_xmit
1025};
1026 1133
1027static void cfhsi_setup(struct net_device *dev) 1134static void cfhsi_setup(struct net_device *dev)
1028{ 1135{
1136 int i;
1029 struct cfhsi *cfhsi = netdev_priv(dev); 1137 struct cfhsi *cfhsi = netdev_priv(dev);
1030 dev->features = 0; 1138 dev->features = 0;
1031 dev->netdev_ops = &cfhsi_ops; 1139 dev->netdev_ops = &cfhsi_ops;
@@ -1034,7 +1142,8 @@ static void cfhsi_setup(struct net_device *dev)
1034 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1142 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1035 dev->tx_queue_len = 0; 1143 dev->tx_queue_len = 0;
1036 dev->destructor = free_netdev; 1144 dev->destructor = free_netdev;
1037 skb_queue_head_init(&cfhsi->qhead); 1145 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1146 skb_queue_head_init(&cfhsi->qhead[i]);
1038 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 1147 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1039 cfhsi->cfdev.use_frag = false; 1148 cfhsi->cfdev.use_frag = false;
1040 cfhsi->cfdev.use_stx = false; 1149 cfhsi->cfdev.use_stx = false;
@@ -1046,7 +1155,7 @@ int cfhsi_probe(struct platform_device *pdev)
1046{ 1155{
1047 struct cfhsi *cfhsi = NULL; 1156 struct cfhsi *cfhsi = NULL;
1048 struct net_device *ndev; 1157 struct net_device *ndev;
1049 struct cfhsi_dev *dev; 1158
1050 int res; 1159 int res;
1051 1160
1052 ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup); 1161 ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
@@ -1057,6 +1166,34 @@ int cfhsi_probe(struct platform_device *pdev)
1057 cfhsi->ndev = ndev; 1166 cfhsi->ndev = ndev;
1058 cfhsi->pdev = pdev; 1167 cfhsi->pdev = pdev;
1059 1168
1169 /* Assign the HSI device. */
1170 cfhsi->dev = pdev->dev.platform_data;
1171
1172 /* Assign the driver to this HSI device. */
1173 cfhsi->dev->drv = &cfhsi->drv;
1174
1175 /* Register network device. */
1176 res = register_netdev(ndev);
1177 if (res) {
1178 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1179 __func__, res);
1180 free_netdev(ndev);
1181 }
1182 /* Add CAIF HSI device to list. */
1183 spin_lock(&cfhsi_list_lock);
1184 list_add_tail(&cfhsi->list, &cfhsi_list);
1185 spin_unlock(&cfhsi_list_lock);
1186
1187 return res;
1188}
1189
1190static int cfhsi_open(struct net_device *ndev)
1191{
1192 struct cfhsi *cfhsi = netdev_priv(ndev);
1193 int res;
1194
1195 clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1196
1060 /* Initialize state vaiables. */ 1197 /* Initialize state vaiables. */
1061 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 1198 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
1062 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; 1199 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
@@ -1066,12 +1203,6 @@ int cfhsi_probe(struct platform_device *pdev)
1066 cfhsi->q_low_mark = LOW_WATER_MARK; 1203 cfhsi->q_low_mark = LOW_WATER_MARK;
1067 cfhsi->q_high_mark = HIGH_WATER_MARK; 1204 cfhsi->q_high_mark = HIGH_WATER_MARK;
1068 1205
1069 /* Assign the HSI device. */
1070 dev = (struct cfhsi_dev *)pdev->dev.platform_data;
1071 cfhsi->dev = dev;
1072
1073 /* Assign the driver to this HSI device. */
1074 dev->drv = &cfhsi->drv;
1075 1206
1076 /* 1207 /*
1077 * Allocate a TX buffer with the size of a HSI packet descriptors 1208 * Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1111,6 +1242,9 @@ int cfhsi_probe(struct platform_device *pdev)
1111 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA; 1242 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1112 } 1243 }
1113 1244
1245 /* Initialize aggregation timeout */
1246 cfhsi->aggregation_timeout = aggregation_timeout;
1247
1114 /* Initialize recieve vaiables. */ 1248 /* Initialize recieve vaiables. */
1115 cfhsi->rx_ptr = cfhsi->rx_buf; 1249 cfhsi->rx_ptr = cfhsi->rx_buf;
1116 cfhsi->rx_len = CFHSI_DESC_SZ; 1250 cfhsi->rx_len = CFHSI_DESC_SZ;
@@ -1136,9 +1270,9 @@ int cfhsi_probe(struct platform_device *pdev)
1136 clear_bit(CFHSI_AWAKE, &cfhsi->bits); 1270 clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1137 1271
1138 /* Create work thread. */ 1272 /* Create work thread. */
1139 cfhsi->wq = create_singlethread_workqueue(pdev->name); 1273 cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
1140 if (!cfhsi->wq) { 1274 if (!cfhsi->wq) {
1141 dev_err(&ndev->dev, "%s: Failed to create work queue.\n", 1275 dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
1142 __func__); 1276 __func__);
1143 res = -ENODEV; 1277 res = -ENODEV;
1144 goto err_create_wq; 1278 goto err_create_wq;
@@ -1150,18 +1284,17 @@ int cfhsi_probe(struct platform_device *pdev)
1150 init_waitqueue_head(&cfhsi->flush_fifo_wait); 1284 init_waitqueue_head(&cfhsi->flush_fifo_wait);
1151 1285
1152 /* Setup the inactivity timer. */ 1286 /* Setup the inactivity timer. */
1153 init_timer(&cfhsi->timer); 1287 init_timer(&cfhsi->inactivity_timer);
1154 cfhsi->timer.data = (unsigned long)cfhsi; 1288 cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
1155 cfhsi->timer.function = cfhsi_inactivity_tout; 1289 cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
1156 /* Setup the slowpath RX timer. */ 1290 /* Setup the slowpath RX timer. */
1157 init_timer(&cfhsi->rx_slowpath_timer); 1291 init_timer(&cfhsi->rx_slowpath_timer);
1158 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi; 1292 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
1159 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath; 1293 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
1160 1294 /* Setup the aggregation timer. */
1161 /* Add CAIF HSI device to list. */ 1295 init_timer(&cfhsi->aggregation_timer);
1162 spin_lock(&cfhsi_list_lock); 1296 cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
1163 list_add_tail(&cfhsi->list, &cfhsi_list); 1297 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
1164 spin_unlock(&cfhsi_list_lock);
1165 1298
1166 /* Activate HSI interface. */ 1299 /* Activate HSI interface. */
1167 res = cfhsi->dev->cfhsi_up(cfhsi->dev); 1300 res = cfhsi->dev->cfhsi_up(cfhsi->dev);
@@ -1175,21 +1308,10 @@ int cfhsi_probe(struct platform_device *pdev)
1175 /* Flush FIFO */ 1308 /* Flush FIFO */
1176 res = cfhsi_flush_fifo(cfhsi); 1309 res = cfhsi_flush_fifo(cfhsi);
1177 if (res) { 1310 if (res) {
1178 dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n", 1311 dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
1179 __func__, res); 1312 __func__, res);
1180 goto err_net_reg; 1313 goto err_net_reg;
1181 } 1314 }
1182
1183 /* Register network device. */
1184 res = register_netdev(ndev);
1185 if (res) {
1186 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1187 __func__, res);
1188 goto err_net_reg;
1189 }
1190
1191 netif_stop_queue(ndev);
1192
1193 return res; 1315 return res;
1194 1316
1195 err_net_reg: 1317 err_net_reg:
@@ -1203,17 +1325,13 @@ int cfhsi_probe(struct platform_device *pdev)
1203 err_alloc_rx: 1325 err_alloc_rx:
1204 kfree(cfhsi->tx_buf); 1326 kfree(cfhsi->tx_buf);
1205 err_alloc_tx: 1327 err_alloc_tx:
1206 free_netdev(ndev);
1207
1208 return res; 1328 return res;
1209} 1329}
1210 1330
1211static void cfhsi_shutdown(struct cfhsi *cfhsi) 1331static int cfhsi_close(struct net_device *ndev)
1212{ 1332{
1213 u8 *tx_buf, *rx_buf; 1333 struct cfhsi *cfhsi = netdev_priv(ndev);
1214 1334 u8 *tx_buf, *rx_buf, *flip_buf;
1215 /* Stop TXing */
1216 netif_tx_stop_all_queues(cfhsi->ndev);
1217 1335
1218 /* going to shutdown driver */ 1336 /* going to shutdown driver */
1219 set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); 1337 set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
@@ -1222,8 +1340,9 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1222 flush_workqueue(cfhsi->wq); 1340 flush_workqueue(cfhsi->wq);
1223 1341
1224 /* Delete timers if pending */ 1342 /* Delete timers if pending */
1225 del_timer_sync(&cfhsi->timer); 1343 del_timer_sync(&cfhsi->inactivity_timer);
1226 del_timer_sync(&cfhsi->rx_slowpath_timer); 1344 del_timer_sync(&cfhsi->rx_slowpath_timer);
1345 del_timer_sync(&cfhsi->aggregation_timer);
1227 1346
1228 /* Cancel pending RX request (if any) */ 1347 /* Cancel pending RX request (if any) */
1229 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); 1348 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
@@ -1234,21 +1353,26 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1234 /* Store bufferes: will be freed later. */ 1353 /* Store bufferes: will be freed later. */
1235 tx_buf = cfhsi->tx_buf; 1354 tx_buf = cfhsi->tx_buf;
1236 rx_buf = cfhsi->rx_buf; 1355 rx_buf = cfhsi->rx_buf;
1237 1356 flip_buf = cfhsi->rx_flip_buf;
1238 /* Flush transmit queues. */ 1357 /* Flush transmit queues. */
1239 cfhsi_abort_tx(cfhsi); 1358 cfhsi_abort_tx(cfhsi);
1240 1359
1241 /* Deactivate interface */ 1360 /* Deactivate interface */
1242 cfhsi->dev->cfhsi_down(cfhsi->dev); 1361 cfhsi->dev->cfhsi_down(cfhsi->dev);
1243 1362
1244 /* Finally unregister the network device. */
1245 unregister_netdev(cfhsi->ndev);
1246
1247 /* Free buffers. */ 1363 /* Free buffers. */
1248 kfree(tx_buf); 1364 kfree(tx_buf);
1249 kfree(rx_buf); 1365 kfree(rx_buf);
1366 kfree(flip_buf);
1367 return 0;
1250} 1368}
1251 1369
1370static const struct net_device_ops cfhsi_ops = {
1371 .ndo_open = cfhsi_open,
1372 .ndo_stop = cfhsi_close,
1373 .ndo_start_xmit = cfhsi_xmit
1374};
1375
1252int cfhsi_remove(struct platform_device *pdev) 1376int cfhsi_remove(struct platform_device *pdev)
1253{ 1377{
1254 struct list_head *list_node; 1378 struct list_head *list_node;
@@ -1265,10 +1389,6 @@ int cfhsi_remove(struct platform_device *pdev)
1265 /* Remove from list. */ 1389 /* Remove from list. */
1266 list_del(list_node); 1390 list_del(list_node);
1267 spin_unlock(&cfhsi_list_lock); 1391 spin_unlock(&cfhsi_list_lock);
1268
1269 /* Shutdown driver. */
1270 cfhsi_shutdown(cfhsi);
1271
1272 return 0; 1392 return 0;
1273 } 1393 }
1274 } 1394 }
@@ -1299,8 +1419,7 @@ static void __exit cfhsi_exit_module(void)
1299 list_del(list_node); 1419 list_del(list_node);
1300 spin_unlock(&cfhsi_list_lock); 1420 spin_unlock(&cfhsi_list_lock);
1301 1421
1302 /* Shutdown driver. */ 1422 unregister_netdevice(cfhsi->ndev);
1303 cfhsi_shutdown(cfhsi);
1304 1423
1305 spin_lock(&cfhsi_list_lock); 1424 spin_lock(&cfhsi_list_lock);
1306 } 1425 }
@@ -1325,8 +1444,6 @@ static int __init cfhsi_init_module(void)
1325 goto err_dev_register; 1444 goto err_dev_register;
1326 } 1445 }
1327 1446
1328 return result;
1329
1330 err_dev_register: 1447 err_dev_register:
1331 return result; 1448 return result;
1332} 1449}
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 5b2041319a32..bc497d718858 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/io.h>
16 17
17#include <net/caif/caif_device.h> 18#include <net/caif/caif_device.h>
18#include <net/caif/caif_shm.h> 19#include <net/caif/caif_shm.h>
@@ -647,6 +648,9 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
647 if (pshm_dev->shm_loopback) 648 if (pshm_dev->shm_loopback)
648 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr; 649 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
649 else 650 else
651 /*
652 * FIXME: the result of ioremap is not a pointer - arnd
653 */
650 tx_buf->desc_vptr = 654 tx_buf->desc_vptr =
651 ioremap(tx_buf->phy_addr, TX_BUF_SZ); 655 ioremap(tx_buf->phy_addr, TX_BUF_SZ);
652 656
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c5fe3a3db8c9..f03d7a481a80 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -687,18 +687,19 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
687 687
688 if (priv->do_get_state) 688 if (priv->do_get_state)
689 priv->do_get_state(dev, &state); 689 priv->do_get_state(dev, &state);
690 NLA_PUT_U32(skb, IFLA_CAN_STATE, state); 690 if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
691 NLA_PUT(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm); 691 nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
692 NLA_PUT_U32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms); 692 nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
693 NLA_PUT(skb, IFLA_CAN_BITTIMING, 693 nla_put(skb, IFLA_CAN_BITTIMING,
694 sizeof(priv->bittiming), &priv->bittiming); 694 sizeof(priv->bittiming), &priv->bittiming) ||
695 NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock); 695 nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
696 if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec)) 696 (priv->do_get_berr_counter &&
697 NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec); 697 !priv->do_get_berr_counter(dev, &bec) &&
698 if (priv->bittiming_const) 698 nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
699 NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST, 699 (priv->bittiming_const &&
700 sizeof(*priv->bittiming_const), priv->bittiming_const); 700 nla_put(skb, IFLA_CAN_BITTIMING_CONST,
701 701 sizeof(*priv->bittiming_const), priv->bittiming_const)))
702 goto nla_put_failure;
702 return 0; 703 return 0;
703 704
704nla_put_failure: 705nla_put_failure:
@@ -714,9 +715,9 @@ static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
714{ 715{
715 struct can_priv *priv = netdev_priv(dev); 716 struct can_priv *priv = netdev_priv(dev);
716 717
717 NLA_PUT(skb, IFLA_INFO_XSTATS, 718 if (nla_put(skb, IFLA_INFO_XSTATS,
718 sizeof(priv->can_stats), &priv->can_stats); 719 sizeof(priv->can_stats), &priv->can_stats))
719 720 goto nla_put_failure;
720 return 0; 721 return 0;
721 722
722nla_put_failure: 723nla_put_failure:
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 2bb215e00eb1..1226297e7676 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1274,17 +1274,7 @@ static struct pci_driver pch_can_pci_driver = {
1274 .resume = pch_can_resume, 1274 .resume = pch_can_resume,
1275}; 1275};
1276 1276
1277static int __init pch_can_pci_init(void) 1277module_pci_driver(pch_can_pci_driver);
1278{
1279 return pci_register_driver(&pch_can_pci_driver);
1280}
1281module_init(pch_can_pci_init);
1282
1283static void __exit pch_can_pci_exit(void)
1284{
1285 pci_unregister_driver(&pch_can_pci_driver);
1286}
1287module_exit(pch_can_pci_exit);
1288 1278
1289MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver"); 1279MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
1290MODULE_LICENSE("GPL v2"); 1280MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 36f4f9780c30..5c6d412bafb5 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -371,16 +371,4 @@ static struct pci_driver ems_pci_driver = {
371 .remove = ems_pci_del_card, 371 .remove = ems_pci_del_card,
372}; 372};
373 373
374static int __init ems_pci_init(void) 374module_pci_driver(ems_pci_driver);
375{
376 return pci_register_driver(&ems_pci_driver);
377}
378
379static void __exit ems_pci_exit(void)
380{
381 pci_unregister_driver(&ems_pci_driver);
382}
383
384module_init(ems_pci_init);
385module_exit(ems_pci_exit);
386
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index ed004cebd31f..23ed6ea4c7c3 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -397,15 +397,4 @@ static struct pci_driver kvaser_pci_driver = {
397 .remove = __devexit_p(kvaser_pci_remove_one), 397 .remove = __devexit_p(kvaser_pci_remove_one),
398}; 398};
399 399
400static int __init kvaser_pci_init(void) 400module_pci_driver(kvaser_pci_driver);
401{
402 return pci_register_driver(&kvaser_pci_driver);
403}
404
405static void __exit kvaser_pci_exit(void)
406{
407 pci_unregister_driver(&kvaser_pci_driver);
408}
409
410module_init(kvaser_pci_init);
411module_exit(kvaser_pci_exit);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5f92b865f64b..f0a12962f7b6 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -749,14 +749,4 @@ static struct pci_driver peak_pci_driver = {
749 .remove = __devexit_p(peak_pci_remove), 749 .remove = __devexit_p(peak_pci_remove),
750}; 750};
751 751
752static int __init peak_pci_init(void) 752module_pci_driver(peak_pci_driver);
753{
754 return pci_register_driver(&peak_pci_driver);
755}
756module_init(peak_pci_init);
757
758static void __exit peak_pci_exit(void)
759{
760 pci_unregister_driver(&peak_pci_driver);
761}
762module_exit(peak_pci_exit);
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index a227586ddd52..8bc95982840f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -609,15 +609,4 @@ static struct pci_driver plx_pci_driver = {
609 .remove = plx_pci_del_card, 609 .remove = plx_pci_del_card,
610}; 610};
611 611
612static int __init plx_pci_init(void) 612module_pci_driver(plx_pci_driver);
613{
614 return pci_register_driver(&plx_pci_driver);
615}
616
617static void __exit plx_pci_exit(void)
618{
619 pci_unregister_driver(&plx_pci_driver);
620}
621
622module_init(plx_pci_init);
623module_exit(plx_pci_exit);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 5234586dff15..629c4ba5d49d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -875,6 +875,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
875 PCAN_USBPRO_INFO_FW, 875 PCAN_USBPRO_INFO_FW,
876 &fi, sizeof(fi)); 876 &fi, sizeof(fi));
877 if (err) { 877 if (err) {
878 kfree(usb_if);
878 dev_err(dev->netdev->dev.parent, 879 dev_err(dev->netdev->dev.parent,
879 "unable to read %s firmware info (err %d)\n", 880 "unable to read %s firmware info (err %d)\n",
880 pcan_usb_pro.name, err); 881 pcan_usb_pro.name, err);
@@ -885,6 +886,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
885 PCAN_USBPRO_INFO_BL, 886 PCAN_USBPRO_INFO_BL,
886 &bi, sizeof(bi)); 887 &bi, sizeof(bi));
887 if (err) { 888 if (err) {
889 kfree(usb_if);
888 dev_err(dev->netdev->dev.parent, 890 dev_err(dev->netdev->dev.parent,
889 "unable to read %s bootloader info (err %d)\n", 891 "unable to read %s bootloader info (err %d)\n",
890 pcan_usb_pro.name, err); 892 pcan_usb_pro.name, err);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d5c6d92f1ee7..442d91a2747b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -107,14 +107,14 @@ static int dummy_dev_init(struct net_device *dev)
107 return 0; 107 return 0;
108} 108}
109 109
110static void dummy_dev_free(struct net_device *dev) 110static void dummy_dev_uninit(struct net_device *dev)
111{ 111{
112 free_percpu(dev->dstats); 112 free_percpu(dev->dstats);
113 free_netdev(dev);
114} 113}
115 114
116static const struct net_device_ops dummy_netdev_ops = { 115static const struct net_device_ops dummy_netdev_ops = {
117 .ndo_init = dummy_dev_init, 116 .ndo_init = dummy_dev_init,
117 .ndo_uninit = dummy_dev_uninit,
118 .ndo_start_xmit = dummy_xmit, 118 .ndo_start_xmit = dummy_xmit,
119 .ndo_validate_addr = eth_validate_addr, 119 .ndo_validate_addr = eth_validate_addr,
120 .ndo_set_rx_mode = set_multicast_list, 120 .ndo_set_rx_mode = set_multicast_list,
@@ -128,7 +128,7 @@ static void dummy_setup(struct net_device *dev)
128 128
129 /* Initialize the device structure. */ 129 /* Initialize the device structure. */
130 dev->netdev_ops = &dummy_netdev_ops; 130 dev->netdev_ops = &dummy_netdev_ops;
131 dev->destructor = dummy_dev_free; 131 dev->destructor = free_netdev;
132 132
133 /* Fill in device structure with ethernet-generic values. */ 133 /* Fill in device structure with ethernet-generic values. */
134 dev->tx_queue_len = 0; 134 dev->tx_queue_len = 0;
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index e04ade444247..910895c5ec97 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -60,6 +60,7 @@ config PCMCIA_AXNET
60config AX88796 60config AX88796
61 tristate "ASIX AX88796 NE2000 clone support" 61 tristate "ASIX AX88796 NE2000 clone support"
62 depends on (ARM || MIPS || SUPERH) 62 depends on (ARM || MIPS || SUPERH)
63 select CRC32
63 select PHYLIB 64 select PHYLIB
64 select MDIO_BITBANG 65 select MDIO_BITBANG
65 ---help--- 66 ---help---
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 11476ca95e93..203ff9dccadb 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -501,6 +501,7 @@ static const struct ethtool_ops ax_ethtool_ops = {
501 .get_settings = ax_get_settings, 501 .get_settings = ax_get_settings,
502 .set_settings = ax_set_settings, 502 .set_settings = ax_set_settings,
503 .get_link = ethtool_op_get_link, 503 .get_link = ethtool_op_get_link,
504 .get_ts_info = ethtool_op_get_ts_info,
504}; 505};
505 506
506#ifdef CONFIG_AX88796_93CX6 507#ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index dbefd5658c14..8322c54972f3 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -635,6 +635,7 @@ static const struct ethtool_ops etherh_ethtool_ops = {
635 .get_settings = etherh_get_settings, 635 .get_settings = etherh_get_settings,
636 .set_settings = etherh_set_settings, 636 .set_settings = etherh_set_settings,
637 .get_drvinfo = etherh_get_drvinfo, 637 .get_drvinfo = etherh_get_drvinfo,
638 .get_ts_info = ethtool_op_get_ts_info,
638}; 639};
639 640
640static const struct net_device_ops etherh_netdev_ops = { 641static const struct net_device_ops etherh_netdev_ops = {
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c63a64cb6085..a11af5cc4844 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -174,6 +174,7 @@ source "drivers/net/ethernet/tile/Kconfig"
174source "drivers/net/ethernet/toshiba/Kconfig" 174source "drivers/net/ethernet/toshiba/Kconfig"
175source "drivers/net/ethernet/tundra/Kconfig" 175source "drivers/net/ethernet/tundra/Kconfig"
176source "drivers/net/ethernet/via/Kconfig" 176source "drivers/net/ethernet/via/Kconfig"
177source "drivers/net/ethernet/wiznet/Kconfig"
177source "drivers/net/ethernet/xilinx/Kconfig" 178source "drivers/net/ethernet/xilinx/Kconfig"
178source "drivers/net/ethernet/xircom/Kconfig" 179source "drivers/net/ethernet/xircom/Kconfig"
179 180
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9676a5109d94..878ad32b93f2 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -73,5 +73,6 @@ obj-$(CONFIG_TILE_NET) += tile/
73obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ 73obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
74obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ 74obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
75obj-$(CONFIG_NET_VENDOR_VIA) += via/ 75obj-$(CONFIG_NET_VENDOR_VIA) += via/
76obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
76obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ 77obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
77obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ 78obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d896816512ca..d920a529ba22 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -114,15 +114,6 @@ static int rx_copybreak /* = 0 */;
114#define DMA_BURST_SIZE 128 114#define DMA_BURST_SIZE 128
115#endif 115#endif
116 116
117/* Used to pass the media type, etc.
118 Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
119 The media type is usually passed in 'options[]'.
120 These variables are deprecated, use ethtool instead. -Ion
121*/
122#define MAX_UNITS 8 /* More are supported, limit only on options */
123static int options[MAX_UNITS] = {0, };
124static int full_duplex[MAX_UNITS] = {0, };
125
126/* Operational parameters that are set at compile time. */ 117/* Operational parameters that are set at compile time. */
127 118
128/* The "native" ring sizes are either 256 or 2048. 119/* The "native" ring sizes are either 256 or 2048.
@@ -192,8 +183,6 @@ module_param(debug, int, 0);
192module_param(rx_copybreak, int, 0); 183module_param(rx_copybreak, int, 0);
193module_param(intr_latency, int, 0); 184module_param(intr_latency, int, 0);
194module_param(small_frames, int, 0); 185module_param(small_frames, int, 0);
195module_param_array(options, int, NULL, 0);
196module_param_array(full_duplex, int, NULL, 0);
197module_param(enable_hw_cksum, int, 0); 186module_param(enable_hw_cksum, int, 0);
198MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt"); 187MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
199MODULE_PARM_DESC(mtu, "MTU (all boards)"); 188MODULE_PARM_DESC(mtu, "MTU (all boards)");
@@ -201,8 +190,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-6)");
201MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 190MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
202MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds"); 191MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
203MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)"); 192MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
204MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
205MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
206MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)"); 193MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
207 194
208/* 195/*
@@ -657,10 +644,10 @@ static const struct net_device_ops netdev_ops = {
657static int __devinit starfire_init_one(struct pci_dev *pdev, 644static int __devinit starfire_init_one(struct pci_dev *pdev,
658 const struct pci_device_id *ent) 645 const struct pci_device_id *ent)
659{ 646{
647 struct device *d = &pdev->dev;
660 struct netdev_private *np; 648 struct netdev_private *np;
661 int i, irq, option, chip_idx = ent->driver_data; 649 int i, irq, chip_idx = ent->driver_data;
662 struct net_device *dev; 650 struct net_device *dev;
663 static int card_idx = -1;
664 long ioaddr; 651 long ioaddr;
665 void __iomem *base; 652 void __iomem *base;
666 int drv_flags, io_size; 653 int drv_flags, io_size;
@@ -673,15 +660,13 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
673 printk(version); 660 printk(version);
674#endif 661#endif
675 662
676 card_idx++;
677
678 if (pci_enable_device (pdev)) 663 if (pci_enable_device (pdev))
679 return -EIO; 664 return -EIO;
680 665
681 ioaddr = pci_resource_start(pdev, 0); 666 ioaddr = pci_resource_start(pdev, 0);
682 io_size = pci_resource_len(pdev, 0); 667 io_size = pci_resource_len(pdev, 0);
683 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) { 668 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
684 printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx); 669 dev_err(d, "no PCI MEM resources, aborting\n");
685 return -ENODEV; 670 return -ENODEV;
686 } 671 }
687 672
@@ -694,14 +679,14 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
694 irq = pdev->irq; 679 irq = pdev->irq;
695 680
696 if (pci_request_regions (pdev, DRV_NAME)) { 681 if (pci_request_regions (pdev, DRV_NAME)) {
697 printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx); 682 dev_err(d, "cannot reserve PCI resources, aborting\n");
698 goto err_out_free_netdev; 683 goto err_out_free_netdev;
699 } 684 }
700 685
701 base = ioremap(ioaddr, io_size); 686 base = ioremap(ioaddr, io_size);
702 if (!base) { 687 if (!base) {
703 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n", 688 dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
704 card_idx, io_size, ioaddr); 689 io_size, ioaddr);
705 goto err_out_free_res; 690 goto err_out_free_res;
706 } 691 }
707 692
@@ -753,9 +738,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
753 /* wait a little longer */ 738 /* wait a little longer */
754 udelay(1000); 739 udelay(1000);
755 740
756 dev->base_addr = (unsigned long)base;
757 dev->irq = irq;
758
759 np = netdev_priv(dev); 741 np = netdev_priv(dev);
760 np->dev = dev; 742 np->dev = dev;
761 np->base = base; 743 np->base = base;
@@ -772,21 +754,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
772 754
773 drv_flags = netdrv_tbl[chip_idx].drv_flags; 755 drv_flags = netdrv_tbl[chip_idx].drv_flags;
774 756
775 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
776 if (dev->mem_start)
777 option = dev->mem_start;
778
779 /* The lower four bits are the media type. */
780 if (option & 0x200)
781 np->mii_if.full_duplex = 1;
782
783 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
784 np->mii_if.full_duplex = 1;
785
786 if (np->mii_if.full_duplex)
787 np->mii_if.force_media = 1;
788 else
789 np->mii_if.force_media = 0;
790 np->speed100 = 1; 757 np->speed100 = 1;
791 758
792 /* timer resolution is 128 * 0.8us */ 759 /* timer resolution is 128 * 0.8us */
@@ -909,13 +876,14 @@ static int netdev_open(struct net_device *dev)
909 const __be32 *fw_rx_data, *fw_tx_data; 876 const __be32 *fw_rx_data, *fw_tx_data;
910 struct netdev_private *np = netdev_priv(dev); 877 struct netdev_private *np = netdev_priv(dev);
911 void __iomem *ioaddr = np->base; 878 void __iomem *ioaddr = np->base;
879 const int irq = np->pci_dev->irq;
912 int i, retval; 880 int i, retval;
913 size_t tx_size, rx_size; 881 size_t tx_size, rx_size;
914 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size; 882 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
915 883
916 /* Do we ever need to reset the chip??? */ 884 /* Do we ever need to reset the chip??? */
917 885
918 retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 886 retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
919 if (retval) 887 if (retval)
920 return retval; 888 return retval;
921 889
@@ -924,7 +892,7 @@ static int netdev_open(struct net_device *dev)
924 writel(1, ioaddr + PCIDeviceConfig); 892 writel(1, ioaddr + PCIDeviceConfig);
925 if (debug > 1) 893 if (debug > 1)
926 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 894 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
927 dev->name, dev->irq); 895 dev->name, irq);
928 896
929 /* Allocate the various queues. */ 897 /* Allocate the various queues. */
930 if (!np->queue_mem) { 898 if (!np->queue_mem) {
@@ -935,7 +903,7 @@ static int netdev_open(struct net_device *dev)
935 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; 903 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
936 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); 904 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
937 if (np->queue_mem == NULL) { 905 if (np->queue_mem == NULL) {
938 free_irq(dev->irq, dev); 906 free_irq(irq, dev);
939 return -ENOMEM; 907 return -ENOMEM;
940 } 908 }
941 909
@@ -1962,7 +1930,7 @@ static int netdev_close(struct net_device *dev)
1962 } 1930 }
1963 } 1931 }
1964 1932
1965 free_irq(dev->irq, dev); 1933 free_irq(np->pci_dev->irq, dev);
1966 1934
1967 /* Free all the skbuffs in the Rx queue. */ 1935 /* Free all the skbuffs in the Rx queue. */
1968 for (i = 0; i < RX_RING_SIZE; i++) { 1936 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ab4daeccdf98..f816426e1085 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,6 +548,25 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
548 return 0; 548 return 0;
549} 549}
550 550
551static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
552 struct ethtool_ts_info *info)
553{
554 info->so_timestamping =
555 SOF_TIMESTAMPING_TX_HARDWARE |
556 SOF_TIMESTAMPING_RX_HARDWARE |
557 SOF_TIMESTAMPING_SYS_HARDWARE;
558 info->phc_index = -1;
559 info->tx_types =
560 (1 << HWTSTAMP_TX_OFF) |
561 (1 << HWTSTAMP_TX_ON);
562 info->rx_filters =
563 (1 << HWTSTAMP_FILTER_NONE) |
564 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
565 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
566 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
567 return 0;
568}
569
551static const struct ethtool_ops bfin_mac_ethtool_ops = { 570static const struct ethtool_ops bfin_mac_ethtool_ops = {
552 .get_settings = bfin_mac_ethtool_getsettings, 571 .get_settings = bfin_mac_ethtool_getsettings,
553 .set_settings = bfin_mac_ethtool_setsettings, 572 .set_settings = bfin_mac_ethtool_setsettings,
@@ -555,6 +574,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
555 .get_drvinfo = bfin_mac_ethtool_getdrvinfo, 574 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
556 .get_wol = bfin_mac_ethtool_getwol, 575 .get_wol = bfin_mac_ethtool_getwol,
557 .set_wol = bfin_mac_ethtool_setwol, 576 .set_wol = bfin_mac_ethtool_setwol,
577 .get_ts_info = bfin_mac_ethtool_get_ts_info,
558}; 578};
559 579
560/**************************************************************************/ 580/**************************************************************************/
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index ca70e16b6e2c..acc2956df907 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -74,8 +74,6 @@
74 74
75#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) 75#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
76#define MAX_JUMBO_FRAME_SIZE (6*1024) 76#define MAX_JUMBO_FRAME_SIZE (6*1024)
77#define MAX_TSO_FRAME_SIZE (7*1024)
78#define MAX_TX_OFFLOAD_THRESH (9*1024)
79 77
80#define AT_MAX_RECEIVE_QUEUE 4 78#define AT_MAX_RECEIVE_QUEUE 4
81#define AT_DEF_RECEIVE_QUEUE 1 79#define AT_DEF_RECEIVE_QUEUE 1
@@ -100,7 +98,7 @@
100#define ATL1C_ASPM_L0s_ENABLE 0x0001 98#define ATL1C_ASPM_L0s_ENABLE 0x0001
101#define ATL1C_ASPM_L1_ENABLE 0x0002 99#define ATL1C_ASPM_L1_ENABLE 0x0002
102 100
103#define AT_REGS_LEN (75 * sizeof(u32)) 101#define AT_REGS_LEN (74 * sizeof(u32))
104#define AT_EEPROM_LEN 512 102#define AT_EEPROM_LEN 512
105 103
106#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) 104#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
@@ -297,20 +295,6 @@ enum atl1c_dma_req_block {
297 atl1c_dma_req_4096 = 5 295 atl1c_dma_req_4096 = 5
298}; 296};
299 297
300enum atl1c_rss_mode {
301 atl1c_rss_mode_disable = 0,
302 atl1c_rss_sig_que = 1,
303 atl1c_rss_mul_que_sig_int = 2,
304 atl1c_rss_mul_que_mul_int = 4,
305};
306
307enum atl1c_rss_type {
308 atl1c_rss_disable = 0,
309 atl1c_rss_ipv4 = 1,
310 atl1c_rss_ipv4_tcp = 2,
311 atl1c_rss_ipv6 = 4,
312 atl1c_rss_ipv6_tcp = 8
313};
314 298
315enum atl1c_nic_type { 299enum atl1c_nic_type {
316 athr_l1c = 0, 300 athr_l1c = 0,
@@ -388,7 +372,6 @@ struct atl1c_hw {
388 enum atl1c_dma_order dma_order; 372 enum atl1c_dma_order dma_order;
389 enum atl1c_dma_rcb rcb_value; 373 enum atl1c_dma_rcb rcb_value;
390 enum atl1c_dma_req_block dmar_block; 374 enum atl1c_dma_req_block dmar_block;
391 enum atl1c_dma_req_block dmaw_block;
392 375
393 u16 device_id; 376 u16 device_id;
394 u16 vendor_id; 377 u16 vendor_id;
@@ -399,8 +382,6 @@ struct atl1c_hw {
399 u16 phy_id2; 382 u16 phy_id2;
400 383
401 u32 intr_mask; 384 u32 intr_mask;
402 u8 dmaw_dly_cnt;
403 u8 dmar_dly_cnt;
404 385
405 u8 preamble_len; 386 u8 preamble_len;
406 u16 max_frame_size; 387 u16 max_frame_size;
@@ -440,10 +421,6 @@ struct atl1c_hw {
440#define ATL1C_FPGA_VERSION 0x8000 421#define ATL1C_FPGA_VERSION 0x8000
441 u16 link_cap_flags; 422 u16 link_cap_flags;
442#define ATL1C_LINK_CAP_1000M 0x0001 423#define ATL1C_LINK_CAP_1000M 0x0001
443 u16 cmb_tpd;
444 u16 cmb_rrd;
445 u16 cmb_rx_timer; /* 2us resolution */
446 u16 cmb_tx_timer;
447 u32 smb_timer; 424 u32 smb_timer;
448 425
449 u16 rrd_thresh; /* Threshold of number of RRD produced to trigger 426 u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
@@ -451,9 +428,6 @@ struct atl1c_hw {
451 u16 tpd_thresh; 428 u16 tpd_thresh;
452 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ 429 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
453 u8 rfd_burst; 430 u8 rfd_burst;
454 enum atl1c_rss_type rss_type;
455 enum atl1c_rss_mode rss_mode;
456 u8 rss_hash_bits;
457 u32 base_cpu; 431 u32 base_cpu;
458 u32 indirect_tab; 432 u32 indirect_tab;
459 u8 mac_addr[ETH_ALEN]; 433 u8 mac_addr[ETH_ALEN];
@@ -466,8 +440,7 @@ struct atl1c_hw {
466 440
467/* 441/*
468 * atl1c_ring_header represents a single, contiguous block of DMA space 442 * atl1c_ring_header represents a single, contiguous block of DMA space
469 * mapped for the three descriptor rings (tpd, rfd, rrd) and the two 443 * mapped for the three descriptor rings (tpd, rfd, rrd) described below
470 * message blocks (cmb, smb) described below
471 */ 444 */
472struct atl1c_ring_header { 445struct atl1c_ring_header {
473 void *desc; /* virtual address */ 446 void *desc; /* virtual address */
@@ -541,16 +514,6 @@ struct atl1c_rrd_ring {
541 u16 next_to_clean; 514 u16 next_to_clean;
542}; 515};
543 516
544struct atl1c_cmb {
545 void *cmb;
546 dma_addr_t dma;
547};
548
549struct atl1c_smb {
550 void *smb;
551 dma_addr_t dma;
552};
553
554/* board specific private data structure */ 517/* board specific private data structure */
555struct atl1c_adapter { 518struct atl1c_adapter {
556 struct net_device *netdev; 519 struct net_device *netdev;
@@ -586,11 +549,8 @@ struct atl1c_adapter {
586 /* All Descriptor memory */ 549 /* All Descriptor memory */
587 struct atl1c_ring_header ring_header; 550 struct atl1c_ring_header ring_header;
588 struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE]; 551 struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
589 struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE]; 552 struct atl1c_rfd_ring rfd_ring;
590 struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE]; 553 struct atl1c_rrd_ring rrd_ring;
591 struct atl1c_cmb cmb;
592 struct atl1c_smb smb;
593 int num_rx_queues;
594 u32 bd_number; /* board number;*/ 554 u32 bd_number; /* board number;*/
595}; 555};
596 556
@@ -618,8 +578,14 @@ struct atl1c_adapter {
618#define AT_WRITE_REGW(a, reg, value) (\ 578#define AT_WRITE_REGW(a, reg, value) (\
619 writew((value), ((a)->hw_addr + reg))) 579 writew((value), ((a)->hw_addr + reg)))
620 580
621#define AT_READ_REGW(a, reg) (\ 581#define AT_READ_REGW(a, reg, pdata) do { \
622 readw((a)->hw_addr + reg)) 582 if (unlikely((a)->hibernate)) { \
583 readw((a)->hw_addr + reg); \
584 *(u16 *)pdata = readw((a)->hw_addr + reg); \
585 } else { \
586 *(u16 *)pdata = readw((a)->hw_addr + reg); \
587 } \
588 } while (0)
623 589
624#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ 590#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
625 writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) 591 writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 0a9326aa58b5..3feb846d40e4 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -141,8 +141,7 @@ static void atl1c_get_regs(struct net_device *netdev,
141 141
142 memset(p, 0, AT_REGS_LEN); 142 memset(p, 0, AT_REGS_LEN);
143 143
144 regs->version = 0; 144 regs->version = 1;
145 AT_READ_REG(hw, REG_VPD_CAP, p++);
146 AT_READ_REG(hw, REG_PM_CTRL, p++); 145 AT_READ_REG(hw, REG_PM_CTRL, p++);
147 AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); 146 AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++);
148 AT_READ_REG(hw, REG_TWSI_CTRL, p++); 147 AT_READ_REG(hw, REG_TWSI_CTRL, p++);
@@ -167,9 +166,9 @@ static void atl1c_get_regs(struct net_device *netdev,
167 AT_READ_REG(hw, REG_WOL_CTRL, p++); 166 AT_READ_REG(hw, REG_WOL_CTRL, p++);
168 167
169 atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); 168 atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
170 regs_buff[73] = (u32) phy_data; 169 regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data;
171 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); 170 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
172 regs_buff[74] = (u32) phy_data; 171 regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data;
173} 172}
174 173
175static int atl1c_get_eeprom_len(struct net_device *netdev) 174static int atl1c_get_eeprom_len(struct net_device *netdev)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 655fc6c4a8a4..cc7afa1a8fbe 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -25,6 +25,12 @@
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/mii.h> 26#include <linux/mii.h>
27 27
28#define FIELD_GETX(_x, _name) ((_x) >> (_name##_SHIFT) & (_name##_MASK))
29#define FIELD_SETX(_x, _name, _v) \
30(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\
31(((_v) & (_name##_MASK)) << (_name##_SHIFT)))
32#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT))
33
28struct atl1c_adapter; 34struct atl1c_adapter;
29struct atl1c_hw; 35struct atl1c_hw;
30 36
@@ -48,41 +54,13 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 54#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
49#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 55#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
50 56
51#define REG_DEVICE_CTRL 0x60 57#define DEVICE_CTRL_MAXRRS_MIN 2
52#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
53#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
54#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
55#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
56 58
57#define REG_LINK_CTRL 0x68 59#define REG_LINK_CTRL 0x68
58#define LINK_CTRL_L0S_EN 0x01 60#define LINK_CTRL_L0S_EN 0x01
59#define LINK_CTRL_L1_EN 0x02 61#define LINK_CTRL_L1_EN 0x02
60#define LINK_CTRL_EXT_SYNC 0x80 62#define LINK_CTRL_EXT_SYNC 0x80
61 63
62#define REG_VPD_CAP 0x6C
63#define VPD_CAP_ID_MASK 0xff
64#define VPD_CAP_ID_SHIFT 0
65#define VPD_CAP_NEXT_PTR_MASK 0xFF
66#define VPD_CAP_NEXT_PTR_SHIFT 8
67#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
68#define VPD_CAP_VPD_ADDR_SHIFT 16
69#define VPD_CAP_VPD_FLAG 0x80000000
70
71#define REG_VPD_DATA 0x70
72
73#define REG_PCIE_UC_SEVERITY 0x10C
74#define PCIE_UC_SERVRITY_TRN 0x00000001
75#define PCIE_UC_SERVRITY_DLP 0x00000010
76#define PCIE_UC_SERVRITY_PSN_TLP 0x00001000
77#define PCIE_UC_SERVRITY_FCP 0x00002000
78#define PCIE_UC_SERVRITY_CPL_TO 0x00004000
79#define PCIE_UC_SERVRITY_CA 0x00008000
80#define PCIE_UC_SERVRITY_UC 0x00010000
81#define PCIE_UC_SERVRITY_ROV 0x00020000
82#define PCIE_UC_SERVRITY_MLFP 0x00040000
83#define PCIE_UC_SERVRITY_ECRC 0x00080000
84#define PCIE_UC_SERVRITY_UR 0x00100000
85
86#define REG_DEV_SERIALNUM_CTRL 0x200 64#define REG_DEV_SERIALNUM_CTRL 0x200
87#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */ 65#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
88#define REG_DEV_MAC_SEL_SHIFT 0 66#define REG_DEV_MAC_SEL_SHIFT 0
@@ -118,17 +96,24 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
118#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10 96#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10
119 97
120#define REG_PCIE_PHYMISC 0x1000 98#define REG_PCIE_PHYMISC 0x1000
121#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 99#define PCIE_PHYMISC_FORCE_RCV_DET BIT(2)
100#define PCIE_PHYMISC_NFTS_MASK 0xFFUL
101#define PCIE_PHYMISC_NFTS_SHIFT 16
122 102
123#define REG_PCIE_PHYMISC2 0x1004 103#define REG_PCIE_PHYMISC2 0x1004
124#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3 104#define PCIE_PHYMISC2_L0S_TH_MASK 0x3UL
125#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16 105#define PCIE_PHYMISC2_L0S_TH_SHIFT 18
126#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3 106#define L2CB1_PCIE_PHYMISC2_L0S_TH 3
127#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18 107#define PCIE_PHYMISC2_CDR_BW_MASK 0x3UL
108#define PCIE_PHYMISC2_CDR_BW_SHIFT 16
109#define L2CB1_PCIE_PHYMISC2_CDR_BW 3
128 110
129#define REG_TWSI_DEBUG 0x1108 111#define REG_TWSI_DEBUG 0x1108
130#define TWSI_DEBUG_DEV_EXIST 0x20000000 112#define TWSI_DEBUG_DEV_EXIST 0x20000000
131 113
114#define REG_DMA_DBG 0x1114
115#define DMA_DBG_VENDOR_MSG BIT(0)
116
132#define REG_EEPROM_CTRL 0x12C0 117#define REG_EEPROM_CTRL 0x12C0
133#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF 118#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF
134#define EEPROM_CTRL_DATA_HI_SHIFT 0 119#define EEPROM_CTRL_DATA_HI_SHIFT 0
@@ -143,53 +128,78 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
143#define OTP_CTRL_CLK_EN 0x0002 128#define OTP_CTRL_CLK_EN 0x0002
144 129
145#define REG_PM_CTRL 0x12F8 130#define REG_PM_CTRL 0x12F8
146#define PM_CTRL_SDES_EN 0x00000001 131#define PM_CTRL_HOTRST BIT(31)
147#define PM_CTRL_RBER_EN 0x00000002 132#define PM_CTRL_MAC_ASPM_CHK BIT(30) /* L0s/L1 dis by MAC based on
148#define PM_CTRL_CLK_REQ_EN 0x00000004 133 * thrghput(setting in 15A0) */
149#define PM_CTRL_ASPM_L1_EN 0x00000008 134#define PM_CTRL_SA_DLY_EN BIT(29)
150#define PM_CTRL_SERDES_L1_EN 0x00000010 135#define PM_CTRL_L0S_BUFSRX_EN BIT(28)
151#define PM_CTRL_SERDES_PLL_L1_EN 0x00000020 136#define PM_CTRL_LCKDET_TIMER_MASK 0xFUL
152#define PM_CTRL_SERDES_PD_EX_L1 0x00000040
153#define PM_CTRL_SERDES_BUDS_RX_L1_EN 0x00000080
154#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xF
155#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
156#define PM_CTRL_ASPM_L0S_EN 0x00001000
157#define PM_CTRL_CLK_SWH_L1 0x00002000
158#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
159#define PM_CTRL_RCVR_WT_TIMER 0x00008000
160#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
161#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
162#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
163#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
164#define PM_CTRL_LCKDET_TIMER_MASK 0xF
165#define PM_CTRL_LCKDET_TIMER_SHIFT 24 137#define PM_CTRL_LCKDET_TIMER_SHIFT 24
166#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 138#define PM_CTRL_LCKDET_TIMER_DEF 0xC
167#define PM_CTRL_SA_DLY_EN 0x20000000 139#define PM_CTRL_PM_REQ_TIMER_MASK 0xFUL
168#define PM_CTRL_MAC_ASPM_CHK 0x40000000 140#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 /* pm_request_l1 time > @
169#define PM_CTRL_HOTRST 0x80000000 141 * ->L0s not L1 */
142#define PM_CTRL_PM_REQ_TO_DEF 0xC
143#define PMCTRL_TXL1_AFTER_L0S BIT(19) /* l1dv2.0+ */
144#define L1D_PMCTRL_L1_ENTRY_TM_MASK 7UL /* l1dv2.0+, 3bits */
145#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT 16
146#define L1D_PMCTRL_L1_ENTRY_TM_DIS 0
147#define L1D_PMCTRL_L1_ENTRY_TM_2US 1
148#define L1D_PMCTRL_L1_ENTRY_TM_4US 2
149#define L1D_PMCTRL_L1_ENTRY_TM_8US 3
150#define L1D_PMCTRL_L1_ENTRY_TM_16US 4
151#define L1D_PMCTRL_L1_ENTRY_TM_24US 5
152#define L1D_PMCTRL_L1_ENTRY_TM_32US 6
153#define L1D_PMCTRL_L1_ENTRY_TM_63US 7
154#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xFUL /* l1C 4bits */
155#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
156#define L2CB1_PM_CTRL_L1_ENTRY_TM 7
157#define L1C_PM_CTRL_L1_ENTRY_TM 0xF
158#define PM_CTRL_RCVR_WT_TIMER BIT(15) /* 1:1us, 0:2ms */
159#define PM_CTRL_CLK_PWM_VER1_1 BIT(14) /* 0:1.0a,1:1.1 */
160#define PM_CTRL_CLK_SWH_L1 BIT(13) /* en pcie clk sw in L1 */
161#define PM_CTRL_ASPM_L0S_EN BIT(12)
162#define PM_CTRL_RXL1_AFTER_L0S BIT(11) /* l1dv2.0+ */
163#define L1D_PMCTRL_L0S_TIMER_MASK 7UL /* l1d2.0+, 3bits*/
164#define L1D_PMCTRL_L0S_TIMER_SHIFT 8
165#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xFUL /* l1c, 4bits */
166#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
167#define PM_CTRL_SERDES_BUFS_RX_L1_EN BIT(7)
168#define PM_CTRL_SERDES_PD_EX_L1 BIT(6) /* power down serdes rx */
169#define PM_CTRL_SERDES_PLL_L1_EN BIT(5)
170#define PM_CTRL_SERDES_L1_EN BIT(4)
171#define PM_CTRL_ASPM_L1_EN BIT(3)
172#define PM_CTRL_CLK_REQ_EN BIT(2)
173#define PM_CTRL_RBER_EN BIT(1)
174#define PM_CTRL_SPRSDWER_EN BIT(0)
170 175
171#define REG_LTSSM_ID_CTRL 0x12FC 176#define REG_LTSSM_ID_CTRL 0x12FC
172#define LTSSM_ID_EN_WRO 0x1000 177#define LTSSM_ID_EN_WRO 0x1000
178
179
173/* Selene Master Control Register */ 180/* Selene Master Control Register */
174#define REG_MASTER_CTRL 0x1400 181#define REG_MASTER_CTRL 0x1400
175#define MASTER_CTRL_SOFT_RST 0x1 182#define MASTER_CTRL_OTP_SEL BIT(31)
176#define MASTER_CTRL_TEST_MODE_MASK 0x3 183#define MASTER_DEV_NUM_MASK 0x7FUL
177#define MASTER_CTRL_TEST_MODE_SHIFT 2 184#define MASTER_DEV_NUM_SHIFT 24
178#define MASTER_CTRL_BERT_START 0x10 185#define MASTER_REV_NUM_MASK 0xFFUL
179#define MASTER_CTRL_OOB_DIS_OFF 0x40 186#define MASTER_REV_NUM_SHIFT 16
180#define MASTER_CTRL_SA_TIMER_EN 0x80 187#define MASTER_CTRL_INT_RDCLR BIT(14)
181#define MASTER_CTRL_MTIMER_EN 0x100 188#define MASTER_CTRL_CLK_SEL_DIS BIT(12) /* 1:alwys sel pclk from
182#define MASTER_CTRL_MANUAL_INT 0x200 189 * serdes, not sw to 25M */
183#define MASTER_CTRL_TX_ITIMER_EN 0x400 190#define MASTER_CTRL_RX_ITIMER_EN BIT(11) /* IRQ MODURATION FOR RX */
184#define MASTER_CTRL_RX_ITIMER_EN 0x800 191#define MASTER_CTRL_TX_ITIMER_EN BIT(10) /* MODURATION FOR TX/RX */
185#define MASTER_CTRL_CLK_SEL_DIS 0x1000 192#define MASTER_CTRL_MANU_INT BIT(9) /* SOFT MANUAL INT */
186#define MASTER_CTRL_CLK_SWH_MODE 0x2000 193#define MASTER_CTRL_MANUTIMER_EN BIT(8)
187#define MASTER_CTRL_INT_RDCLR 0x4000 194#define MASTER_CTRL_SA_TIMER_EN BIT(7) /* SYS ALIVE TIMER EN */
188#define MASTER_CTRL_REV_NUM_SHIFT 16 195#define MASTER_CTRL_OOB_DIS BIT(6) /* OUT OF BOX DIS */
189#define MASTER_CTRL_REV_NUM_MASK 0xff 196#define MASTER_CTRL_WAKEN_25M BIT(5) /* WAKE WO. PCIE CLK */
190#define MASTER_CTRL_DEV_ID_SHIFT 24 197#define MASTER_CTRL_BERT_START BIT(4)
191#define MASTER_CTRL_DEV_ID_MASK 0x7f 198#define MASTER_PCIE_TSTMOD_MASK 3UL
192#define MASTER_CTRL_OTP_SEL 0x80000000 199#define MASTER_PCIE_TSTMOD_SHIFT 2
200#define MASTER_PCIE_RST BIT(1)
201#define MASTER_CTRL_SOFT_RST BIT(0) /* RST MAC & DMA */
202#define DMA_MAC_RST_TO 50
193 203
194/* Timer Initial Value Register */ 204/* Timer Initial Value Register */
195#define REG_MANUAL_TIMER_INIT 0x1404 205#define REG_MANUAL_TIMER_INIT 0x1404
@@ -236,17 +246,25 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
236 GPHY_CTRL_HIB_PULSE |\ 246 GPHY_CTRL_HIB_PULSE |\
237 GPHY_CTRL_PWDOWN_HW |\ 247 GPHY_CTRL_PWDOWN_HW |\
238 GPHY_CTRL_PHY_IDDQ) 248 GPHY_CTRL_PHY_IDDQ)
249
239/* Block IDLE Status Register */ 250/* Block IDLE Status Register */
240#define REG_IDLE_STATUS 0x1410 251#define REG_IDLE_STATUS 0x1410
241#define IDLE_STATUS_MASK 0x00FF 252#define IDLE_STATUS_SFORCE_MASK 0xFUL
242#define IDLE_STATUS_RXMAC_NO_IDLE 0x1 253#define IDLE_STATUS_SFORCE_SHIFT 14
243#define IDLE_STATUS_TXMAC_NO_IDLE 0x2 254#define IDLE_STATUS_CALIB_DONE BIT(13)
244#define IDLE_STATUS_RXQ_NO_IDLE 0x4 255#define IDLE_STATUS_CALIB_RES_MASK 0x1FUL
245#define IDLE_STATUS_TXQ_NO_IDLE 0x8 256#define IDLE_STATUS_CALIB_RES_SHIFT 8
246#define IDLE_STATUS_DMAR_NO_IDLE 0x10 257#define IDLE_STATUS_CALIBERR_MASK 0xFUL
247#define IDLE_STATUS_DMAW_NO_IDLE 0x20 258#define IDLE_STATUS_CALIBERR_SHIFT 4
248#define IDLE_STATUS_SMB_NO_IDLE 0x40 259#define IDLE_STATUS_TXQ_BUSY BIT(3)
249#define IDLE_STATUS_CMB_NO_IDLE 0x80 260#define IDLE_STATUS_RXQ_BUSY BIT(2)
261#define IDLE_STATUS_TXMAC_BUSY BIT(1)
262#define IDLE_STATUS_RXMAC_BUSY BIT(0)
263#define IDLE_STATUS_MASK (\
264 IDLE_STATUS_TXQ_BUSY |\
265 IDLE_STATUS_RXQ_BUSY |\
266 IDLE_STATUS_TXMAC_BUSY |\
267 IDLE_STATUS_RXMAC_BUSY)
250 268
251/* MDIO Control Register */ 269/* MDIO Control Register */
252#define REG_MDIO_CTRL 0x1414 270#define REG_MDIO_CTRL 0x1414
@@ -386,34 +404,53 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
386 404
387/* Wake-On-Lan control register */ 405/* Wake-On-Lan control register */
388#define REG_WOL_CTRL 0x14a0 406#define REG_WOL_CTRL 0x14a0
389#define WOL_PATTERN_EN 0x00000001 407#define WOL_PT7_MATCH BIT(31)
390#define WOL_PATTERN_PME_EN 0x00000002 408#define WOL_PT6_MATCH BIT(30)
391#define WOL_MAGIC_EN 0x00000004 409#define WOL_PT5_MATCH BIT(29)
392#define WOL_MAGIC_PME_EN 0x00000008 410#define WOL_PT4_MATCH BIT(28)
393#define WOL_LINK_CHG_EN 0x00000010 411#define WOL_PT3_MATCH BIT(27)
394#define WOL_LINK_CHG_PME_EN 0x00000020 412#define WOL_PT2_MATCH BIT(26)
395#define WOL_PATTERN_ST 0x00000100 413#define WOL_PT1_MATCH BIT(25)
396#define WOL_MAGIC_ST 0x00000200 414#define WOL_PT0_MATCH BIT(24)
397#define WOL_LINKCHG_ST 0x00000400 415#define WOL_PT7_EN BIT(23)
398#define WOL_CLK_SWITCH_EN 0x00008000 416#define WOL_PT6_EN BIT(22)
399#define WOL_PT0_EN 0x00010000 417#define WOL_PT5_EN BIT(21)
400#define WOL_PT1_EN 0x00020000 418#define WOL_PT4_EN BIT(20)
401#define WOL_PT2_EN 0x00040000 419#define WOL_PT3_EN BIT(19)
402#define WOL_PT3_EN 0x00080000 420#define WOL_PT2_EN BIT(18)
403#define WOL_PT4_EN 0x00100000 421#define WOL_PT1_EN BIT(17)
404#define WOL_PT5_EN 0x00200000 422#define WOL_PT0_EN BIT(16)
405#define WOL_PT6_EN 0x00400000 423#define WOL_LNKCHG_ST BIT(10)
424#define WOL_MAGIC_ST BIT(9)
425#define WOL_PATTERN_ST BIT(8)
426#define WOL_OOB_EN BIT(6)
427#define WOL_LINK_CHG_PME_EN BIT(5)
428#define WOL_LINK_CHG_EN BIT(4)
429#define WOL_MAGIC_PME_EN BIT(3)
430#define WOL_MAGIC_EN BIT(2)
431#define WOL_PATTERN_PME_EN BIT(1)
432#define WOL_PATTERN_EN BIT(0)
406 433
407/* WOL Length ( 2 DWORD ) */ 434/* WOL Length ( 2 DWORD ) */
408#define REG_WOL_PATTERN_LEN 0x14a4 435#define REG_WOL_PTLEN1 0x14A4
409#define WOL_PT_LEN_MASK 0x7f 436#define WOL_PTLEN1_3_MASK 0xFFUL
410#define WOL_PT0_LEN_SHIFT 0 437#define WOL_PTLEN1_3_SHIFT 24
411#define WOL_PT1_LEN_SHIFT 8 438#define WOL_PTLEN1_2_MASK 0xFFUL
412#define WOL_PT2_LEN_SHIFT 16 439#define WOL_PTLEN1_2_SHIFT 16
413#define WOL_PT3_LEN_SHIFT 24 440#define WOL_PTLEN1_1_MASK 0xFFUL
414#define WOL_PT4_LEN_SHIFT 0 441#define WOL_PTLEN1_1_SHIFT 8
415#define WOL_PT5_LEN_SHIFT 8 442#define WOL_PTLEN1_0_MASK 0xFFUL
416#define WOL_PT6_LEN_SHIFT 16 443#define WOL_PTLEN1_0_SHIFT 0
444
445#define REG_WOL_PTLEN2 0x14A8
446#define WOL_PTLEN2_7_MASK 0xFFUL
447#define WOL_PTLEN2_7_SHIFT 24
448#define WOL_PTLEN2_6_MASK 0xFFUL
449#define WOL_PTLEN2_6_SHIFT 16
450#define WOL_PTLEN2_5_MASK 0xFFUL
451#define WOL_PTLEN2_5_SHIFT 8
452#define WOL_PTLEN2_4_MASK 0xFFUL
453#define WOL_PTLEN2_4_SHIFT 0
417 454
418/* Internal SRAM Partition Register */ 455/* Internal SRAM Partition Register */
419#define RFDX_HEAD_ADDR_MASK 0x03FF 456#define RFDX_HEAD_ADDR_MASK 0x03FF
@@ -458,66 +495,50 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
458 */ 495 */
459#define REG_RX_BASE_ADDR_HI 0x1540 496#define REG_RX_BASE_ADDR_HI 0x1540
460#define REG_TX_BASE_ADDR_HI 0x1544 497#define REG_TX_BASE_ADDR_HI 0x1544
461#define REG_SMB_BASE_ADDR_HI 0x1548
462#define REG_SMB_BASE_ADDR_LO 0x154C
463#define REG_RFD0_HEAD_ADDR_LO 0x1550 498#define REG_RFD0_HEAD_ADDR_LO 0x1550
464#define REG_RFD1_HEAD_ADDR_LO 0x1554
465#define REG_RFD2_HEAD_ADDR_LO 0x1558
466#define REG_RFD3_HEAD_ADDR_LO 0x155C
467#define REG_RFD_RING_SIZE 0x1560 499#define REG_RFD_RING_SIZE 0x1560
468#define RFD_RING_SIZE_MASK 0x0FFF 500#define RFD_RING_SIZE_MASK 0x0FFF
469#define REG_RX_BUF_SIZE 0x1564 501#define REG_RX_BUF_SIZE 0x1564
470#define RX_BUF_SIZE_MASK 0xFFFF 502#define RX_BUF_SIZE_MASK 0xFFFF
471#define REG_RRD0_HEAD_ADDR_LO 0x1568 503#define REG_RRD0_HEAD_ADDR_LO 0x1568
472#define REG_RRD1_HEAD_ADDR_LO 0x156C
473#define REG_RRD2_HEAD_ADDR_LO 0x1570
474#define REG_RRD3_HEAD_ADDR_LO 0x1574
475#define REG_RRD_RING_SIZE 0x1578 504#define REG_RRD_RING_SIZE 0x1578
476#define RRD_RING_SIZE_MASK 0x0FFF 505#define RRD_RING_SIZE_MASK 0x0FFF
477#define REG_HTPD_HEAD_ADDR_LO 0x157C 506#define REG_TPD_PRI1_ADDR_LO 0x157C
478#define REG_NTPD_HEAD_ADDR_LO 0x1580 507#define REG_TPD_PRI0_ADDR_LO 0x1580
479#define REG_TPD_RING_SIZE 0x1584 508#define REG_TPD_RING_SIZE 0x1584
480#define TPD_RING_SIZE_MASK 0xFFFF 509#define TPD_RING_SIZE_MASK 0xFFFF
481#define REG_CMB_BASE_ADDR_LO 0x1588
482
483/* RSS about */
484#define REG_RSS_KEY0 0x14B0
485#define REG_RSS_KEY1 0x14B4
486#define REG_RSS_KEY2 0x14B8
487#define REG_RSS_KEY3 0x14BC
488#define REG_RSS_KEY4 0x14C0
489#define REG_RSS_KEY5 0x14C4
490#define REG_RSS_KEY6 0x14C8
491#define REG_RSS_KEY7 0x14CC
492#define REG_RSS_KEY8 0x14D0
493#define REG_RSS_KEY9 0x14D4
494#define REG_IDT_TABLE0 0x14E0
495#define REG_IDT_TABLE1 0x14E4
496#define REG_IDT_TABLE2 0x14E8
497#define REG_IDT_TABLE3 0x14EC
498#define REG_IDT_TABLE4 0x14F0
499#define REG_IDT_TABLE5 0x14F4
500#define REG_IDT_TABLE6 0x14F8
501#define REG_IDT_TABLE7 0x14FC
502#define REG_IDT_TABLE REG_IDT_TABLE0
503#define REG_RSS_HASH_VALUE 0x15B0
504#define REG_RSS_HASH_FLAG 0x15B4
505#define REG_BASE_CPU_NUMBER 0x15B8
506 510
507/* TXQ Control Register */ 511/* TXQ Control Register */
508#define REG_TXQ_CTRL 0x1590 512#define REG_TXQ_CTRL 0x1590
509#define TXQ_NUM_TPD_BURST_MASK 0xF 513#define TXQ_TXF_BURST_NUM_MASK 0xFFFFUL
510#define TXQ_NUM_TPD_BURST_SHIFT 0 514#define TXQ_TXF_BURST_NUM_SHIFT 16
511#define TXQ_CTRL_IP_OPTION_EN 0x10 515#define L1C_TXQ_TXF_BURST_PREF 0x200
512#define TXQ_CTRL_EN 0x20 516#define L2CB_TXQ_TXF_BURST_PREF 0x40
513#define TXQ_CTRL_ENH_MODE 0x40 517#define TXQ_CTRL_PEDING_CLR BIT(8)
514#define TXQ_CTRL_LS_8023_EN 0x80 518#define TXQ_CTRL_LS_8023_EN BIT(7)
515#define TXQ_TXF_BURST_NUM_SHIFT 16 519#define TXQ_CTRL_ENH_MODE BIT(6)
516#define TXQ_TXF_BURST_NUM_MASK 0xFFFF 520#define TXQ_CTRL_EN BIT(5)
521#define TXQ_CTRL_IP_OPTION_EN BIT(4)
522#define TXQ_NUM_TPD_BURST_MASK 0xFUL
523#define TXQ_NUM_TPD_BURST_SHIFT 0
524#define TXQ_NUM_TPD_BURST_DEF 5
525#define TXQ_CFGV (\
526 FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\
527 TXQ_CTRL_ENH_MODE |\
528 TXQ_CTRL_LS_8023_EN |\
529 TXQ_CTRL_IP_OPTION_EN)
530#define L1C_TXQ_CFGV (\
531 TXQ_CFGV |\
532 FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF))
533#define L2CB_TXQ_CFGV (\
534 TXQ_CFGV |\
535 FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF))
536
517 537
518/* Jumbo packet Threshold for task offload */ 538/* Jumbo packet Threshold for task offload */
519#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */ 539#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */
520#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF 540#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF
541#define MAX_TSO_FRAME_SIZE (7*1024)
521 542
522#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */ 543#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */
523#define TXF_WATER_MARK_MASK 0x0FFF 544#define TXF_WATER_MARK_MASK 0x0FFF
@@ -537,26 +558,21 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
537#define ASPM_THRUPUT_LIMIT_NO 0x00 558#define ASPM_THRUPUT_LIMIT_NO 0x00
538#define ASPM_THRUPUT_LIMIT_1M 0x01 559#define ASPM_THRUPUT_LIMIT_1M 0x01
539#define ASPM_THRUPUT_LIMIT_10M 0x02 560#define ASPM_THRUPUT_LIMIT_10M 0x02
540#define ASPM_THRUPUT_LIMIT_100M 0x04 561#define ASPM_THRUPUT_LIMIT_100M 0x03
541#define RXQ1_CTRL_EN 0x10 562#define IPV6_CHKSUM_CTRL_EN BIT(7)
542#define RXQ2_CTRL_EN 0x20
543#define RXQ3_CTRL_EN 0x40
544#define IPV6_CHKSUM_CTRL_EN 0x80
545#define RSS_HASH_BITS_MASK 0x00FF
546#define RSS_HASH_BITS_SHIFT 8
547#define RSS_HASH_IPV4 0x10000
548#define RSS_HASH_IPV4_TCP 0x20000
549#define RSS_HASH_IPV6 0x40000
550#define RSS_HASH_IPV6_TCP 0x80000
551#define RXQ_RFD_BURST_NUM_MASK 0x003F 563#define RXQ_RFD_BURST_NUM_MASK 0x003F
552#define RXQ_RFD_BURST_NUM_SHIFT 20 564#define RXQ_RFD_BURST_NUM_SHIFT 20
553#define RSS_MODE_MASK 0x0003 565#define RXQ_NUM_RFD_PREF_DEF 8
566#define RSS_MODE_MASK 3UL
554#define RSS_MODE_SHIFT 26 567#define RSS_MODE_SHIFT 26
555#define RSS_NIP_QUEUE_SEL_MASK 0x1 568#define RSS_MODE_DIS 0
556#define RSS_NIP_QUEUE_SEL_SHIFT 28 569#define RSS_MODE_SQSI 1
557#define RRS_HASH_CTRL_EN 0x20000000 570#define RSS_MODE_MQSI 2
558#define RX_CUT_THRU_EN 0x40000000 571#define RSS_MODE_MQMI 3
559#define RXQ_CTRL_EN 0x80000000 572#define RSS_NIP_QUEUE_SEL BIT(28) /* 0:q0, 1:table */
573#define RRS_HASH_CTRL_EN BIT(29)
574#define RX_CUT_THRU_EN BIT(30)
575#define RXQ_CTRL_EN BIT(31)
560 576
561#define REG_RFD_FREE_THRESH 0x15A4 577#define REG_RFD_FREE_THRESH 0x15A4
562#define RFD_FREE_THRESH_MASK 0x003F 578#define RFD_FREE_THRESH_MASK 0x003F
@@ -577,57 +593,45 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
577#define RXD_DMA_DOWN_TIMER_SHIFT 16 593#define RXD_DMA_DOWN_TIMER_SHIFT 16
578 594
579/* DMA Engine Control Register */ 595/* DMA Engine Control Register */
580#define REG_DMA_CTRL 0x15C0 596#define REG_DMA_CTRL 0x15C0
581#define DMA_CTRL_DMAR_IN_ORDER 0x1 597#define DMA_CTRL_SMB_NOW BIT(31)
582#define DMA_CTRL_DMAR_ENH_ORDER 0x2 598#define DMA_CTRL_WPEND_CLR BIT(30)
583#define DMA_CTRL_DMAR_OUT_ORDER 0x4 599#define DMA_CTRL_RPEND_CLR BIT(29)
584#define DMA_CTRL_RCB_VALUE 0x8 600#define DMA_CTRL_WDLY_CNT_MASK 0xFUL
585#define DMA_CTRL_DMAR_BURST_LEN_MASK 0x0007 601#define DMA_CTRL_WDLY_CNT_SHIFT 16
586#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 602#define DMA_CTRL_WDLY_CNT_DEF 4
587#define DMA_CTRL_DMAW_BURST_LEN_MASK 0x0007 603#define DMA_CTRL_RDLY_CNT_MASK 0x1FUL
588#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 604#define DMA_CTRL_RDLY_CNT_SHIFT 11
589#define DMA_CTRL_DMAR_REQ_PRI 0x400 605#define DMA_CTRL_RDLY_CNT_DEF 15
590#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x001F 606#define DMA_CTRL_RREQ_PRI_DATA BIT(10) /* 0:tpd, 1:data */
591#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 607#define DMA_CTRL_WREQ_BLEN_MASK 7UL
592#define DMA_CTRL_DMAW_DLY_CNT_MASK 0x000F 608#define DMA_CTRL_WREQ_BLEN_SHIFT 7
593#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 609#define DMA_CTRL_RREQ_BLEN_MASK 7UL
594#define DMA_CTRL_CMB_EN 0x100000 610#define DMA_CTRL_RREQ_BLEN_SHIFT 4
595#define DMA_CTRL_SMB_EN 0x200000 611#define L1C_CTRL_DMA_RCB_LEN128 BIT(3) /* 0:64bytes,1:128bytes */
596#define DMA_CTRL_CMB_NOW 0x400000 612#define DMA_CTRL_RORDER_MODE_MASK 7UL
597#define MAC_CTRL_SMB_DIS 0x1000000 613#define DMA_CTRL_RORDER_MODE_SHIFT 0
598#define DMA_CTRL_SMB_NOW 0x80000000 614#define DMA_CTRL_RORDER_MODE_OUT 4
599 615#define DMA_CTRL_RORDER_MODE_ENHANCE 2
600/* CMB/SMB Control Register */ 616#define DMA_CTRL_RORDER_MODE_IN 1
617
618/* INT-triggle/SMB Control Register */
601#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */ 619#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */
602#define SMB_STAT_TIMER_MASK 0xFFFFFF 620#define SMB_STAT_TIMER_MASK 0xFFFFFF
603#define REG_CMB_TPD_THRESH 0x15C8 621#define REG_TINT_TPD_THRESH 0x15C8 /* tpd th to trig intrrupt */
604#define CMB_TPD_THRESH_MASK 0xFFFF
605#define REG_CMB_TX_TIMER 0x15CC /* 2us resolution */
606#define CMB_TX_TIMER_MASK 0xFFFF
607 622
608/* Mail box */ 623/* Mail box */
609#define MB_RFDX_PROD_IDX_MASK 0xFFFF 624#define MB_RFDX_PROD_IDX_MASK 0xFFFF
610#define REG_MB_RFD0_PROD_IDX 0x15E0 625#define REG_MB_RFD0_PROD_IDX 0x15E0
611#define REG_MB_RFD1_PROD_IDX 0x15E4
612#define REG_MB_RFD2_PROD_IDX 0x15E8
613#define REG_MB_RFD3_PROD_IDX 0x15EC
614
615#define MB_PRIO_PROD_IDX_MASK 0xFFFF
616#define REG_MB_PRIO_PROD_IDX 0x15F0
617#define MB_HTPD_PROD_IDX_SHIFT 0
618#define MB_NTPD_PROD_IDX_SHIFT 16
619 626
620#define MB_PRIO_CONS_IDX_MASK 0xFFFF 627#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */
621#define REG_MB_PRIO_CONS_IDX 0x15F4 628#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */
622#define MB_HTPD_CONS_IDX_SHIFT 0 629#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */
623#define MB_NTPD_CONS_IDX_SHIFT 16 630#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */
624 631
625#define REG_MB_RFD01_CONS_IDX 0x15F8 632#define REG_MB_RFD01_CONS_IDX 0x15F8
626#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF 633#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
627#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000 634#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
628#define REG_MB_RFD23_CONS_IDX 0x15FC
629#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF
630#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000
631 635
632/* Interrupt Status Register */ 636/* Interrupt Status Register */
633#define REG_ISR 0x1600 637#define REG_ISR 0x1600
@@ -705,13 +709,6 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
705#define REG_INT_RETRIG_TIMER 0x1608 709#define REG_INT_RETRIG_TIMER 0x1608
706#define INT_RETRIG_TIMER_MASK 0xFFFF 710#define INT_RETRIG_TIMER_MASK 0xFFFF
707 711
708#define REG_HDS_CTRL 0x160C
709#define HDS_CTRL_EN 0x0001
710#define HDS_CTRL_BACKFILLSIZE_SHIFT 8
711#define HDS_CTRL_BACKFILLSIZE_MASK 0x0FFF
712#define HDS_CTRL_MAX_HDRSIZE_SHIFT 20
713#define HDS_CTRL_MAC_HDRSIZE_MASK 0x0FFF
714
715#define REG_MAC_RX_STATUS_BIN 0x1700 712#define REG_MAC_RX_STATUS_BIN 0x1700
716#define REG_MAC_RX_STATUS_END 0x175c 713#define REG_MAC_RX_STATUS_END 0x175c
717#define REG_MAC_TX_STATUS_BIN 0x1760 714#define REG_MAC_TX_STATUS_BIN 0x1760
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1ef0c9275dee..df106370eb6d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -54,8 +54,9 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
54}; 54};
55MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); 55MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
56 56
57MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>"); 57MODULE_AUTHOR("Jie Yang");
58MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); 58MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
59MODULE_DESCRIPTION("Qualcom Atheros 100/1000M Ethernet Network Driver");
59MODULE_LICENSE("GPL"); 60MODULE_LICENSE("GPL");
60MODULE_VERSION(ATL1C_DRV_VERSION); 61MODULE_VERSION(ATL1C_DRV_VERSION);
61 62
@@ -63,9 +64,9 @@ static int atl1c_stop_mac(struct atl1c_hw *hw);
63static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw); 64static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
64static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw); 65static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
65static void atl1c_disable_l0s_l1(struct atl1c_hw *hw); 66static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
66static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup); 67static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
67static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter); 68static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
68static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, 69static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
69 int *work_done, int work_to_do); 70 int *work_done, int work_to_do);
70static int atl1c_up(struct atl1c_adapter *adapter); 71static int atl1c_up(struct atl1c_adapter *adapter);
71static void atl1c_down(struct atl1c_adapter *adapter); 72static void atl1c_down(struct atl1c_adapter *adapter);
@@ -74,50 +75,49 @@ static const u16 atl1c_pay_load_size[] = {
74 128, 256, 512, 1024, 2048, 4096, 75 128, 256, 512, 1024, 2048, 4096,
75}; 76};
76 77
77static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
78{
79 REG_MB_RFD0_PROD_IDX,
80 REG_MB_RFD1_PROD_IDX,
81 REG_MB_RFD2_PROD_IDX,
82 REG_MB_RFD3_PROD_IDX
83};
84
85static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
86{
87 REG_RFD0_HEAD_ADDR_LO,
88 REG_RFD1_HEAD_ADDR_LO,
89 REG_RFD2_HEAD_ADDR_LO,
90 REG_RFD3_HEAD_ADDR_LO
91};
92
93static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
94{
95 REG_RRD0_HEAD_ADDR_LO,
96 REG_RRD1_HEAD_ADDR_LO,
97 REG_RRD2_HEAD_ADDR_LO,
98 REG_RRD3_HEAD_ADDR_LO
99};
100 78
101static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | 79static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
102 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; 80 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
103static void atl1c_pcie_patch(struct atl1c_hw *hw) 81static void atl1c_pcie_patch(struct atl1c_hw *hw)
104{ 82{
105 u32 data; 83 u32 mst_data, data;
106 84
107 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); 85 /* pclk sel could switch to 25M */
108 data |= PCIE_PHYMISC_FORCE_RCV_DET; 86 AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
109 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); 87 mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
88 AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
110 89
90 /* WoL/PCIE related settings */
91 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
92 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
93 data |= PCIE_PHYMISC_FORCE_RCV_DET;
94 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
95 } else { /* new dev set bit5 of MASTER */
96 if (!(mst_data & MASTER_CTRL_WAKEN_25M))
97 AT_WRITE_REG(hw, REG_MASTER_CTRL,
98 mst_data | MASTER_CTRL_WAKEN_25M);
99 }
100 /* aspm/PCIE setting only for l2cb 1.0 */
111 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { 101 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
112 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); 102 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
113 103 data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
114 data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK << 104 L2CB1_PCIE_PHYMISC2_CDR_BW);
115 PCIE_PHYMISC2_SERDES_CDR_SHIFT); 105 data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
116 data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; 106 L2CB1_PCIE_PHYMISC2_L0S_TH);
117 data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
118 PCIE_PHYMISC2_SERDES_TH_SHIFT);
119 data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
120 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); 107 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
108 /* extend L1 sync timer */
109 AT_READ_REG(hw, REG_LINK_CTRL, &data);
110 data |= LINK_CTRL_EXT_SYNC;
111 AT_WRITE_REG(hw, REG_LINK_CTRL, data);
112 }
113 /* l2cb 1.x & l1d 1.x */
114 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
115 AT_READ_REG(hw, REG_PM_CTRL, &data);
116 data |= PM_CTRL_L0S_BUFSRX_EN;
117 AT_WRITE_REG(hw, REG_PM_CTRL, data);
118 /* clear vendor msg */
119 AT_READ_REG(hw, REG_DMA_DBG, &data);
120 AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
121 } 121 }
122} 122}
123 123
@@ -130,6 +130,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
130 u32 data; 130 u32 data;
131 u32 pci_cmd; 131 u32 pci_cmd;
132 struct pci_dev *pdev = hw->adapter->pdev; 132 struct pci_dev *pdev = hw->adapter->pdev;
133 int pos;
133 134
134 AT_READ_REG(hw, PCI_COMMAND, &pci_cmd); 135 AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
135 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; 136 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
@@ -146,10 +147,16 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
146 /* 147 /*
147 * Mask some pcie error bits 148 * Mask some pcie error bits
148 */ 149 */
149 AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data); 150 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
150 data &= ~PCIE_UC_SERVRITY_DLP; 151 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
151 data &= ~PCIE_UC_SERVRITY_FCP; 152 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
152 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); 153 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
154 /* clear error status */
155 pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
156 PCI_EXP_DEVSTA_NFED |
157 PCI_EXP_DEVSTA_FED |
158 PCI_EXP_DEVSTA_CED |
159 PCI_EXP_DEVSTA_URD);
153 160
154 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); 161 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
155 data &= ~LTSSM_ID_EN_WRO; 162 data &= ~LTSSM_ID_EN_WRO;
@@ -207,14 +214,14 @@ static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
207 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads 214 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
208 * of the idle status register until the device is actually idle 215 * of the idle status register until the device is actually idle
209 */ 216 */
210static u32 atl1c_wait_until_idle(struct atl1c_hw *hw) 217static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
211{ 218{
212 int timeout; 219 int timeout;
213 u32 data; 220 u32 data;
214 221
215 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { 222 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
216 AT_READ_REG(hw, REG_IDLE_STATUS, &data); 223 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
217 if ((data & IDLE_STATUS_MASK) == 0) 224 if ((data & modu_ctrl) == 0)
218 return 0; 225 return 0;
219 msleep(1); 226 msleep(1);
220 } 227 }
@@ -265,7 +272,7 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
265 if (atl1c_stop_mac(hw) != 0) 272 if (atl1c_stop_mac(hw) != 0)
266 if (netif_msg_hw(adapter)) 273 if (netif_msg_hw(adapter))
267 dev_warn(&pdev->dev, "stop mac failed\n"); 274 dev_warn(&pdev->dev, "stop mac failed\n");
268 atl1c_set_aspm(hw, false); 275 atl1c_set_aspm(hw, SPEED_0);
269 netif_carrier_off(netdev); 276 netif_carrier_off(netdev);
270 netif_stop_queue(netdev); 277 netif_stop_queue(netdev);
271 atl1c_phy_reset(hw); 278 atl1c_phy_reset(hw);
@@ -283,7 +290,7 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
283 adapter->link_duplex != duplex) { 290 adapter->link_duplex != duplex) {
284 adapter->link_speed = speed; 291 adapter->link_speed = speed;
285 adapter->link_duplex = duplex; 292 adapter->link_duplex = duplex;
286 atl1c_set_aspm(hw, true); 293 atl1c_set_aspm(hw, speed);
287 atl1c_enable_tx_ctrl(hw); 294 atl1c_enable_tx_ctrl(hw);
288 atl1c_enable_rx_ctrl(hw); 295 atl1c_enable_rx_ctrl(hw);
289 atl1c_setup_mac_ctrl(adapter); 296 atl1c_setup_mac_ctrl(adapter);
@@ -523,11 +530,16 @@ static int atl1c_set_features(struct net_device *netdev,
523static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) 530static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
524{ 531{
525 struct atl1c_adapter *adapter = netdev_priv(netdev); 532 struct atl1c_adapter *adapter = netdev_priv(netdev);
533 struct atl1c_hw *hw = &adapter->hw;
526 int old_mtu = netdev->mtu; 534 int old_mtu = netdev->mtu;
527 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 535 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
528 536
529 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 537 /* Fast Ethernet controller doesn't support jumbo packet */
530 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 538 if (((hw->nic_type == athr_l2c ||
539 hw->nic_type == athr_l2c_b ||
540 hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) ||
541 max_frame < ETH_ZLEN + ETH_FCS_LEN ||
542 max_frame > MAX_JUMBO_FRAME_SIZE) {
531 if (netif_msg_link(adapter)) 543 if (netif_msg_link(adapter))
532 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); 544 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
533 return -EINVAL; 545 return -EINVAL;
@@ -563,7 +575,7 @@ static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
563 struct atl1c_adapter *adapter = netdev_priv(netdev); 575 struct atl1c_adapter *adapter = netdev_priv(netdev);
564 u16 result; 576 u16 result;
565 577
566 atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); 578 atl1c_read_phy_reg(&adapter->hw, reg_num, &result);
567 return result; 579 return result;
568} 580}
569 581
@@ -572,7 +584,7 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
572{ 584{
573 struct atl1c_adapter *adapter = netdev_priv(netdev); 585 struct atl1c_adapter *adapter = netdev_priv(netdev);
574 586
575 atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); 587 atl1c_write_phy_reg(&adapter->hw, reg_num, val);
576} 588}
577 589
578/* 590/*
@@ -696,12 +708,8 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
696 708
697 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | 709 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
698 ATL1C_TXQ_MODE_ENHANCE; 710 ATL1C_TXQ_MODE_ENHANCE;
699 if (link_ctrl_data & LINK_CTRL_L0S_EN) 711 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT |
700 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; 712 ATL1C_ASPM_L1_SUPPORT;
701 if (link_ctrl_data & LINK_CTRL_L1_EN)
702 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
703 if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
704 hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
705 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; 713 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
706 714
707 if (hw->nic_type == athr_l1c || 715 if (hw->nic_type == athr_l1c ||
@@ -729,9 +737,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
729 device_set_wakeup_enable(&pdev->dev, false); 737 device_set_wakeup_enable(&pdev->dev, false);
730 adapter->link_speed = SPEED_0; 738 adapter->link_speed = SPEED_0;
731 adapter->link_duplex = FULL_DUPLEX; 739 adapter->link_duplex = FULL_DUPLEX;
732 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
733 adapter->tpd_ring[0].count = 1024; 740 adapter->tpd_ring[0].count = 1024;
734 adapter->rfd_ring[0].count = 512; 741 adapter->rfd_ring.count = 512;
735 742
736 hw->vendor_id = pdev->vendor; 743 hw->vendor_id = pdev->vendor;
737 hw->device_id = pdev->device; 744 hw->device_id = pdev->device;
@@ -750,22 +757,12 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
750 hw->phy_configured = false; 757 hw->phy_configured = false;
751 hw->preamble_len = 7; 758 hw->preamble_len = 7;
752 hw->max_frame_size = adapter->netdev->mtu; 759 hw->max_frame_size = adapter->netdev->mtu;
753 if (adapter->num_rx_queues < 2) {
754 hw->rss_type = atl1c_rss_disable;
755 hw->rss_mode = atl1c_rss_mode_disable;
756 } else {
757 hw->rss_type = atl1c_rss_ipv4;
758 hw->rss_mode = atl1c_rss_mul_que_mul_int;
759 hw->rss_hash_bits = 16;
760 }
761 hw->autoneg_advertised = ADVERTISED_Autoneg; 760 hw->autoneg_advertised = ADVERTISED_Autoneg;
762 hw->indirect_tab = 0xE4E4E4E4; 761 hw->indirect_tab = 0xE4E4E4E4;
763 hw->base_cpu = 0; 762 hw->base_cpu = 0;
764 763
765 hw->ict = 50000; /* 100ms */ 764 hw->ict = 50000; /* 100ms */
766 hw->smb_timer = 200000; /* 400ms */ 765 hw->smb_timer = 200000; /* 400ms */
767 hw->cmb_tpd = 4;
768 hw->cmb_tx_timer = 1; /* 2 us */
769 hw->rx_imt = 200; 766 hw->rx_imt = 200;
770 hw->tx_imt = 1000; 767 hw->tx_imt = 1000;
771 768
@@ -773,9 +770,6 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
773 hw->rfd_burst = 8; 770 hw->rfd_burst = 8;
774 hw->dma_order = atl1c_dma_ord_out; 771 hw->dma_order = atl1c_dma_ord_out;
775 hw->dmar_block = atl1c_dma_req_1024; 772 hw->dmar_block = atl1c_dma_req_1024;
776 hw->dmaw_block = atl1c_dma_req_1024;
777 hw->dmar_dly_cnt = 15;
778 hw->dmaw_dly_cnt = 4;
779 773
780 if (atl1c_alloc_queues(adapter)) { 774 if (atl1c_alloc_queues(adapter)) {
781 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 775 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
@@ -851,24 +845,22 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
851 */ 845 */
852static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) 846static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
853{ 847{
854 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 848 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
855 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 849 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
856 struct atl1c_buffer *buffer_info; 850 struct atl1c_buffer *buffer_info;
857 struct pci_dev *pdev = adapter->pdev; 851 struct pci_dev *pdev = adapter->pdev;
858 int i, j; 852 int j;
859 853
860 for (i = 0; i < adapter->num_rx_queues; i++) { 854 for (j = 0; j < rfd_ring->count; j++) {
861 for (j = 0; j < rfd_ring[i].count; j++) { 855 buffer_info = &rfd_ring->buffer_info[j];
862 buffer_info = &rfd_ring[i].buffer_info[j]; 856 atl1c_clean_buffer(pdev, buffer_info, 0);
863 atl1c_clean_buffer(pdev, buffer_info, 0);
864 }
865 /* zero out the descriptor ring */
866 memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
867 rfd_ring[i].next_to_clean = 0;
868 rfd_ring[i].next_to_use = 0;
869 rrd_ring[i].next_to_use = 0;
870 rrd_ring[i].next_to_clean = 0;
871 } 857 }
858 /* zero out the descriptor ring */
859 memset(rfd_ring->desc, 0, rfd_ring->size);
860 rfd_ring->next_to_clean = 0;
861 rfd_ring->next_to_use = 0;
862 rrd_ring->next_to_use = 0;
863 rrd_ring->next_to_clean = 0;
872} 864}
873 865
874/* 866/*
@@ -877,8 +869,8 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
877static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) 869static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
878{ 870{
879 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; 871 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
880 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 872 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
881 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 873 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
882 struct atl1c_buffer *buffer_info; 874 struct atl1c_buffer *buffer_info;
883 int i, j; 875 int i, j;
884 876
@@ -890,15 +882,13 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
890 ATL1C_SET_BUFFER_STATE(&buffer_info[i], 882 ATL1C_SET_BUFFER_STATE(&buffer_info[i],
891 ATL1C_BUFFER_FREE); 883 ATL1C_BUFFER_FREE);
892 } 884 }
893 for (i = 0; i < adapter->num_rx_queues; i++) { 885 rfd_ring->next_to_use = 0;
894 rfd_ring[i].next_to_use = 0; 886 rfd_ring->next_to_clean = 0;
895 rfd_ring[i].next_to_clean = 0; 887 rrd_ring->next_to_use = 0;
896 rrd_ring[i].next_to_use = 0; 888 rrd_ring->next_to_clean = 0;
897 rrd_ring[i].next_to_clean = 0; 889 for (j = 0; j < rfd_ring->count; j++) {
898 for (j = 0; j < rfd_ring[i].count; j++) { 890 buffer_info = &rfd_ring->buffer_info[j];
899 buffer_info = &rfd_ring[i].buffer_info[j]; 891 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
900 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
901 }
902 } 892 }
903} 893}
904 894
@@ -935,27 +925,23 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
935{ 925{
936 struct pci_dev *pdev = adapter->pdev; 926 struct pci_dev *pdev = adapter->pdev;
937 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; 927 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
938 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 928 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
939 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 929 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
940 struct atl1c_ring_header *ring_header = &adapter->ring_header; 930 struct atl1c_ring_header *ring_header = &adapter->ring_header;
941 int num_rx_queues = adapter->num_rx_queues;
942 int size; 931 int size;
943 int i; 932 int i;
944 int count = 0; 933 int count = 0;
945 int rx_desc_count = 0; 934 int rx_desc_count = 0;
946 u32 offset = 0; 935 u32 offset = 0;
947 936
948 rrd_ring[0].count = rfd_ring[0].count; 937 rrd_ring->count = rfd_ring->count;
949 for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++) 938 for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
950 tpd_ring[i].count = tpd_ring[0].count; 939 tpd_ring[i].count = tpd_ring[0].count;
951 940
952 for (i = 1; i < adapter->num_rx_queues; i++)
953 rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
954
955 /* 2 tpd queue, one high priority queue, 941 /* 2 tpd queue, one high priority queue,
956 * another normal priority queue */ 942 * another normal priority queue */
957 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 + 943 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
958 rfd_ring->count * num_rx_queues); 944 rfd_ring->count);
959 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); 945 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
960 if (unlikely(!tpd_ring->buffer_info)) { 946 if (unlikely(!tpd_ring->buffer_info)) {
961 dev_err(&pdev->dev, "kzalloc failed, size = %d\n", 947 dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
@@ -968,12 +954,11 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
968 count += tpd_ring[i].count; 954 count += tpd_ring[i].count;
969 } 955 }
970 956
971 for (i = 0; i < num_rx_queues; i++) { 957 rfd_ring->buffer_info =
972 rfd_ring[i].buffer_info = 958 (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
973 (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 959 count += rfd_ring->count;
974 count += rfd_ring[i].count; 960 rx_desc_count += rfd_ring->count;
975 rx_desc_count += rfd_ring[i].count; 961
976 }
977 /* 962 /*
978 * real ring DMA buffer 963 * real ring DMA buffer
979 * each ring/block may need up to 8 bytes for alignment, hence the 964 * each ring/block may need up to 8 bytes for alignment, hence the
@@ -983,8 +968,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
983 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 + 968 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
984 sizeof(struct atl1c_rx_free_desc) * rx_desc_count + 969 sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
985 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 970 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
986 sizeof(struct atl1c_hw_stats) + 971 8 * 4;
987 8 * 4 + 8 * 2 * num_rx_queues;
988 972
989 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, 973 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
990 &ring_header->dma); 974 &ring_header->dma);
@@ -1005,25 +989,18 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1005 offset += roundup(tpd_ring[i].size, 8); 989 offset += roundup(tpd_ring[i].size, 8);
1006 } 990 }
1007 /* init RFD ring */ 991 /* init RFD ring */
1008 for (i = 0; i < num_rx_queues; i++) { 992 rfd_ring->dma = ring_header->dma + offset;
1009 rfd_ring[i].dma = ring_header->dma + offset; 993 rfd_ring->desc = (u8 *) ring_header->desc + offset;
1010 rfd_ring[i].desc = (u8 *) ring_header->desc + offset; 994 rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count;
1011 rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) * 995 offset += roundup(rfd_ring->size, 8);
1012 rfd_ring[i].count;
1013 offset += roundup(rfd_ring[i].size, 8);
1014 }
1015 996
1016 /* init RRD ring */ 997 /* init RRD ring */
1017 for (i = 0; i < num_rx_queues; i++) { 998 rrd_ring->dma = ring_header->dma + offset;
1018 rrd_ring[i].dma = ring_header->dma + offset; 999 rrd_ring->desc = (u8 *) ring_header->desc + offset;
1019 rrd_ring[i].desc = (u8 *) ring_header->desc + offset; 1000 rrd_ring->size = sizeof(struct atl1c_recv_ret_status) *
1020 rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) * 1001 rrd_ring->count;
1021 rrd_ring[i].count; 1002 offset += roundup(rrd_ring->size, 8);
1022 offset += roundup(rrd_ring[i].size, 8);
1023 }
1024 1003
1025 adapter->smb.dma = ring_header->dma + offset;
1026 adapter->smb.smb = (u8 *)ring_header->desc + offset;
1027 return 0; 1004 return 0;
1028 1005
1029err_nomem: 1006err_nomem:
@@ -1034,15 +1011,10 @@ err_nomem:
1034static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) 1011static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1035{ 1012{
1036 struct atl1c_hw *hw = &adapter->hw; 1013 struct atl1c_hw *hw = &adapter->hw;
1037 struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *) 1014 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1038 adapter->rfd_ring; 1015 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
1039 struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
1040 adapter->rrd_ring;
1041 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) 1016 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
1042 adapter->tpd_ring; 1017 adapter->tpd_ring;
1043 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
1044 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
1045 int i;
1046 u32 data; 1018 u32 data;
1047 1019
1048 /* TPD */ 1020 /* TPD */
@@ -1050,10 +1022,10 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1050 (u32)((tpd_ring[atl1c_trans_normal].dma & 1022 (u32)((tpd_ring[atl1c_trans_normal].dma &
1051 AT_DMA_HI_ADDR_MASK) >> 32)); 1023 AT_DMA_HI_ADDR_MASK) >> 32));
1052 /* just enable normal priority TX queue */ 1024 /* just enable normal priority TX queue */
1053 AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO, 1025 AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO,
1054 (u32)(tpd_ring[atl1c_trans_normal].dma & 1026 (u32)(tpd_ring[atl1c_trans_normal].dma &
1055 AT_DMA_LO_ADDR_MASK)); 1027 AT_DMA_LO_ADDR_MASK));
1056 AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO, 1028 AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO,
1057 (u32)(tpd_ring[atl1c_trans_high].dma & 1029 (u32)(tpd_ring[atl1c_trans_high].dma &
1058 AT_DMA_LO_ADDR_MASK)); 1030 AT_DMA_LO_ADDR_MASK));
1059 AT_WRITE_REG(hw, REG_TPD_RING_SIZE, 1031 AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
@@ -1062,31 +1034,21 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1062 1034
1063 /* RFD */ 1035 /* RFD */
1064 AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, 1036 AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
1065 (u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32)); 1037 (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1066 for (i = 0; i < adapter->num_rx_queues; i++) 1038 AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO,
1067 AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i], 1039 (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK));
1068 (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1069 1040
1070 AT_WRITE_REG(hw, REG_RFD_RING_SIZE, 1041 AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
1071 rfd_ring[0].count & RFD_RING_SIZE_MASK); 1042 rfd_ring->count & RFD_RING_SIZE_MASK);
1072 AT_WRITE_REG(hw, REG_RX_BUF_SIZE, 1043 AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
1073 adapter->rx_buffer_len & RX_BUF_SIZE_MASK); 1044 adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
1074 1045
1075 /* RRD */ 1046 /* RRD */
1076 for (i = 0; i < adapter->num_rx_queues; i++) 1047 AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO,
1077 AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i], 1048 (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK));
1078 (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1079 AT_WRITE_REG(hw, REG_RRD_RING_SIZE, 1049 AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
1080 (rrd_ring[0].count & RRD_RING_SIZE_MASK)); 1050 (rrd_ring->count & RRD_RING_SIZE_MASK));
1081 1051
1082 /* CMB */
1083 AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
1084
1085 /* SMB */
1086 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
1087 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1088 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
1089 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
1090 if (hw->nic_type == athr_l2c_b) { 1052 if (hw->nic_type == athr_l2c_b) {
1091 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); 1053 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
1092 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); 1054 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
@@ -1111,32 +1073,26 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1111static void atl1c_configure_tx(struct atl1c_adapter *adapter) 1073static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1112{ 1074{
1113 struct atl1c_hw *hw = &adapter->hw; 1075 struct atl1c_hw *hw = &adapter->hw;
1114 u32 dev_ctrl_data; 1076 int max_pay_load;
1115 u32 max_pay_load;
1116 u16 tx_offload_thresh; 1077 u16 tx_offload_thresh;
1117 u32 txq_ctrl_data; 1078 u32 txq_ctrl_data;
1118 u32 max_pay_load_data;
1119 1079
1120 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; 1080 tx_offload_thresh = MAX_TSO_FRAME_SIZE;
1121 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, 1081 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1122 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); 1082 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
1123 AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data); 1083 max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
1124 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
1125 DEVICE_CTRL_MAX_PAYLOAD_MASK;
1126 hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
1127 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
1128 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
1129 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); 1084 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
1130 1085 /*
1131 txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) << 1086 * if BIOS had changed the dam-read-max-length to an invalid value,
1132 TXQ_NUM_TPD_BURST_SHIFT; 1087 * restore it to default value
1133 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) 1088 */
1134 txq_ctrl_data |= TXQ_CTRL_ENH_MODE; 1089 if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
1135 max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] & 1090 pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
1136 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; 1091 hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
1137 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) 1092 }
1138 max_pay_load_data >>= 1; 1093 txq_ctrl_data =
1139 txq_ctrl_data |= max_pay_load_data; 1094 hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
1095 L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
1140 1096
1141 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); 1097 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1142} 1098}
@@ -1151,34 +1107,13 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1151 1107
1152 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) 1108 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
1153 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN; 1109 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
1154 if (hw->rss_type == atl1c_rss_ipv4)
1155 rxq_ctrl_data |= RSS_HASH_IPV4;
1156 if (hw->rss_type == atl1c_rss_ipv4_tcp)
1157 rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
1158 if (hw->rss_type == atl1c_rss_ipv6)
1159 rxq_ctrl_data |= RSS_HASH_IPV6;
1160 if (hw->rss_type == atl1c_rss_ipv6_tcp)
1161 rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
1162 if (hw->rss_type != atl1c_rss_disable)
1163 rxq_ctrl_data |= RRS_HASH_CTRL_EN;
1164
1165 rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
1166 RSS_MODE_SHIFT;
1167 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
1168 RSS_HASH_BITS_SHIFT;
1169 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
1170 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
1171 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
1172
1173 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1174}
1175 1110
1176static void atl1c_configure_rss(struct atl1c_adapter *adapter) 1111 /* aspm for gigabit */
1177{ 1112 if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
1178 struct atl1c_hw *hw = &adapter->hw; 1113 rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
1114 ASPM_THRUPUT_LIMIT_100M);
1179 1115
1180 AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); 1116 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1181 AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
1182} 1117}
1183 1118
1184static void atl1c_configure_dma(struct atl1c_adapter *adapter) 1119static void atl1c_configure_dma(struct atl1c_adapter *adapter)
@@ -1186,36 +1121,11 @@ static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1186 struct atl1c_hw *hw = &adapter->hw; 1121 struct atl1c_hw *hw = &adapter->hw;
1187 u32 dma_ctrl_data; 1122 u32 dma_ctrl_data;
1188 1123
1189 dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI; 1124 dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
1190 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) 1125 DMA_CTRL_RREQ_PRI_DATA |
1191 dma_ctrl_data |= DMA_CTRL_CMB_EN; 1126 FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
1192 if (hw->ctrl_flags & ATL1C_SMB_ENABLE) 1127 FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
1193 dma_ctrl_data |= DMA_CTRL_SMB_EN; 1128 FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
1194 else
1195 dma_ctrl_data |= MAC_CTRL_SMB_DIS;
1196
1197 switch (hw->dma_order) {
1198 case atl1c_dma_ord_in:
1199 dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
1200 break;
1201 case atl1c_dma_ord_enh:
1202 dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
1203 break;
1204 case atl1c_dma_ord_out:
1205 dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
1206 break;
1207 default:
1208 break;
1209 }
1210
1211 dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
1212 << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
1213 dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
1214 << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
1215 dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
1216 << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
1217 dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
1218 << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
1219 1129
1220 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); 1130 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1221} 1131}
@@ -1230,21 +1140,21 @@ static int atl1c_stop_mac(struct atl1c_hw *hw)
1230 u32 data; 1140 u32 data;
1231 1141
1232 AT_READ_REG(hw, REG_RXQ_CTRL, &data); 1142 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1233 data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN | 1143 data &= ~RXQ_CTRL_EN;
1234 RXQ3_CTRL_EN | RXQ_CTRL_EN);
1235 AT_WRITE_REG(hw, REG_RXQ_CTRL, data); 1144 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1236 1145
1237 AT_READ_REG(hw, REG_TXQ_CTRL, &data); 1146 AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1238 data &= ~TXQ_CTRL_EN; 1147 data &= ~TXQ_CTRL_EN;
1239 AT_WRITE_REG(hw, REG_TWSI_CTRL, data); 1148 AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1240 1149
1241 atl1c_wait_until_idle(hw); 1150 atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
1242 1151
1243 AT_READ_REG(hw, REG_MAC_CTRL, &data); 1152 AT_READ_REG(hw, REG_MAC_CTRL, &data);
1244 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); 1153 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1245 AT_WRITE_REG(hw, REG_MAC_CTRL, data); 1154 AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1246 1155
1247 return (int)atl1c_wait_until_idle(hw); 1156 return (int)atl1c_wait_until_idle(hw,
1157 IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
1248} 1158}
1249 1159
1250static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw) 1160static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
@@ -1252,19 +1162,6 @@ static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
1252 u32 data; 1162 u32 data;
1253 1163
1254 AT_READ_REG(hw, REG_RXQ_CTRL, &data); 1164 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1255 switch (hw->adapter->num_rx_queues) {
1256 case 4:
1257 data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1258 break;
1259 case 3:
1260 data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1261 break;
1262 case 2:
1263 data |= RXQ1_CTRL_EN;
1264 break;
1265 default:
1266 break;
1267 }
1268 data |= RXQ_CTRL_EN; 1165 data |= RXQ_CTRL_EN;
1269 AT_WRITE_REG(hw, REG_RXQ_CTRL, data); 1166 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1270} 1167}
@@ -1300,131 +1197,104 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1300 * clearing, and should clear within a microsecond. 1197 * clearing, and should clear within a microsecond.
1301 */ 1198 */
1302 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); 1199 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1303 master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF; 1200 master_ctrl_data |= MASTER_CTRL_OOB_DIS;
1304 AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST) 1201 AT_WRITE_REG(hw, REG_MASTER_CTRL,
1305 & 0xFFFF)); 1202 master_ctrl_data | MASTER_CTRL_SOFT_RST);
1306 1203
1307 AT_WRITE_FLUSH(hw); 1204 AT_WRITE_FLUSH(hw);
1308 msleep(10); 1205 msleep(10);
1309 /* Wait at least 10ms for All module to be Idle */ 1206 /* Wait at least 10ms for All module to be Idle */
1310 1207
1311 if (atl1c_wait_until_idle(hw)) { 1208 if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) {
1312 dev_err(&pdev->dev, 1209 dev_err(&pdev->dev,
1313 "MAC state machine can't be idle since" 1210 "MAC state machine can't be idle since"
1314 " disabled for 10ms second\n"); 1211 " disabled for 10ms second\n");
1315 return -1; 1212 return -1;
1316 } 1213 }
1214 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1215
1317 return 0; 1216 return 0;
1318} 1217}
1319 1218
1320static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) 1219static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1321{ 1220{
1322 u32 pm_ctrl_data; 1221 u16 ctrl_flags = hw->ctrl_flags;
1323 1222
1324 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1223 hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT);
1325 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1224 atl1c_set_aspm(hw, SPEED_0);
1326 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1225 hw->ctrl_flags = ctrl_flags;
1327 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1328 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1329 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1330 pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
1331 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1332
1333 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1334 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1335 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1336 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1337} 1226}
1338 1227
1339/* 1228/*
1340 * Set ASPM state. 1229 * Set ASPM state.
1341 * Enable/disable L0s/L1 depend on link state. 1230 * Enable/disable L0s/L1 depend on link state.
1342 */ 1231 */
1343static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) 1232static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
1344{ 1233{
1345 u32 pm_ctrl_data; 1234 u32 pm_ctrl_data;
1346 u32 link_ctrl_data; 1235 u32 link_l1_timer;
1347 u32 link_l1_timer = 0xF;
1348 1236
1349 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1237 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1350 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 1238 pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN |
1239 PM_CTRL_ASPM_L0S_EN |
1240 PM_CTRL_MAC_ASPM_CHK);
1241 /* L1 timer */
1242 if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1243 pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S;
1244 link_l1_timer =
1245 link_speed == SPEED_1000 || link_speed == SPEED_100 ?
1246 L1D_PMCTRL_L1_ENTRY_TM_16US : 1;
1247 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1248 L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer);
1249 } else {
1250 link_l1_timer = hw->nic_type == athr_l2c_b ?
1251 L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM;
1252 if (link_speed != SPEED_1000 && link_speed != SPEED_100)
1253 link_l1_timer = 1;
1254 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1255 PM_CTRL_L1_ENTRY_TIMER, link_l1_timer);
1256 }
1351 1257
1352 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; 1258 /* L0S/L1 enable */
1353 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1259 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1354 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1260 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK;
1355 pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << 1261 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1356 PM_CTRL_LCKDET_TIMER_SHIFT); 1262 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK;
1357 pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT;
1358 1263
1264 /* l2cb & l1d & l2cb2 & l1d2 */
1359 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || 1265 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1360 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { 1266 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1361 link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; 1267 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1362 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { 1268 PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF);
1363 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) 1269 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER |
1364 link_ctrl_data |= LINK_CTRL_EXT_SYNC; 1270 PM_CTRL_SERDES_PD_EX_L1 |
1365 } 1271 PM_CTRL_CLK_SWH_L1;
1366 1272 pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1367 AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); 1273 PM_CTRL_SERDES_PLL_L1_EN |
1368 1274 PM_CTRL_SERDES_BUFS_RX_L1_EN |
1369 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER; 1275 PM_CTRL_SA_DLY_EN |
1370 pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK << 1276 PM_CTRL_HOTRST);
1371 PM_CTRL_PM_REQ_TIMER_SHIFT); 1277 /* disable l0s if link down or l2cb */
1372 pm_ctrl_data |= AT_ASPM_L1_TIMER << 1278 if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b)
1373 PM_CTRL_PM_REQ_TIMER_SHIFT;
1374 pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
1375 pm_ctrl_data &= ~PM_CTRL_HOTRST;
1376 pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1377 pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
1378 }
1379 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1380 if (linkup) {
1381 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1382 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1383 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1384 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1385 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1386 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
1387
1388 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1389 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1390 if (hw->nic_type == athr_l2c_b)
1391 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
1392 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1393 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1394 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1395 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1396 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1397 if (hw->adapter->link_speed == SPEED_100 ||
1398 hw->adapter->link_speed == SPEED_1000) {
1399 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1400 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1401 if (hw->nic_type == athr_l2c_b)
1402 link_l1_timer = 7;
1403 else if (hw->nic_type == athr_l2c_b2 ||
1404 hw->nic_type == athr_l1d_2)
1405 link_l1_timer = 4;
1406 pm_ctrl_data |= link_l1_timer <<
1407 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1408 }
1409 } else {
1410 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1411 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1412 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1413 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1414 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1279 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1415 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1280 } else { /* l1c */
1416 1281 pm_ctrl_data =
1282 FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0);
1283 if (link_speed != SPEED_0) {
1284 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN |
1285 PM_CTRL_SERDES_PLL_L1_EN |
1286 PM_CTRL_SERDES_BUFS_RX_L1_EN;
1287 pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 |
1288 PM_CTRL_CLK_SWH_L1 |
1289 PM_CTRL_ASPM_L0S_EN |
1290 PM_CTRL_ASPM_L1_EN);
1291 } else { /* link down */
1292 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1293 pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1294 PM_CTRL_SERDES_PLL_L1_EN |
1295 PM_CTRL_SERDES_BUFS_RX_L1_EN |
1296 PM_CTRL_ASPM_L0S_EN);
1417 } 1297 }
1418 } else {
1419 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1420 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1421 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1422 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1423
1424 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1425 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1426 else
1427 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1428 } 1298 }
1429 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); 1299 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1430 1300
@@ -1487,6 +1357,10 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1487 u32 intr_modrt_data; 1357 u32 intr_modrt_data;
1488 u32 data; 1358 u32 data;
1489 1359
1360 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1361 master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN |
1362 MASTER_CTRL_RX_ITIMER_EN |
1363 MASTER_CTRL_INT_RDCLR);
1490 /* clear interrupt status */ 1364 /* clear interrupt status */
1491 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); 1365 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
1492 /* Clear any WOL status */ 1366 /* Clear any WOL status */
@@ -1525,25 +1399,15 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1525 master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; 1399 master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
1526 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); 1400 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1527 1401
1528 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { 1402 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1529 AT_WRITE_REG(hw, REG_CMB_TPD_THRESH, 1403 hw->smb_timer & SMB_STAT_TIMER_MASK);
1530 hw->cmb_tpd & CMB_TPD_THRESH_MASK);
1531 AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
1532 hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
1533 }
1534 1404
1535 if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
1536 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1537 hw->smb_timer & SMB_STAT_TIMER_MASK);
1538 /* set MTU */ 1405 /* set MTU */
1539 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + 1406 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1540 VLAN_HLEN + ETH_FCS_LEN); 1407 VLAN_HLEN + ETH_FCS_LEN);
1541 /* HDS, disable */
1542 AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
1543 1408
1544 atl1c_configure_tx(adapter); 1409 atl1c_configure_tx(adapter);
1545 atl1c_configure_rx(adapter); 1410 atl1c_configure_rx(adapter);
1546 atl1c_configure_rss(adapter);
1547 atl1c_configure_dma(adapter); 1411 atl1c_configure_dma(adapter);
1548 1412
1549 return 0; 1413 return 0;
@@ -1635,16 +1499,11 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1635 struct pci_dev *pdev = adapter->pdev; 1499 struct pci_dev *pdev = adapter->pdev;
1636 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1500 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1637 u16 hw_next_to_clean; 1501 u16 hw_next_to_clean;
1638 u16 shift; 1502 u16 reg;
1639 u32 data;
1640 1503
1641 if (type == atl1c_trans_high) 1504 reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
1642 shift = MB_HTPD_CONS_IDX_SHIFT;
1643 else
1644 shift = MB_NTPD_CONS_IDX_SHIFT;
1645 1505
1646 AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data); 1506 AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean);
1647 hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
1648 1507
1649 while (next_to_clean != hw_next_to_clean) { 1508 while (next_to_clean != hw_next_to_clean) {
1650 buffer_info = &tpd_ring->buffer_info[next_to_clean]; 1509 buffer_info = &tpd_ring->buffer_info[next_to_clean];
@@ -1746,9 +1605,9 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1746 skb_checksum_none_assert(skb); 1605 skb_checksum_none_assert(skb);
1747} 1606}
1748 1607
1749static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid) 1608static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1750{ 1609{
1751 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid]; 1610 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1752 struct pci_dev *pdev = adapter->pdev; 1611 struct pci_dev *pdev = adapter->pdev;
1753 struct atl1c_buffer *buffer_info, *next_info; 1612 struct atl1c_buffer *buffer_info, *next_info;
1754 struct sk_buff *skb; 1613 struct sk_buff *skb;
@@ -1800,7 +1659,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
1800 /* TODO: update mailbox here */ 1659 /* TODO: update mailbox here */
1801 wmb(); 1660 wmb();
1802 rfd_ring->next_to_use = rfd_next_to_use; 1661 rfd_ring->next_to_use = rfd_next_to_use;
1803 AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid], 1662 AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX,
1804 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); 1663 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
1805 } 1664 }
1806 1665
@@ -1839,7 +1698,7 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1839 rfd_ring->next_to_clean = rfd_index; 1698 rfd_ring->next_to_clean = rfd_index;
1840} 1699}
1841 1700
1842static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, 1701static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
1843 int *work_done, int work_to_do) 1702 int *work_done, int work_to_do)
1844{ 1703{
1845 u16 rfd_num, rfd_index; 1704 u16 rfd_num, rfd_index;
@@ -1847,8 +1706,8 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
1847 u16 length; 1706 u16 length;
1848 struct pci_dev *pdev = adapter->pdev; 1707 struct pci_dev *pdev = adapter->pdev;
1849 struct net_device *netdev = adapter->netdev; 1708 struct net_device *netdev = adapter->netdev;
1850 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que]; 1709 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1851 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que]; 1710 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
1852 struct sk_buff *skb; 1711 struct sk_buff *skb;
1853 struct atl1c_recv_ret_status *rrs; 1712 struct atl1c_recv_ret_status *rrs;
1854 struct atl1c_buffer *buffer_info; 1713 struct atl1c_buffer *buffer_info;
@@ -1914,7 +1773,7 @@ rrs_checked:
1914 count++; 1773 count++;
1915 } 1774 }
1916 if (count) 1775 if (count)
1917 atl1c_alloc_rx_buffer(adapter, que); 1776 atl1c_alloc_rx_buffer(adapter);
1918} 1777}
1919 1778
1920/* 1779/*
@@ -1931,7 +1790,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget)
1931 if (!netif_carrier_ok(adapter->netdev)) 1790 if (!netif_carrier_ok(adapter->netdev))
1932 goto quit_polling; 1791 goto quit_polling;
1933 /* just enable one RXQ */ 1792 /* just enable one RXQ */
1934 atl1c_clean_rx_irq(adapter, 0, &work_done, budget); 1793 atl1c_clean_rx_irq(adapter, &work_done, budget);
1935 1794
1936 if (work_done < budget) { 1795 if (work_done < budget) {
1937quit_polling: 1796quit_polling:
@@ -2206,23 +2065,10 @@ static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
2206 struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type) 2065 struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
2207{ 2066{
2208 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; 2067 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
2209 u32 prod_data; 2068 u16 reg;
2210 2069
2211 AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data); 2070 reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX;
2212 switch (type) { 2071 AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use);
2213 case atl1c_trans_high:
2214 prod_data &= 0xFFFF0000;
2215 prod_data |= tpd_ring->next_to_use & 0xFFFF;
2216 break;
2217 case atl1c_trans_normal:
2218 prod_data &= 0x0000FFFF;
2219 prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
2220 break;
2221 default:
2222 break;
2223 }
2224 wmb();
2225 AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
2226} 2072}
2227 2073
2228static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, 2074static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
@@ -2307,8 +2153,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
2307 "Unable to allocate MSI interrupt Error: %d\n", 2153 "Unable to allocate MSI interrupt Error: %d\n",
2308 err); 2154 err);
2309 adapter->have_msi = false; 2155 adapter->have_msi = false;
2310 } else 2156 }
2311 netdev->irq = pdev->irq;
2312 2157
2313 if (!adapter->have_msi) 2158 if (!adapter->have_msi)
2314 flags |= IRQF_SHARED; 2159 flags |= IRQF_SHARED;
@@ -2333,19 +2178,16 @@ static int atl1c_up(struct atl1c_adapter *adapter)
2333 struct net_device *netdev = adapter->netdev; 2178 struct net_device *netdev = adapter->netdev;
2334 int num; 2179 int num;
2335 int err; 2180 int err;
2336 int i;
2337 2181
2338 netif_carrier_off(netdev); 2182 netif_carrier_off(netdev);
2339 atl1c_init_ring_ptrs(adapter); 2183 atl1c_init_ring_ptrs(adapter);
2340 atl1c_set_multi(netdev); 2184 atl1c_set_multi(netdev);
2341 atl1c_restore_vlan(adapter); 2185 atl1c_restore_vlan(adapter);
2342 2186
2343 for (i = 0; i < adapter->num_rx_queues; i++) { 2187 num = atl1c_alloc_rx_buffer(adapter);
2344 num = atl1c_alloc_rx_buffer(adapter, i); 2188 if (unlikely(num == 0)) {
2345 if (unlikely(num == 0)) { 2189 err = -ENOMEM;
2346 err = -ENOMEM; 2190 goto err_alloc_rx;
2347 goto err_alloc_rx;
2348 }
2349 } 2191 }
2350 2192
2351 if (atl1c_configure(adapter)) { 2193 if (atl1c_configure(adapter)) {
@@ -2383,6 +2225,8 @@ static void atl1c_down(struct atl1c_adapter *adapter)
2383 napi_disable(&adapter->napi); 2225 napi_disable(&adapter->napi);
2384 atl1c_irq_disable(adapter); 2226 atl1c_irq_disable(adapter);
2385 atl1c_free_irq(adapter); 2227 atl1c_free_irq(adapter);
2228 /* disable ASPM if device inactive */
2229 atl1c_disable_l0s_l1(&adapter->hw);
2386 /* reset MAC to disable all RX/TX */ 2230 /* reset MAC to disable all RX/TX */
2387 atl1c_reset_mac(&adapter->hw); 2231 atl1c_reset_mac(&adapter->hw);
2388 msleep(1); 2232 msleep(1);
@@ -2510,9 +2354,14 @@ static int atl1c_suspend(struct device *dev)
2510 mac_ctrl_data |= MAC_CTRL_DUPLX; 2354 mac_ctrl_data |= MAC_CTRL_DUPLX;
2511 2355
2512 /* turn on magic packet wol */ 2356 /* turn on magic packet wol */
2513 if (wufc & AT_WUFC_MAG) 2357 if (wufc & AT_WUFC_MAG) {
2514 wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; 2358 wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2515 2359 if (hw->nic_type == athr_l2c_b &&
2360 hw->revision_id == L2CB_V11) {
2361 wol_ctrl_data |=
2362 WOL_PATTERN_EN | WOL_PATTERN_PME_EN;
2363 }
2364 }
2516 if (wufc & AT_WUFC_LNKC) { 2365 if (wufc & AT_WUFC_LNKC) {
2517 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; 2366 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2518 /* only link up can wake up */ 2367 /* only link up can wake up */
@@ -2616,7 +2465,6 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2616 SET_NETDEV_DEV(netdev, &pdev->dev); 2465 SET_NETDEV_DEV(netdev, &pdev->dev);
2617 pci_set_drvdata(pdev, netdev); 2466 pci_set_drvdata(pdev, netdev);
2618 2467
2619 netdev->irq = pdev->irq;
2620 netdev->netdev_ops = &atl1c_netdev_ops; 2468 netdev->netdev_ops = &atl1c_netdev_ops;
2621 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2469 netdev->watchdog_timeo = AT_TX_WATCHDOG;
2622 atl1c_set_ethtool_ops(netdev); 2470 atl1c_set_ethtool_ops(netdev);
@@ -2706,7 +2554,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2706 dev_err(&pdev->dev, "cannot map device registers\n"); 2554 dev_err(&pdev->dev, "cannot map device registers\n");
2707 goto err_ioremap; 2555 goto err_ioremap;
2708 } 2556 }
2709 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
2710 2557
2711 /* init mii data */ 2558 /* init mii data */
2712 adapter->mii.dev = netdev; 2559 adapter->mii.dev = netdev;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 93ff2b231284..1220e511ced6 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1883,27 +1883,24 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
1883 int err = 0; 1883 int err = 0;
1884 1884
1885 adapter->have_msi = true; 1885 adapter->have_msi = true;
1886 err = pci_enable_msi(adapter->pdev); 1886 err = pci_enable_msi(pdev);
1887 if (err) { 1887 if (err) {
1888 netdev_dbg(adapter->netdev, 1888 netdev_dbg(netdev,
1889 "Unable to allocate MSI interrupt Error: %d\n", err); 1889 "Unable to allocate MSI interrupt Error: %d\n", err);
1890 adapter->have_msi = false; 1890 adapter->have_msi = false;
1891 } else 1891 }
1892 netdev->irq = pdev->irq;
1893
1894 1892
1895 if (!adapter->have_msi) 1893 if (!adapter->have_msi)
1896 flags |= IRQF_SHARED; 1894 flags |= IRQF_SHARED;
1897 err = request_irq(adapter->pdev->irq, atl1e_intr, flags, 1895 err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
1898 netdev->name, netdev);
1899 if (err) { 1896 if (err) {
1900 netdev_dbg(adapter->netdev, 1897 netdev_dbg(adapter->netdev,
1901 "Unable to allocate interrupt Error: %d\n", err); 1898 "Unable to allocate interrupt Error: %d\n", err);
1902 if (adapter->have_msi) 1899 if (adapter->have_msi)
1903 pci_disable_msi(adapter->pdev); 1900 pci_disable_msi(pdev);
1904 return err; 1901 return err;
1905 } 1902 }
1906 netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n"); 1903 netdev_dbg(netdev, "atl1e_request_irq OK\n");
1907 return err; 1904 return err;
1908} 1905}
1909 1906
@@ -2233,7 +2230,6 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2233 SET_NETDEV_DEV(netdev, &pdev->dev); 2230 SET_NETDEV_DEV(netdev, &pdev->dev);
2234 pci_set_drvdata(pdev, netdev); 2231 pci_set_drvdata(pdev, netdev);
2235 2232
2236 netdev->irq = pdev->irq;
2237 netdev->netdev_ops = &atl1e_netdev_ops; 2233 netdev->netdev_ops = &atl1e_netdev_ops;
2238 2234
2239 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2235 netdev->watchdog_timeo = AT_TX_WATCHDOG;
@@ -2319,7 +2315,6 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2319 netdev_err(netdev, "cannot map device registers\n"); 2315 netdev_err(netdev, "cannot map device registers\n");
2320 goto err_ioremap; 2316 goto err_ioremap;
2321 } 2317 }
2322 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
2323 2318
2324 /* init mii data */ 2319 /* init mii data */
2325 adapter->mii.dev = netdev; 2320 adapter->mii.dev = netdev;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 40ac41436549..5d10884e5080 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -266,7 +266,7 @@ static s32 atl1_reset_hw(struct atl1_hw *hw)
266 * interrupts & Clear any pending interrupt events 266 * interrupts & Clear any pending interrupt events
267 */ 267 */
268 /* 268 /*
269 * iowrite32(0, hw->hw_addr + REG_IMR); 269 * atlx_irq_disable(adapter);
270 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); 270 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
271 */ 271 */
272 272
@@ -1917,7 +1917,7 @@ next:
1917 return num_alloc; 1917 return num_alloc;
1918} 1918}
1919 1919
1920static void atl1_intr_rx(struct atl1_adapter *adapter) 1920static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
1921{ 1921{
1922 int i, count; 1922 int i, count;
1923 u16 length; 1923 u16 length;
@@ -1933,7 +1933,7 @@ static void atl1_intr_rx(struct atl1_adapter *adapter)
1933 1933
1934 rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); 1934 rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
1935 1935
1936 while (1) { 1936 while (count < budget) {
1937 rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); 1937 rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
1938 i = 1; 1938 i = 1;
1939 if (likely(rrd->xsz.valid)) { /* packet valid */ 1939 if (likely(rrd->xsz.valid)) { /* packet valid */
@@ -2032,7 +2032,7 @@ rrd_ok:
2032 2032
2033 __vlan_hwaccel_put_tag(skb, vlan_tag); 2033 __vlan_hwaccel_put_tag(skb, vlan_tag);
2034 } 2034 }
2035 netif_rx(skb); 2035 netif_receive_skb(skb);
2036 2036
2037 /* let protocol layer free skb */ 2037 /* let protocol layer free skb */
2038 buffer_info->skb = NULL; 2038 buffer_info->skb = NULL;
@@ -2065,14 +2065,17 @@ rrd_ok:
2065 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); 2065 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
2066 spin_unlock(&adapter->mb_lock); 2066 spin_unlock(&adapter->mb_lock);
2067 } 2067 }
2068
2069 return count;
2068} 2070}
2069 2071
2070static void atl1_intr_tx(struct atl1_adapter *adapter) 2072static int atl1_intr_tx(struct atl1_adapter *adapter)
2071{ 2073{
2072 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 2074 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
2073 struct atl1_buffer *buffer_info; 2075 struct atl1_buffer *buffer_info;
2074 u16 sw_tpd_next_to_clean; 2076 u16 sw_tpd_next_to_clean;
2075 u16 cmb_tpd_next_to_clean; 2077 u16 cmb_tpd_next_to_clean;
2078 int count = 0;
2076 2079
2077 sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); 2080 sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
2078 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); 2081 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
@@ -2092,12 +2095,16 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
2092 2095
2093 if (++sw_tpd_next_to_clean == tpd_ring->count) 2096 if (++sw_tpd_next_to_clean == tpd_ring->count)
2094 sw_tpd_next_to_clean = 0; 2097 sw_tpd_next_to_clean = 0;
2098
2099 count++;
2095 } 2100 }
2096 atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); 2101 atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
2097 2102
2098 if (netif_queue_stopped(adapter->netdev) && 2103 if (netif_queue_stopped(adapter->netdev) &&
2099 netif_carrier_ok(adapter->netdev)) 2104 netif_carrier_ok(adapter->netdev))
2100 netif_wake_queue(adapter->netdev); 2105 netif_wake_queue(adapter->netdev);
2106
2107 return count;
2101} 2108}
2102 2109
2103static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) 2110static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
@@ -2439,6 +2446,49 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2439 return NETDEV_TX_OK; 2446 return NETDEV_TX_OK;
2440} 2447}
2441 2448
2449static int atl1_rings_clean(struct napi_struct *napi, int budget)
2450{
2451 struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi);
2452 int work_done = atl1_intr_rx(adapter, budget);
2453
2454 if (atl1_intr_tx(adapter))
2455 work_done = budget;
2456
2457 /* Let's come again to process some more packets */
2458 if (work_done >= budget)
2459 return work_done;
2460
2461 napi_complete(napi);
2462 /* re-enable Interrupt */
2463 if (likely(adapter->int_enabled))
2464 atlx_imr_set(adapter, IMR_NORMAL_MASK);
2465 return work_done;
2466}
2467
2468static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
2469{
2470 if (!napi_schedule_prep(&adapter->napi))
2471 /* It is possible in case even the RX/TX ints are disabled via IMR
2472 * register the ISR bits are set anyway (but do not produce IRQ).
2473 * To handle such situation the napi functions used to check is
2474 * something scheduled or not.
2475 */
2476 return 0;
2477
2478 __napi_schedule(&adapter->napi);
2479
2480 /*
2481 * Disable RX/TX ints via IMR register if it is
2482 * allowed. NAPI handler must reenable them in same
2483 * way.
2484 */
2485 if (!adapter->int_enabled)
2486 return 1;
2487
2488 atlx_imr_set(adapter, IMR_NORXTX_MASK);
2489 return 1;
2490}
2491
2442/* 2492/*
2443 * atl1_intr - Interrupt Handler 2493 * atl1_intr - Interrupt Handler
2444 * @irq: interrupt number 2494 * @irq: interrupt number
@@ -2449,78 +2499,74 @@ static irqreturn_t atl1_intr(int irq, void *data)
2449{ 2499{
2450 struct atl1_adapter *adapter = netdev_priv(data); 2500 struct atl1_adapter *adapter = netdev_priv(data);
2451 u32 status; 2501 u32 status;
2452 int max_ints = 10;
2453 2502
2454 status = adapter->cmb.cmb->int_stats; 2503 status = adapter->cmb.cmb->int_stats;
2455 if (!status) 2504 if (!status)
2456 return IRQ_NONE; 2505 return IRQ_NONE;
2457 2506
2458 do { 2507 /* clear CMB interrupt status at once,
2459 /* clear CMB interrupt status at once */ 2508 * but leave rx/tx interrupt status in case it should be dropped
2460 adapter->cmb.cmb->int_stats = 0; 2509 * only if rx/tx processing queued. In other case interrupt
2461 2510 * can be lost.
2462 if (status & ISR_GPHY) /* clear phy status */ 2511 */
2463 atlx_clear_phy_int(adapter); 2512 adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX);
2464 2513
2465 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ 2514 if (status & ISR_GPHY) /* clear phy status */
2466 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); 2515 atlx_clear_phy_int(adapter);
2467 2516
2468 /* check if SMB intr */ 2517 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
2469 if (status & ISR_SMB) 2518 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
2470 atl1_inc_smb(adapter);
2471 2519
2472 /* check if PCIE PHY Link down */ 2520 /* check if SMB intr */
2473 if (status & ISR_PHY_LINKDOWN) { 2521 if (status & ISR_SMB)
2474 if (netif_msg_intr(adapter)) 2522 atl1_inc_smb(adapter);
2475 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2476 "pcie phy link down %x\n", status);
2477 if (netif_running(adapter->netdev)) { /* reset MAC */
2478 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2479 schedule_work(&adapter->pcie_dma_to_rst_task);
2480 return IRQ_HANDLED;
2481 }
2482 }
2483 2523
2484 /* check if DMA read/write error ? */ 2524 /* check if PCIE PHY Link down */
2485 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { 2525 if (status & ISR_PHY_LINKDOWN) {
2486 if (netif_msg_intr(adapter)) 2526 if (netif_msg_intr(adapter))
2487 dev_printk(KERN_DEBUG, &adapter->pdev->dev, 2527 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2488 "pcie DMA r/w error (status = 0x%x)\n", 2528 "pcie phy link down %x\n", status);
2489 status); 2529 if (netif_running(adapter->netdev)) { /* reset MAC */
2490 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2530 atlx_irq_disable(adapter);
2491 schedule_work(&adapter->pcie_dma_to_rst_task); 2531 schedule_work(&adapter->reset_dev_task);
2492 return IRQ_HANDLED; 2532 return IRQ_HANDLED;
2493 } 2533 }
2534 }
2494 2535
2495 /* link event */ 2536 /* check if DMA read/write error ? */
2496 if (status & ISR_GPHY) { 2537 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
2497 adapter->soft_stats.tx_carrier_errors++; 2538 if (netif_msg_intr(adapter))
2498 atl1_check_for_link(adapter); 2539 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2499 } 2540 "pcie DMA r/w error (status = 0x%x)\n",
2541 status);
2542 atlx_irq_disable(adapter);
2543 schedule_work(&adapter->reset_dev_task);
2544 return IRQ_HANDLED;
2545 }
2500 2546
2501 /* transmit event */ 2547 /* link event */
2502 if (status & ISR_CMB_TX) 2548 if (status & ISR_GPHY) {
2503 atl1_intr_tx(adapter); 2549 adapter->soft_stats.tx_carrier_errors++;
2504 2550 atl1_check_for_link(adapter);
2505 /* rx exception */ 2551 }
2506 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2507 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2508 ISR_HOST_RRD_OV | ISR_CMB_RX))) {
2509 if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2510 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2511 ISR_HOST_RRD_OV))
2512 if (netif_msg_intr(adapter))
2513 dev_printk(KERN_DEBUG,
2514 &adapter->pdev->dev,
2515 "rx exception, ISR = 0x%x\n",
2516 status);
2517 atl1_intr_rx(adapter);
2518 }
2519 2552
2520 if (--max_ints < 0) 2553 /* transmit or receive event */
2521 break; 2554 if (status & (ISR_CMB_TX | ISR_CMB_RX) &&
2555 atl1_sched_rings_clean(adapter))
2556 adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats &
2557 ~(ISR_CMB_TX | ISR_CMB_RX);
2522 2558
2523 } while ((status = adapter->cmb.cmb->int_stats)); 2559 /* rx exception */
2560 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2561 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2562 ISR_HOST_RRD_OV))) {
2563 if (netif_msg_intr(adapter))
2564 dev_printk(KERN_DEBUG,
2565 &adapter->pdev->dev,
2566 "rx exception, ISR = 0x%x\n",
2567 status);
2568 atl1_sched_rings_clean(adapter);
2569 }
2524 2570
2525 /* re-enable Interrupt */ 2571 /* re-enable Interrupt */
2526 iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); 2572 iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
@@ -2599,6 +2645,7 @@ static s32 atl1_up(struct atl1_adapter *adapter)
2599 if (unlikely(err)) 2645 if (unlikely(err))
2600 goto err_up; 2646 goto err_up;
2601 2647
2648 napi_enable(&adapter->napi);
2602 atlx_irq_enable(adapter); 2649 atlx_irq_enable(adapter);
2603 atl1_check_link(adapter); 2650 atl1_check_link(adapter);
2604 netif_start_queue(netdev); 2651 netif_start_queue(netdev);
@@ -2615,6 +2662,7 @@ static void atl1_down(struct atl1_adapter *adapter)
2615{ 2662{
2616 struct net_device *netdev = adapter->netdev; 2663 struct net_device *netdev = adapter->netdev;
2617 2664
2665 napi_disable(&adapter->napi);
2618 netif_stop_queue(netdev); 2666 netif_stop_queue(netdev);
2619 del_timer_sync(&adapter->phy_config_timer); 2667 del_timer_sync(&adapter->phy_config_timer);
2620 adapter->phy_timer_pending = false; 2668 adapter->phy_timer_pending = false;
@@ -2633,10 +2681,10 @@ static void atl1_down(struct atl1_adapter *adapter)
2633 atl1_clean_rx_ring(adapter); 2681 atl1_clean_rx_ring(adapter);
2634} 2682}
2635 2683
2636static void atl1_tx_timeout_task(struct work_struct *work) 2684static void atl1_reset_dev_task(struct work_struct *work)
2637{ 2685{
2638 struct atl1_adapter *adapter = 2686 struct atl1_adapter *adapter =
2639 container_of(work, struct atl1_adapter, tx_timeout_task); 2687 container_of(work, struct atl1_adapter, reset_dev_task);
2640 struct net_device *netdev = adapter->netdev; 2688 struct net_device *netdev = adapter->netdev;
2641 2689
2642 netif_device_detach(netdev); 2690 netif_device_detach(netdev);
@@ -2971,6 +3019,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2971 3019
2972 netdev->netdev_ops = &atl1_netdev_ops; 3020 netdev->netdev_ops = &atl1_netdev_ops;
2973 netdev->watchdog_timeo = 5 * HZ; 3021 netdev->watchdog_timeo = 5 * HZ;
3022 netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
2974 3023
2975 netdev->ethtool_ops = &atl1_ethtool_ops; 3024 netdev->ethtool_ops = &atl1_ethtool_ops;
2976 adapter->bd_number = cards_found; 3025 adapter->bd_number = cards_found;
@@ -3038,12 +3087,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
3038 (unsigned long)adapter); 3087 (unsigned long)adapter);
3039 adapter->phy_timer_pending = false; 3088 adapter->phy_timer_pending = false;
3040 3089
3041 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); 3090 INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
3042 3091
3043 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); 3092 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
3044 3093
3045 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
3046
3047 err = register_netdev(netdev); 3094 err = register_netdev(netdev);
3048 if (err) 3095 if (err)
3049 goto err_common; 3096 goto err_common;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 109d6da8be97..3bf79a56220d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -275,13 +275,17 @@ static u32 atl1_check_link(struct atl1_adapter *adapter);
275#define ISR_DIS_SMB 0x20000000 275#define ISR_DIS_SMB 0x20000000
276#define ISR_DIS_DMA 0x40000000 276#define ISR_DIS_DMA 0x40000000
277 277
278/* Normal Interrupt mask */ 278/* Normal Interrupt mask without RX/TX enabled */
279#define IMR_NORMAL_MASK (\ 279#define IMR_NORXTX_MASK (\
280 ISR_SMB |\ 280 ISR_SMB |\
281 ISR_GPHY |\ 281 ISR_GPHY |\
282 ISR_PHY_LINKDOWN|\ 282 ISR_PHY_LINKDOWN|\
283 ISR_DMAR_TO_RST |\ 283 ISR_DMAR_TO_RST |\
284 ISR_DMAW_TO_RST |\ 284 ISR_DMAW_TO_RST)
285
286/* Normal Interrupt mask */
287#define IMR_NORMAL_MASK (\
288 IMR_NORXTX_MASK |\
285 ISR_CMB_TX |\ 289 ISR_CMB_TX |\
286 ISR_CMB_RX) 290 ISR_CMB_RX)
287 291
@@ -758,9 +762,9 @@ struct atl1_adapter {
758 u16 link_speed; 762 u16 link_speed;
759 u16 link_duplex; 763 u16 link_duplex;
760 spinlock_t lock; 764 spinlock_t lock;
761 struct work_struct tx_timeout_task; 765 struct napi_struct napi;
766 struct work_struct reset_dev_task;
762 struct work_struct link_chg_task; 767 struct work_struct link_chg_task;
763 struct work_struct pcie_dma_to_rst_task;
764 768
765 struct timer_list phy_config_timer; 769 struct timer_list phy_config_timer;
766 bool phy_timer_pending; 770 bool phy_timer_pending;
@@ -782,6 +786,12 @@ struct atl1_adapter {
782 u16 ict; /* interrupt clear timer (2us resolution */ 786 u16 ict; /* interrupt clear timer (2us resolution */
783 struct mii_if_info mii; /* MII interface info */ 787 struct mii_if_info mii; /* MII interface info */
784 788
789 /*
790 * Use this value to check is napi handler allowed to
791 * enable ints or not
792 */
793 bool int_enabled;
794
785 u32 bd_number; /* board number */ 795 u32 bd_number; /* board number */
786 bool pci_using_64; 796 bool pci_using_64;
787 struct atl1_hw hw; 797 struct atl1_hw hw;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 3cd8837236dc..b4f3aa49a7fc 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -155,14 +155,21 @@ static void atlx_set_multi(struct net_device *netdev)
155 } 155 }
156} 156}
157 157
158static inline void atlx_imr_set(struct atlx_adapter *adapter,
159 unsigned int imr)
160{
161 iowrite32(imr, adapter->hw.hw_addr + REG_IMR);
162 ioread32(adapter->hw.hw_addr + REG_IMR);
163}
164
158/* 165/*
159 * atlx_irq_enable - Enable default interrupt generation settings 166 * atlx_irq_enable - Enable default interrupt generation settings
160 * @adapter: board private structure 167 * @adapter: board private structure
161 */ 168 */
162static void atlx_irq_enable(struct atlx_adapter *adapter) 169static void atlx_irq_enable(struct atlx_adapter *adapter)
163{ 170{
164 iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); 171 atlx_imr_set(adapter, IMR_NORMAL_MASK);
165 ioread32(adapter->hw.hw_addr + REG_IMR); 172 adapter->int_enabled = true;
166} 173}
167 174
168/* 175/*
@@ -171,8 +178,8 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
171 */ 178 */
172static void atlx_irq_disable(struct atlx_adapter *adapter) 179static void atlx_irq_disable(struct atlx_adapter *adapter)
173{ 180{
174 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 181 adapter->int_enabled = false;
175 ioread32(adapter->hw.hw_addr + REG_IMR); 182 atlx_imr_set(adapter, 0);
176 synchronize_irq(adapter->pdev->irq); 183 synchronize_irq(adapter->pdev->irq);
177} 184}
178 185
@@ -194,7 +201,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
194{ 201{
195 struct atlx_adapter *adapter = netdev_priv(netdev); 202 struct atlx_adapter *adapter = netdev_priv(netdev);
196 /* Do the reset outside of interrupt context */ 203 /* Do the reset outside of interrupt context */
197 schedule_work(&adapter->tx_timeout_task); 204 schedule_work(&adapter->reset_dev_task);
198} 205}
199 206
200/* 207/*
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8297e2868736..ab55979b3756 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -7343,8 +7343,7 @@ static struct {
7343 { "rx_fw_discards" }, 7343 { "rx_fw_discards" },
7344}; 7344};
7345 7345
7346#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\ 7346#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7347 sizeof(bnx2_stats_str_arr[0]))
7348 7347
7349#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) 7348#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7350 7349
@@ -7976,7 +7975,6 @@ static int __devinit
7976bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) 7975bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7977{ 7976{
7978 struct bnx2 *bp; 7977 struct bnx2 *bp;
7979 unsigned long mem_len;
7980 int rc, i, j; 7978 int rc, i, j;
7981 u32 reg; 7979 u32 reg;
7982 u64 dma_mask, persist_dma_mask; 7980 u64 dma_mask, persist_dma_mask;
@@ -8036,13 +8034,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8036#endif 8034#endif
8037 INIT_WORK(&bp->reset_task, bnx2_reset_task); 8035 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8038 8036
8039 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 8037 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8040 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); 8038 TX_MAX_TSS_RINGS + 1));
8041 dev->mem_end = dev->mem_start + mem_len;
8042 dev->irq = pdev->irq;
8043
8044 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
8045
8046 if (!bp->regview) { 8039 if (!bp->regview) {
8047 dev_err(&pdev->dev, "Cannot map register space, aborting\n"); 8040 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8048 rc = -ENOMEM; 8041 rc = -ENOMEM;
@@ -8346,10 +8339,8 @@ err_out_unmap:
8346 bp->flags &= ~BNX2_FLAG_AER_ENABLED; 8339 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8347 } 8340 }
8348 8341
8349 if (bp->regview) { 8342 pci_iounmap(pdev, bp->regview);
8350 iounmap(bp->regview); 8343 bp->regview = NULL;
8351 bp->regview = NULL;
8352 }
8353 8344
8354err_out_release: 8345err_out_release:
8355 pci_release_regions(pdev); 8346 pci_release_regions(pdev);
@@ -8432,7 +8423,7 @@ static int __devinit
8432bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8423bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8433{ 8424{
8434 static int version_printed = 0; 8425 static int version_printed = 0;
8435 struct net_device *dev = NULL; 8426 struct net_device *dev;
8436 struct bnx2 *bp; 8427 struct bnx2 *bp;
8437 int rc; 8428 int rc;
8438 char str[40]; 8429 char str[40];
@@ -8442,15 +8433,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8442 8433
8443 /* dev zeroed in init_etherdev */ 8434 /* dev zeroed in init_etherdev */
8444 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); 8435 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8445
8446 if (!dev) 8436 if (!dev)
8447 return -ENOMEM; 8437 return -ENOMEM;
8448 8438
8449 rc = bnx2_init_board(pdev, dev); 8439 rc = bnx2_init_board(pdev, dev);
8450 if (rc < 0) { 8440 if (rc < 0)
8451 free_netdev(dev); 8441 goto err_free;
8452 return rc;
8453 }
8454 8442
8455 dev->netdev_ops = &bnx2_netdev_ops; 8443 dev->netdev_ops = &bnx2_netdev_ops;
8456 dev->watchdog_timeo = TX_TIMEOUT; 8444 dev->watchdog_timeo = TX_TIMEOUT;
@@ -8480,22 +8468,21 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8480 goto error; 8468 goto error;
8481 } 8469 }
8482 8470
8483 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n", 8471 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8484 board_info[ent->driver_data].name, 8472 "node addr %pM\n", board_info[ent->driver_data].name,
8485 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 8473 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8486 ((CHIP_ID(bp) & 0x0ff0) >> 4), 8474 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8487 bnx2_bus_string(bp, str), 8475 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8488 dev->base_addr, 8476 pdev->irq, dev->dev_addr);
8489 bp->pdev->irq, dev->dev_addr);
8490 8477
8491 return 0; 8478 return 0;
8492 8479
8493error: 8480error:
8494 if (bp->regview) 8481 iounmap(bp->regview);
8495 iounmap(bp->regview);
8496 pci_release_regions(pdev); 8482 pci_release_regions(pdev);
8497 pci_disable_device(pdev); 8483 pci_disable_device(pdev);
8498 pci_set_drvdata(pdev, NULL); 8484 pci_set_drvdata(pdev, NULL);
8485err_free:
8499 free_netdev(dev); 8486 free_netdev(dev);
8500 return rc; 8487 return rc;
8501} 8488}
@@ -8511,8 +8498,7 @@ bnx2_remove_one(struct pci_dev *pdev)
8511 del_timer_sync(&bp->timer); 8498 del_timer_sync(&bp->timer);
8512 cancel_work_sync(&bp->reset_task); 8499 cancel_work_sync(&bp->reset_task);
8513 8500
8514 if (bp->regview) 8501 pci_iounmap(bp->pdev, bp->regview);
8515 iounmap(bp->regview);
8516 8502
8517 kfree(bp->temp_stats_blk); 8503 kfree(bp->temp_stats_blk);
8518 8504
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2c9ee552dffc..e30e2a2f354c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,13 +23,17 @@
23 * (you will need to reboot afterwards) */ 23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */ 24/* #define BNX2X_STOP_ON_ERROR */
25 25
26#define DRV_MODULE_VERSION "1.72.10-0" 26#define DRV_MODULE_VERSION "1.72.50-0"
27#define DRV_MODULE_RELDATE "2012/02/20" 27#define DRV_MODULE_RELDATE "2012/04/23"
28#define BNX2X_BC_VER 0x040200 28#define BNX2X_BC_VER 0x040200
29 29
30#if defined(CONFIG_DCB) 30#if defined(CONFIG_DCB)
31#define BCM_DCBNL 31#define BCM_DCBNL
32#endif 32#endif
33
34
35#include "bnx2x_hsi.h"
36
33#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
34#define BCM_CNIC 1 38#define BCM_CNIC 1
35#include "../cnic_if.h" 39#include "../cnic_if.h"
@@ -345,7 +349,6 @@ union db_prod {
345#define SGE_PAGE_SIZE PAGE_SIZE 349#define SGE_PAGE_SIZE PAGE_SIZE
346#define SGE_PAGE_SHIFT PAGE_SHIFT 350#define SGE_PAGE_SHIFT PAGE_SHIFT
347#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) 351#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
348#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
349 352
350/* SGE ring related macros */ 353/* SGE ring related macros */
351#define NUM_RX_SGE_PAGES 2 354#define NUM_RX_SGE_PAGES 2
@@ -815,6 +818,8 @@ struct bnx2x_common {
815#define CHIP_NUM_57800_MF 0x16a5 818#define CHIP_NUM_57800_MF 0x16a5
816#define CHIP_NUM_57810 0x168e 819#define CHIP_NUM_57810 0x168e
817#define CHIP_NUM_57810_MF 0x16ae 820#define CHIP_NUM_57810_MF 0x16ae
821#define CHIP_NUM_57811 0x163d
822#define CHIP_NUM_57811_MF 0x163e
818#define CHIP_NUM_57840 0x168d 823#define CHIP_NUM_57840 0x168d
819#define CHIP_NUM_57840_MF 0x16ab 824#define CHIP_NUM_57840_MF 0x16ab
820#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) 825#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
@@ -826,6 +831,8 @@ struct bnx2x_common {
826#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) 831#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
827#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) 832#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
828#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) 833#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
834#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
835#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
829#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) 836#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
830#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) 837#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
831#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ 838#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
@@ -836,6 +843,8 @@ struct bnx2x_common {
836 CHIP_IS_57800_MF(bp) || \ 843 CHIP_IS_57800_MF(bp) || \
837 CHIP_IS_57810(bp) || \ 844 CHIP_IS_57810(bp) || \
838 CHIP_IS_57810_MF(bp) || \ 845 CHIP_IS_57810_MF(bp) || \
846 CHIP_IS_57811(bp) || \
847 CHIP_IS_57811_MF(bp) || \
839 CHIP_IS_57840(bp) || \ 848 CHIP_IS_57840(bp) || \
840 CHIP_IS_57840_MF(bp)) 849 CHIP_IS_57840_MF(bp))
841#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) 850#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
@@ -1053,6 +1062,13 @@ struct bnx2x_slowpath {
1053 struct flow_control_configuration pfc_config; 1062 struct flow_control_configuration pfc_config;
1054 } func_rdata; 1063 } func_rdata;
1055 1064
1065 /* afex ramrod can not be a part of func_rdata union because these
1066 * events might arrive in parallel to other events from func_rdata.
1067 * Therefore, if they would have been defined in the same union,
1068 * data can get corrupted.
1069 */
1070 struct afex_vif_list_ramrod_data func_afex_rdata;
1071
1056 /* used by dmae command executer */ 1072 /* used by dmae command executer */
1057 struct dmae_command dmae[MAX_DMAE_C]; 1073 struct dmae_command dmae[MAX_DMAE_C];
1058 1074
@@ -1169,6 +1185,7 @@ struct bnx2x_fw_stats_data {
1169enum { 1185enum {
1170 BNX2X_SP_RTNL_SETUP_TC, 1186 BNX2X_SP_RTNL_SETUP_TC,
1171 BNX2X_SP_RTNL_TX_TIMEOUT, 1187 BNX2X_SP_RTNL_TX_TIMEOUT,
1188 BNX2X_SP_RTNL_AFEX_F_UPDATE,
1172 BNX2X_SP_RTNL_FAN_FAILURE, 1189 BNX2X_SP_RTNL_FAN_FAILURE,
1173}; 1190};
1174 1191
@@ -1222,7 +1239,6 @@ struct bnx2x {
1222#define ETH_MAX_JUMBO_PACKET_SIZE 9600 1239#define ETH_MAX_JUMBO_PACKET_SIZE 9600
1223/* TCP with Timestamp Option (32) + IPv6 (40) */ 1240/* TCP with Timestamp Option (32) + IPv6 (40) */
1224#define ETH_MAX_TPA_HEADER_SIZE 72 1241#define ETH_MAX_TPA_HEADER_SIZE 72
1225#define ETH_MIN_TPA_HEADER_SIZE 40
1226 1242
1227 /* Max supported alignment is 256 (8 shift) */ 1243 /* Max supported alignment is 256 (8 shift) */
1228#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) 1244#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
@@ -1300,6 +1316,7 @@ struct bnx2x {
1300#define NO_ISCSI_FLAG (1 << 14) 1316#define NO_ISCSI_FLAG (1 << 14)
1301#define NO_FCOE_FLAG (1 << 15) 1317#define NO_FCOE_FLAG (1 << 15)
1302#define BC_SUPPORTS_PFC_STATS (1 << 17) 1318#define BC_SUPPORTS_PFC_STATS (1 << 17)
1319#define USING_SINGLE_MSIX_FLAG (1 << 20)
1303 1320
1304#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) 1321#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
1305#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) 1322#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1329,21 +1346,20 @@ struct bnx2x {
1329 struct bnx2x_common common; 1346 struct bnx2x_common common;
1330 struct bnx2x_port port; 1347 struct bnx2x_port port;
1331 1348
1332 struct cmng_struct_per_port cmng; 1349 struct cmng_init cmng;
1333 u32 vn_weight_sum; 1350
1334 u32 mf_config[E1HVN_MAX]; 1351 u32 mf_config[E1HVN_MAX];
1335 u32 mf2_config[E2_FUNC_MAX]; 1352 u32 mf_ext_config;
1336 u32 path_has_ovlan; /* E3 */ 1353 u32 path_has_ovlan; /* E3 */
1337 u16 mf_ov; 1354 u16 mf_ov;
1338 u8 mf_mode; 1355 u8 mf_mode;
1339#define IS_MF(bp) (bp->mf_mode != 0) 1356#define IS_MF(bp) (bp->mf_mode != 0)
1340#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) 1357#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
1341#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) 1358#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
1359#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX)
1342 1360
1343 u8 wol; 1361 u8 wol;
1344 1362
1345 bool gro_check;
1346
1347 int rx_ring_size; 1363 int rx_ring_size;
1348 1364
1349 u16 tx_quick_cons_trip_int; 1365 u16 tx_quick_cons_trip_int;
@@ -1371,7 +1387,6 @@ struct bnx2x {
1371#define BNX2X_STATE_DIAG 0xe000 1387#define BNX2X_STATE_DIAG 0xe000
1372#define BNX2X_STATE_ERROR 0xf000 1388#define BNX2X_STATE_ERROR 0xf000
1373 1389
1374 int multi_mode;
1375#define BNX2X_MAX_PRIORITY 8 1390#define BNX2X_MAX_PRIORITY 8
1376#define BNX2X_MAX_ENTRIES_PER_PRI 16 1391#define BNX2X_MAX_ENTRIES_PER_PRI 16
1377#define BNX2X_MAX_COS 3 1392#define BNX2X_MAX_COS 3
@@ -1582,6 +1597,9 @@ struct bnx2x {
1582 struct dcbx_features dcbx_remote_feat; 1597 struct dcbx_features dcbx_remote_feat;
1583 u32 dcbx_remote_flags; 1598 u32 dcbx_remote_flags;
1584#endif 1599#endif
1600 /* AFEX: store default vlan used */
1601 int afex_def_vlan_tag;
1602 enum mf_cfg_afex_vlan_mode afex_vlan_mode;
1585 u32 pending_max; 1603 u32 pending_max;
1586 1604
1587 /* multiple tx classes of service */ 1605 /* multiple tx classes of service */
@@ -2138,9 +2156,16 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
2138#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) 2156#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
2139#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) 2157#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
2140 2158
2159#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \
2160 MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
2161
2162#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
2141#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ 2163#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
2142 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ 2164 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
2143 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 2165 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
2166#else
2167#define IS_MF_FCOE_AFEX(bp) false
2144#endif 2168#endif
2145 2169
2170
2146#endif /* bnx2x.h */ 2171#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4b054812713a..60d5b548f697 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -23,7 +23,6 @@
23#include <linux/ip.h> 23#include <linux/ip.h>
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <net/ip6_checksum.h> 25#include <net/ip6_checksum.h>
26#include <linux/firmware.h>
27#include <linux/prefetch.h> 26#include <linux/prefetch.h>
28#include "bnx2x_cmn.h" 27#include "bnx2x_cmn.h"
29#include "bnx2x_init.h" 28#include "bnx2x_init.h"
@@ -329,16 +328,6 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
329 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); 328 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
330 tpa_info->full_page = 329 tpa_info->full_page =
331 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size; 330 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
332 /*
333 * FW 7.2.16 BUG workaround:
334 * if SGE size is (exactly) multiple gro_size
335 * fw will place one less frag on SGE.
336 * the calculation is done only for potentially
337 * dangerous MTUs.
338 */
339 if (unlikely(bp->gro_check))
340 if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
341 tpa_info->full_page -= gro_size;
342 tpa_info->gro_size = gro_size; 331 tpa_info->gro_size = gro_size;
343 } 332 }
344 333
@@ -1212,16 +1201,15 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1212 1201
1213void bnx2x_free_irq(struct bnx2x *bp) 1202void bnx2x_free_irq(struct bnx2x *bp)
1214{ 1203{
1215 if (bp->flags & USING_MSIX_FLAG) 1204 if (bp->flags & USING_MSIX_FLAG &&
1205 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1216 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1206 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1217 CNIC_PRESENT + 1); 1207 CNIC_PRESENT + 1);
1218 else if (bp->flags & USING_MSI_FLAG)
1219 free_irq(bp->pdev->irq, bp->dev);
1220 else 1208 else
1221 free_irq(bp->pdev->irq, bp->dev); 1209 free_irq(bp->dev->irq, bp->dev);
1222} 1210}
1223 1211
1224int bnx2x_enable_msix(struct bnx2x *bp) 1212int __devinit bnx2x_enable_msix(struct bnx2x *bp)
1225{ 1213{
1226 int msix_vec = 0, i, rc, req_cnt; 1214 int msix_vec = 0, i, rc, req_cnt;
1227 1215
@@ -1261,8 +1249,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1261 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); 1249 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1262 1250
1263 if (rc) { 1251 if (rc) {
1264 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1252 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1265 return rc; 1253 goto no_msix;
1266 } 1254 }
1267 /* 1255 /*
1268 * decrease number of queues by number of unallocated entries 1256 * decrease number of queues by number of unallocated entries
@@ -1270,18 +1258,34 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1270 bp->num_queues -= diff; 1258 bp->num_queues -= diff;
1271 1259
1272 BNX2X_DEV_INFO("New queue configuration set: %d\n", 1260 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1273 bp->num_queues); 1261 bp->num_queues);
1274 } else if (rc) { 1262 } else if (rc > 0) {
1275 /* fall to INTx if not enough memory */ 1263 /* Get by with single vector */
1276 if (rc == -ENOMEM) 1264 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1277 bp->flags |= DISABLE_MSI_FLAG; 1265 if (rc) {
1266 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1267 rc);
1268 goto no_msix;
1269 }
1270
1271 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1272 bp->flags |= USING_SINGLE_MSIX_FLAG;
1273
1274 } else if (rc < 0) {
1278 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1275 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1279 return rc; 1276 goto no_msix;
1280 } 1277 }
1281 1278
1282 bp->flags |= USING_MSIX_FLAG; 1279 bp->flags |= USING_MSIX_FLAG;
1283 1280
1284 return 0; 1281 return 0;
1282
1283no_msix:
1284 /* fall to INTx if not enough memory */
1285 if (rc == -ENOMEM)
1286 bp->flags |= DISABLE_MSI_FLAG;
1287
1288 return rc;
1285} 1289}
1286 1290
1287static int bnx2x_req_msix_irqs(struct bnx2x *bp) 1291static int bnx2x_req_msix_irqs(struct bnx2x *bp)
@@ -1343,22 +1347,26 @@ int bnx2x_enable_msi(struct bnx2x *bp)
1343static int bnx2x_req_irq(struct bnx2x *bp) 1347static int bnx2x_req_irq(struct bnx2x *bp)
1344{ 1348{
1345 unsigned long flags; 1349 unsigned long flags;
1346 int rc; 1350 unsigned int irq;
1347 1351
1348 if (bp->flags & USING_MSI_FLAG) 1352 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1349 flags = 0; 1353 flags = 0;
1350 else 1354 else
1351 flags = IRQF_SHARED; 1355 flags = IRQF_SHARED;
1352 1356
1353 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, 1357 if (bp->flags & USING_MSIX_FLAG)
1354 bp->dev->name, bp->dev); 1358 irq = bp->msix_table[0].vector;
1355 return rc; 1359 else
1360 irq = bp->pdev->irq;
1361
1362 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1356} 1363}
1357 1364
1358static inline int bnx2x_setup_irqs(struct bnx2x *bp) 1365static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1359{ 1366{
1360 int rc = 0; 1367 int rc = 0;
1361 if (bp->flags & USING_MSIX_FLAG) { 1368 if (bp->flags & USING_MSIX_FLAG &&
1369 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1362 rc = bnx2x_req_msix_irqs(bp); 1370 rc = bnx2x_req_msix_irqs(bp);
1363 if (rc) 1371 if (rc)
1364 return rc; 1372 return rc;
@@ -1371,8 +1379,13 @@ static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1371 } 1379 }
1372 if (bp->flags & USING_MSI_FLAG) { 1380 if (bp->flags & USING_MSI_FLAG) {
1373 bp->dev->irq = bp->pdev->irq; 1381 bp->dev->irq = bp->pdev->irq;
1374 netdev_info(bp->dev, "using MSI IRQ %d\n", 1382 netdev_info(bp->dev, "using MSI IRQ %d\n",
1375 bp->pdev->irq); 1383 bp->dev->irq);
1384 }
1385 if (bp->flags & USING_MSIX_FLAG) {
1386 bp->dev->irq = bp->msix_table[0].vector;
1387 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1388 bp->dev->irq);
1376 } 1389 }
1377 } 1390 }
1378 1391
@@ -1437,24 +1450,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1437 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1450 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1438} 1451}
1439 1452
1453
1440void bnx2x_set_num_queues(struct bnx2x *bp) 1454void bnx2x_set_num_queues(struct bnx2x *bp)
1441{ 1455{
1442 switch (bp->multi_mode) { 1456 /* RSS queues */
1443 case ETH_RSS_MODE_DISABLED: 1457 bp->num_queues = bnx2x_calc_num_queues(bp);
1444 bp->num_queues = 1;
1445 break;
1446 case ETH_RSS_MODE_REGULAR:
1447 bp->num_queues = bnx2x_calc_num_queues(bp);
1448 break;
1449
1450 default:
1451 bp->num_queues = 1;
1452 break;
1453 }
1454 1458
1455#ifdef BCM_CNIC 1459#ifdef BCM_CNIC
1456 /* override in STORAGE SD mode */ 1460 /* override in STORAGE SD modes */
1457 if (IS_MF_STORAGE_SD(bp)) 1461 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1458 bp->num_queues = 1; 1462 bp->num_queues = 1;
1459#endif 1463#endif
1460 /* Add special queues */ 1464 /* Add special queues */
@@ -1549,16 +1553,13 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1549 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; 1553 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1550 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 1554 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1551 1555
1552 /* 1556 /* Prepare the initial contents fo the indirection table if RSS is
1553 * Prepare the inital contents fo the indirection table if RSS is
1554 * enabled 1557 * enabled
1555 */ 1558 */
1556 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { 1559 for (i = 0; i < sizeof(ind_table); i++)
1557 for (i = 0; i < sizeof(ind_table); i++) 1560 ind_table[i] =
1558 ind_table[i] = 1561 bp->fp->cl_id +
1559 bp->fp->cl_id + 1562 ethtool_rxfh_indir_default(i, num_eth_queues);
1560 ethtool_rxfh_indir_default(i, num_eth_queues);
1561 }
1562 1563
1563 /* 1564 /*
1564 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 1565 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
@@ -1568,11 +1569,12 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1568 * For 57712 and newer on the other hand it's a per-function 1569 * For 57712 and newer on the other hand it's a per-function
1569 * configuration. 1570 * configuration.
1570 */ 1571 */
1571 return bnx2x_config_rss_pf(bp, ind_table, 1572 return bnx2x_config_rss_eth(bp, ind_table,
1572 bp->port.pmf || !CHIP_IS_E1x(bp)); 1573 bp->port.pmf || !CHIP_IS_E1x(bp));
1573} 1574}
1574 1575
1575int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) 1576int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1577 u8 *ind_table, bool config_hash)
1576{ 1578{
1577 struct bnx2x_config_rss_params params = {NULL}; 1579 struct bnx2x_config_rss_params params = {NULL};
1578 int i; 1580 int i;
@@ -1584,52 +1586,29 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1584 * bp->multi_mode = ETH_RSS_MODE_DISABLED; 1586 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1585 */ 1587 */
1586 1588
1587 params.rss_obj = &bp->rss_conf_obj; 1589 params.rss_obj = rss_obj;
1588 1590
1589 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 1591 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1590 1592
1591 /* RSS mode */ 1593 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1592 switch (bp->multi_mode) {
1593 case ETH_RSS_MODE_DISABLED:
1594 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1595 break;
1596 case ETH_RSS_MODE_REGULAR:
1597 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1598 break;
1599 case ETH_RSS_MODE_VLAN_PRI:
1600 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1601 break;
1602 case ETH_RSS_MODE_E1HOV_PRI:
1603 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1604 break;
1605 case ETH_RSS_MODE_IP_DSCP:
1606 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1607 break;
1608 default:
1609 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1610 return -EINVAL;
1611 }
1612 1594
1613 /* If RSS is enabled */ 1595 /* RSS configuration */
1614 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { 1596 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1615 /* RSS configuration */ 1597 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1616 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags); 1598 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1617 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); 1599 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1618 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1619 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1620 1600
1621 /* Hash bits */ 1601 /* Hash bits */
1622 params.rss_result_mask = MULTI_MASK; 1602 params.rss_result_mask = MULTI_MASK;
1623 1603
1624 memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); 1604 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1625 1605
1626 if (config_hash) { 1606 if (config_hash) {
1627 /* RSS keys */ 1607 /* RSS keys */
1628 for (i = 0; i < sizeof(params.rss_key) / 4; i++) 1608 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1629 params.rss_key[i] = random32(); 1609 params.rss_key[i] = random32();
1630 1610
1631 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); 1611 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1632 }
1633 } 1612 }
1634 1613
1635 return bnx2x_config_rss(bp, &params); 1614 return bnx2x_config_rss(bp, &params);
@@ -1911,8 +1890,14 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1911 SHMEM2_WR(bp, dcc_support, 1890 SHMEM2_WR(bp, dcc_support,
1912 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 1891 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1913 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); 1892 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1893 if (SHMEM2_HAS(bp, afex_driver_support))
1894 SHMEM2_WR(bp, afex_driver_support,
1895 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
1914 } 1896 }
1915 1897
1898 /* Set AFEX default VLAN tag to an invalid value */
1899 bp->afex_def_vlan_tag = -1;
1900
1916 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 1901 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1917 rc = bnx2x_func_start(bp); 1902 rc = bnx2x_func_start(bp);
1918 if (rc) { 1903 if (rc) {
@@ -3084,7 +3069,8 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3084 } 3069 }
3085 3070
3086#ifdef BCM_CNIC 3071#ifdef BCM_CNIC
3087 if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) { 3072 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3073 !is_zero_ether_addr(addr->sa_data)) {
3088 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); 3074 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3089 return -EINVAL; 3075 return -EINVAL;
3090 } 3076 }
@@ -3206,7 +3192,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3206 int rx_ring_size = 0; 3192 int rx_ring_size = 0;
3207 3193
3208#ifdef BCM_CNIC 3194#ifdef BCM_CNIC
3209 if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) { 3195 if (!bp->rx_ring_size &&
3196 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3210 rx_ring_size = MIN_RX_SIZE_NONTPA; 3197 rx_ring_size = MIN_RX_SIZE_NONTPA;
3211 bp->rx_ring_size = rx_ring_size; 3198 bp->rx_ring_size = rx_ring_size;
3212 } else 3199 } else
@@ -3528,8 +3515,6 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3528 */ 3515 */
3529 dev->mtu = new_mtu; 3516 dev->mtu = new_mtu;
3530 3517
3531 bp->gro_check = bnx2x_need_gro_check(new_mtu);
3532
3533 return bnx2x_reload_if_running(dev); 3518 return bnx2x_reload_if_running(dev);
3534} 3519}
3535 3520
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 5c27454d2ec2..cec993bc2f47 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -86,13 +86,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
86void bnx2x_send_unload_done(struct bnx2x *bp); 86void bnx2x_send_unload_done(struct bnx2x *bp);
87 87
88/** 88/**
89 * bnx2x_config_rss_pf - configure RSS parameters. 89 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
90 * 90 *
91 * @bp: driver handle 91 * @bp: driver handle
92 * @rss_obj RSS object to use
92 * @ind_table: indirection table to configure 93 * @ind_table: indirection table to configure
93 * @config_hash: re-configure RSS hash keys configuration 94 * @config_hash: re-configure RSS hash keys configuration
94 */ 95 */
95int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); 96int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
97 u8 *ind_table, bool config_hash);
96 98
97/** 99/**
98 * bnx2x__init_func_obj - init function object 100 * bnx2x__init_func_obj - init function object
@@ -485,7 +487,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
485 * fills msix_table, requests vectors, updates num_queues 487 * fills msix_table, requests vectors, updates num_queues
486 * according to number of available vectors. 488 * according to number of available vectors.
487 */ 489 */
488int bnx2x_enable_msix(struct bnx2x *bp); 490int __devinit bnx2x_enable_msix(struct bnx2x *bp);
489 491
490/** 492/**
491 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly 493 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -843,7 +845,7 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
843{ 845{
844 if (bp->flags & USING_MSIX_FLAG) { 846 if (bp->flags & USING_MSIX_FLAG) {
845 pci_disable_msix(bp->pdev); 847 pci_disable_msix(bp->pdev);
846 bp->flags &= ~USING_MSIX_FLAG; 848 bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
847 } else if (bp->flags & USING_MSI_FLAG) { 849 } else if (bp->flags & USING_MSI_FLAG) {
848 pci_disable_msi(bp->pdev); 850 pci_disable_msi(bp->pdev);
849 bp->flags &= ~USING_MSI_FLAG; 851 bp->flags &= ~USING_MSI_FLAG;
@@ -964,6 +966,19 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
964 966
965/************************* Init ******************************************/ 967/************************* Init ******************************************/
966 968
969/* returns func by VN for current port */
970static inline int func_by_vn(struct bnx2x *bp, int vn)
971{
972 return 2 * vn + BP_PORT(bp);
973}
974
975static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
976 bool config_hash)
977{
978 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
979 config_hash);
980}
981
967/** 982/**
968 * bnx2x_func_start - init function 983 * bnx2x_func_start - init function
969 * 984 *
@@ -1419,15 +1434,32 @@ static inline void storm_memset_func_cfg(struct bnx2x *bp,
1419} 1434}
1420 1435
1421static inline void storm_memset_cmng(struct bnx2x *bp, 1436static inline void storm_memset_cmng(struct bnx2x *bp,
1422 struct cmng_struct_per_port *cmng, 1437 struct cmng_init *cmng,
1423 u8 port) 1438 u8 port)
1424{ 1439{
1440 int vn;
1425 size_t size = sizeof(struct cmng_struct_per_port); 1441 size_t size = sizeof(struct cmng_struct_per_port);
1426 1442
1427 u32 addr = BAR_XSTRORM_INTMEM + 1443 u32 addr = BAR_XSTRORM_INTMEM +
1428 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); 1444 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
1429 1445
1430 __storm_memset_struct(bp, addr, size, (u32 *)cmng); 1446 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
1447
1448 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
1449 int func = func_by_vn(bp, vn);
1450
1451 addr = BAR_XSTRORM_INTMEM +
1452 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
1453 size = sizeof(struct rate_shaping_vars_per_vn);
1454 __storm_memset_struct(bp, addr, size,
1455 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
1456
1457 addr = BAR_XSTRORM_INTMEM +
1458 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
1459 size = sizeof(struct fairness_vars_per_vn);
1460 __storm_memset_struct(bp, addr, size,
1461 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
1462 }
1431} 1463}
1432 1464
1433/** 1465/**
@@ -1512,13 +1544,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1512 */ 1544 */
1513 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; 1545 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
1514} 1546}
1515
1516static inline bool bnx2x_need_gro_check(int mtu)
1517{
1518 return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
1519 (SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
1520}
1521
1522/** 1547/**
1523 * bnx2x_bz_fp - zero content of the fastpath structure. 1548 * bnx2x_bz_fp - zero content of the fastpath structure.
1524 * 1549 *
@@ -1608,11 +1633,6 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
1608 */ 1633 */
1609void bnx2x_get_iscsi_info(struct bnx2x *bp); 1634void bnx2x_get_iscsi_info(struct bnx2x *bp);
1610#endif 1635#endif
1611/* returns func by VN for current port */
1612static inline int func_by_vn(struct bnx2x *bp, int vn)
1613{
1614 return 2 * vn + BP_PORT(bp);
1615}
1616 1636
1617/** 1637/**
1618 * bnx2x_link_sync_notify - send notification to other functions. 1638 * bnx2x_link_sync_notify - send notification to other functions.
@@ -1667,7 +1687,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
1667 if (is_valid_ether_addr(addr)) 1687 if (is_valid_ether_addr(addr))
1668 return true; 1688 return true;
1669#ifdef BCM_CNIC 1689#ifdef BCM_CNIC
1670 if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp)) 1690 if (is_zero_ether_addr(addr) &&
1691 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
1671 return true; 1692 return true;
1672#endif 1693#endif
1673 return false; 1694 return false;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2cc0a1703970..faf8abd0b7eb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -22,13 +22,10 @@
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/crc32.h> 24#include <linux/crc32.h>
25
26
27#include "bnx2x.h" 25#include "bnx2x.h"
28#include "bnx2x_cmn.h" 26#include "bnx2x_cmn.h"
29#include "bnx2x_dump.h" 27#include "bnx2x_dump.h"
30#include "bnx2x_init.h" 28#include "bnx2x_init.h"
31#include "bnx2x_sp.h"
32 29
33/* Note: in the format strings below %s is replaced by the queue-name which is 30/* Note: in the format strings below %s is replaced by the queue-name which is
34 * either its index or 'fcoe' for the fcoe queue. Make sure the format string 31 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -1433,7 +1430,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
1433 else 1430 else
1434 ering->rx_pending = MAX_RX_AVAIL; 1431 ering->rx_pending = MAX_RX_AVAIL;
1435 1432
1436 ering->tx_max_pending = MAX_TX_AVAIL; 1433 ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
1437 ering->tx_pending = bp->tx_ring_size; 1434 ering->tx_pending = bp->tx_ring_size;
1438} 1435}
1439 1436
@@ -1451,7 +1448,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
1451 if ((ering->rx_pending > MAX_RX_AVAIL) || 1448 if ((ering->rx_pending > MAX_RX_AVAIL) ||
1452 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 1449 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1453 MIN_RX_SIZE_TPA)) || 1450 MIN_RX_SIZE_TPA)) ||
1454 (ering->tx_pending > MAX_TX_AVAIL) || 1451 (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
1455 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { 1452 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
1456 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); 1453 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
1457 return -EINVAL; 1454 return -EINVAL;
@@ -2396,10 +2393,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2396 2393
2397static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) 2394static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
2398{ 2395{
2399 struct bnx2x *bp = netdev_priv(dev); 2396 return T_ETH_INDIRECTION_TABLE_SIZE;
2400
2401 return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
2402 0 : T_ETH_INDIRECTION_TABLE_SIZE);
2403} 2397}
2404 2398
2405static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) 2399static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
@@ -2445,7 +2439,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
2445 ind_table[i] = indir[i] + bp->fp->cl_id; 2439 ind_table[i] = indir[i] + bp->fp->cl_id;
2446 } 2440 }
2447 2441
2448 return bnx2x_config_rss_pf(bp, ind_table, false); 2442 return bnx2x_config_rss_eth(bp, ind_table, false);
2449} 2443}
2450 2444
2451static const struct ethtool_ops bnx2x_ethtool_ops = { 2445static const struct ethtool_ops bnx2x_ethtool_ops = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index b9b263323436..426f77aa721a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -387,7 +387,7 @@
387 387
388#define STATS_QUERY_CMD_COUNT 16 388#define STATS_QUERY_CMD_COUNT 16
389 389
390#define NIV_LIST_TABLE_SIZE 4096 390#define AFEX_LIST_TABLE_SIZE 4096
391 391
392#define INVALID_VNIC_ID 0xFF 392#define INVALID_VNIC_ID 0xFF
393 393
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index dbff5915b81a..a440a8ba85f2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -833,6 +833,7 @@ struct shared_feat_cfg { /* NVRAM Offset */
833 #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 833 #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
834 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 834 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
835 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 835 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
836 #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
836 837
837 /* The interval in seconds between sending LLDP packets. Set to zero 838 /* The interval in seconds between sending LLDP packets. Set to zero
838 to disable the feature */ 839 to disable the feature */
@@ -1235,6 +1236,8 @@ struct drv_func_mb {
1235 #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 1236 #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
1236 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 1237 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
1237 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 1238 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
1239 #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
1240 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
1238 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 1241 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
1239 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 1242 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
1240 1243
@@ -1242,6 +1245,13 @@ struct drv_func_mb {
1242 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 1245 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
1243 1246
1244 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 1247 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
1248
1249 #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000
1250 #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000
1251 #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000
1252 #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000
1253 #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000
1254
1245 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 1255 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
1246 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 1256 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
1247 1257
@@ -1299,6 +1309,14 @@ struct drv_func_mb {
1299 #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 1309 #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
1300 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 1310 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
1301 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 1311 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
1312 #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000
1313
1314 #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000
1315 #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000
1316 #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000
1317 #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000
1318 #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000
1319
1302 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 1320 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
1303 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 1321 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
1304 1322
@@ -1357,6 +1375,12 @@ struct drv_func_mb {
1357 1375
1358 #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 1376 #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
1359 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 1377 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
1378 #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000
1379 #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000
1380 #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000
1381 #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000
1382 #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000
1383
1360 #define DRV_STATUS_DRV_INFO_REQ 0x04000000 1384 #define DRV_STATUS_DRV_INFO_REQ 0x04000000
1361 1385
1362 u32 virt_mac_upper; 1386 u32 virt_mac_upper;
@@ -1448,7 +1472,26 @@ struct func_mf_cfg {
1448 #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 1472 #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
1449 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK 1473 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
1450 1474
1451 u32 reserved[2]; 1475 /* afex default VLAN ID - 12 bits */
1476 #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000
1477 #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16
1478
1479 u32 afex_config;
1480 #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff
1481 #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0
1482 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00
1483 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8
1484 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100
1485 #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000
1486 #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16
1487
1488 u32 reserved;
1489};
1490
1491enum mf_cfg_afex_vlan_mode {
1492 FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
1493 FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
1494 FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
1452}; 1495};
1453 1496
1454/* This structure is not applicable and should not be accessed on 57711 */ 1497/* This structure is not applicable and should not be accessed on 57711 */
@@ -1945,18 +1988,29 @@ struct shmem2_region {
1945 1988
1946 u32 nvm_retain_bitmap_addr; /* 0x0070 */ 1989 u32 nvm_retain_bitmap_addr; /* 0x0070 */
1947 1990
1948 u32 reserved1; /* 0x0074 */ 1991 /* afex support of that driver */
1992 u32 afex_driver_support; /* 0x0074 */
1993 #define SHMEM_AFEX_VERSION_MASK 0x100f
1994 #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001
1995 #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000
1949 1996
1950 u32 reserved2[E2_FUNC_MAX]; 1997 /* driver receives addr in scratchpad to which it should respond */
1998 u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
1951 1999
1952 u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ 2000 /* generic params from MCP to driver (value depends on the msg sent
1953 u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ 2001 * to driver
2002 */
2003 u32 afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */
2004 u32 afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */
1954 2005
1955 u32 swim_base_addr; /* 0x0108 */ 2006 u32 swim_base_addr; /* 0x0108 */
1956 u32 swim_funcs; 2007 u32 swim_funcs;
1957 u32 swim_main_cb; 2008 u32 swim_main_cb;
1958 2009
1959 u32 reserved5[2]; 2010 /* bitmap notifying which VIF profiles stored in nvram are enabled by
2011 * switch
2012 */
2013 u32 afex_profiles_enabled[2];
1960 2014
1961 /* generic flags controlled by the driver */ 2015 /* generic flags controlled by the driver */
1962 u32 drv_flags; 2016 u32 drv_flags;
@@ -2696,10 +2750,51 @@ union drv_info_to_mcp {
2696 struct fcoe_stats_info fcoe_stat; 2750 struct fcoe_stats_info fcoe_stat;
2697 struct iscsi_stats_info iscsi_stat; 2751 struct iscsi_stats_info iscsi_stat;
2698}; 2752};
2753
2754/* stats collected for afex.
2755 * NOTE: structure is exactly as expected to be received by the switch.
2756 * order must remain exactly as is unless protocol changes !
2757 */
2758struct afex_stats {
2759 u32 tx_unicast_frames_hi;
2760 u32 tx_unicast_frames_lo;
2761 u32 tx_unicast_bytes_hi;
2762 u32 tx_unicast_bytes_lo;
2763 u32 tx_multicast_frames_hi;
2764 u32 tx_multicast_frames_lo;
2765 u32 tx_multicast_bytes_hi;
2766 u32 tx_multicast_bytes_lo;
2767 u32 tx_broadcast_frames_hi;
2768 u32 tx_broadcast_frames_lo;
2769 u32 tx_broadcast_bytes_hi;
2770 u32 tx_broadcast_bytes_lo;
2771 u32 tx_frames_discarded_hi;
2772 u32 tx_frames_discarded_lo;
2773 u32 tx_frames_dropped_hi;
2774 u32 tx_frames_dropped_lo;
2775
2776 u32 rx_unicast_frames_hi;
2777 u32 rx_unicast_frames_lo;
2778 u32 rx_unicast_bytes_hi;
2779 u32 rx_unicast_bytes_lo;
2780 u32 rx_multicast_frames_hi;
2781 u32 rx_multicast_frames_lo;
2782 u32 rx_multicast_bytes_hi;
2783 u32 rx_multicast_bytes_lo;
2784 u32 rx_broadcast_frames_hi;
2785 u32 rx_broadcast_frames_lo;
2786 u32 rx_broadcast_bytes_hi;
2787 u32 rx_broadcast_bytes_lo;
2788 u32 rx_frames_discarded_hi;
2789 u32 rx_frames_discarded_lo;
2790 u32 rx_frames_dropped_hi;
2791 u32 rx_frames_dropped_lo;
2792};
2793
2699#define BCM_5710_FW_MAJOR_VERSION 7 2794#define BCM_5710_FW_MAJOR_VERSION 7
2700#define BCM_5710_FW_MINOR_VERSION 2 2795#define BCM_5710_FW_MINOR_VERSION 2
2701#define BCM_5710_FW_REVISION_VERSION 16 2796#define BCM_5710_FW_REVISION_VERSION 51
2702#define BCM_5710_FW_ENGINEERING_VERSION 0 2797#define BCM_5710_FW_ENGINEERING_VERSION 0
2703#define BCM_5710_FW_COMPILE_FLAGS 1 2798#define BCM_5710_FW_COMPILE_FLAGS 1
2704 2799
2705 2800
@@ -3389,7 +3484,7 @@ struct client_init_tx_data {
3389#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) 3484#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
3390#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 3485#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
3391 u8 default_vlan_flg; 3486 u8 default_vlan_flg;
3392 u8 reserved2; 3487 u8 force_default_pri_flg;
3393 __le32 reserved3; 3488 __le32 reserved3;
3394}; 3489};
3395 3490
@@ -4375,8 +4470,21 @@ struct fcoe_statistics_params {
4375 4470
4376 4471
4377/* 4472/*
4473 * The data afex vif list ramrod need
4474 */
4475struct afex_vif_list_ramrod_data {
4476 u8 afex_vif_list_command;
4477 u8 func_bit_map;
4478 __le16 vif_list_index;
4479 u8 func_to_clear;
4480 u8 echo;
4481 __le16 reserved1;
4482};
4483
4484
4485/*
4378 * cfc delete event data 4486 * cfc delete event data
4379*/ 4487 */
4380struct cfc_del_event_data { 4488struct cfc_del_event_data {
4381 u32 cid; 4489 u32 cid;
4382 u32 reserved0; 4490 u32 reserved0;
@@ -4448,6 +4556,65 @@ struct cmng_struct_per_port {
4448 struct cmng_flags_per_port flags; 4556 struct cmng_flags_per_port flags;
4449}; 4557};
4450 4558
4559/*
4560 * a single rate shaping counter. can be used as protocol or vnic counter
4561 */
4562struct rate_shaping_counter {
4563 u32 quota;
4564#if defined(__BIG_ENDIAN)
4565 u16 __reserved0;
4566 u16 rate;
4567#elif defined(__LITTLE_ENDIAN)
4568 u16 rate;
4569 u16 __reserved0;
4570#endif
4571};
4572
4573/*
4574 * per-vnic rate shaping variables
4575 */
4576struct rate_shaping_vars_per_vn {
4577 struct rate_shaping_counter vn_counter;
4578};
4579
4580/*
4581 * per-vnic fairness variables
4582 */
4583struct fairness_vars_per_vn {
4584 u32 cos_credit_delta[MAX_COS_NUMBER];
4585 u32 vn_credit_delta;
4586 u32 __reserved0;
4587};
4588
4589/*
4590 * cmng port init state
4591 */
4592struct cmng_vnic {
4593 struct rate_shaping_vars_per_vn vnic_max_rate[4];
4594 struct fairness_vars_per_vn vnic_min_rate[4];
4595};
4596
4597/*
4598 * cmng port init state
4599 */
4600struct cmng_init {
4601 struct cmng_struct_per_port port;
4602 struct cmng_vnic vnic;
4603};
4604
4605
4606/*
4607 * driver parameters for congestion management init, all rates are in Mbps
4608 */
4609struct cmng_init_input {
4610 u32 port_rate;
4611 u16 vnic_min_rate[4];
4612 u16 vnic_max_rate[4];
4613 u16 cos_min_rate[MAX_COS_NUMBER];
4614 u16 cos_to_pause_mask[MAX_COS_NUMBER];
4615 struct cmng_flags_per_port flags;
4616};
4617
4451 4618
4452/* 4619/*
4453 * Protocol-common command ID for slow path elements 4620 * Protocol-common command ID for slow path elements
@@ -4462,7 +4629,7 @@ enum common_spqe_cmd_id {
4462 RAMROD_CMD_ID_COMMON_STAT_QUERY, 4629 RAMROD_CMD_ID_COMMON_STAT_QUERY,
4463 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 4630 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
4464 RAMROD_CMD_ID_COMMON_START_TRAFFIC, 4631 RAMROD_CMD_ID_COMMON_START_TRAFFIC,
4465 RAMROD_CMD_ID_COMMON_RESERVED1, 4632 RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
4466 MAX_COMMON_SPQE_CMD_ID 4633 MAX_COMMON_SPQE_CMD_ID
4467}; 4634};
4468 4635
@@ -4670,6 +4837,17 @@ struct malicious_vf_event_data {
4670}; 4837};
4671 4838
4672/* 4839/*
4840 * vif list event data
4841 */
4842struct vif_list_event_data {
4843 u8 func_bit_map;
4844 u8 echo;
4845 __le16 reserved0;
4846 __le32 reserved1;
4847 __le32 reserved2;
4848};
4849
4850/*
4673 * union for all event ring message types 4851 * union for all event ring message types
4674 */ 4852 */
4675union event_data { 4853union event_data {
@@ -4678,6 +4856,7 @@ union event_data {
4678 struct cfc_del_event_data cfc_del_event; 4856 struct cfc_del_event_data cfc_del_event;
4679 struct vf_flr_event_data vf_flr_event; 4857 struct vf_flr_event_data vf_flr_event;
4680 struct malicious_vf_event_data malicious_vf_event; 4858 struct malicious_vf_event_data malicious_vf_event;
4859 struct vif_list_event_data vif_list_event;
4681}; 4860};
4682 4861
4683 4862
@@ -4743,7 +4922,7 @@ enum event_ring_opcode {
4743 EVENT_RING_OPCODE_FORWARD_SETUP, 4922 EVENT_RING_OPCODE_FORWARD_SETUP,
4744 EVENT_RING_OPCODE_RSS_UPDATE_RULES, 4923 EVENT_RING_OPCODE_RSS_UPDATE_RULES,
4745 EVENT_RING_OPCODE_FUNCTION_UPDATE, 4924 EVENT_RING_OPCODE_FUNCTION_UPDATE,
4746 EVENT_RING_OPCODE_RESERVED1, 4925 EVENT_RING_OPCODE_AFEX_VIF_LISTS,
4747 EVENT_RING_OPCODE_SET_MAC, 4926 EVENT_RING_OPCODE_SET_MAC,
4748 EVENT_RING_OPCODE_CLASSIFICATION_RULES, 4927 EVENT_RING_OPCODE_CLASSIFICATION_RULES,
4749 EVENT_RING_OPCODE_FILTERS_RULES, 4928 EVENT_RING_OPCODE_FILTERS_RULES,
@@ -4763,16 +4942,6 @@ enum fairness_mode {
4763 4942
4764 4943
4765/* 4944/*
4766 * per-vnic fairness variables
4767 */
4768struct fairness_vars_per_vn {
4769 u32 cos_credit_delta[MAX_COS_NUMBER];
4770 u32 vn_credit_delta;
4771 u32 __reserved0;
4772};
4773
4774
4775/*
4776 * Priority and cos 4945 * Priority and cos
4777 */ 4946 */
4778struct priority_cos { 4947struct priority_cos {
@@ -4800,12 +4969,27 @@ struct flow_control_configuration {
4800struct function_start_data { 4969struct function_start_data {
4801 __le16 function_mode; 4970 __le16 function_mode;
4802 __le16 sd_vlan_tag; 4971 __le16 sd_vlan_tag;
4803 u16 reserved; 4972 __le16 vif_id;
4804 u8 path_id; 4973 u8 path_id;
4805 u8 network_cos_mode; 4974 u8 network_cos_mode;
4806}; 4975};
4807 4976
4808 4977
4978struct function_update_data {
4979 u8 vif_id_change_flg;
4980 u8 afex_default_vlan_change_flg;
4981 u8 allowed_priorities_change_flg;
4982 u8 network_cos_mode_change_flg;
4983 __le16 vif_id;
4984 __le16 afex_default_vlan;
4985 u8 allowed_priorities;
4986 u8 network_cos_mode;
4987 u8 lb_mode_en;
4988 u8 reserved0;
4989 __le32 reserved1;
4990};
4991
4992
4809/* 4993/*
4810 * FW version stored in the Xstorm RAM 4994 * FW version stored in the Xstorm RAM
4811 */ 4995 */
@@ -5003,7 +5187,7 @@ enum mf_mode {
5003 SINGLE_FUNCTION, 5187 SINGLE_FUNCTION,
5004 MULTI_FUNCTION_SD, 5188 MULTI_FUNCTION_SD,
5005 MULTI_FUNCTION_SI, 5189 MULTI_FUNCTION_SI,
5006 MULTI_FUNCTION_RESERVED, 5190 MULTI_FUNCTION_AFEX,
5007 MAX_MF_MODE 5191 MAX_MF_MODE
5008}; 5192};
5009 5193
@@ -5128,6 +5312,7 @@ union protocol_common_specific_data {
5128 u8 protocol_data[8]; 5312 u8 protocol_data[8];
5129 struct regpair phy_address; 5313 struct regpair phy_address;
5130 struct regpair mac_config_addr; 5314 struct regpair mac_config_addr;
5315 struct afex_vif_list_ramrod_data afex_vif_list_data;
5131}; 5316};
5132 5317
5133/* 5318/*
@@ -5140,29 +5325,6 @@ struct protocol_common_spe {
5140 5325
5141 5326
5142/* 5327/*
5143 * a single rate shaping counter. can be used as protocol or vnic counter
5144 */
5145struct rate_shaping_counter {
5146 u32 quota;
5147#if defined(__BIG_ENDIAN)
5148 u16 __reserved0;
5149 u16 rate;
5150#elif defined(__LITTLE_ENDIAN)
5151 u16 rate;
5152 u16 __reserved0;
5153#endif
5154};
5155
5156
5157/*
5158 * per-vnic rate shaping variables
5159 */
5160struct rate_shaping_vars_per_vn {
5161 struct rate_shaping_counter vn_counter;
5162};
5163
5164
5165/*
5166 * The send queue element 5328 * The send queue element
5167 */ 5329 */
5168struct slow_path_element { 5330struct slow_path_element {
@@ -5330,6 +5492,18 @@ enum vf_pf_channel_state {
5330 5492
5331 5493
5332/* 5494/*
5495 * vif_list_rule_kind
5496 */
5497enum vif_list_rule_kind {
5498 VIF_LIST_RULE_SET,
5499 VIF_LIST_RULE_GET,
5500 VIF_LIST_RULE_CLEAR_ALL,
5501 VIF_LIST_RULE_CLEAR_FUNC,
5502 MAX_VIF_LIST_RULE_KIND
5503};
5504
5505
5506/*
5333 * zone A per-queue data 5507 * zone A per-queue data
5334 */ 5508 */
5335struct xstorm_queue_zone_data { 5509struct xstorm_queue_zone_data {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 29f5c3cca31a..559c396d45cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -125,7 +125,7 @@ enum {
125 MODE_MF = 0x00000100, 125 MODE_MF = 0x00000100,
126 MODE_MF_SD = 0x00000200, 126 MODE_MF_SD = 0x00000200,
127 MODE_MF_SI = 0x00000400, 127 MODE_MF_SI = 0x00000400,
128 MODE_MF_NIV = 0x00000800, 128 MODE_MF_AFEX = 0x00000800,
129 MODE_E3_A0 = 0x00001000, 129 MODE_E3_A0 = 0x00001000,
130 MODE_E3_B0 = 0x00002000, 130 MODE_E3_B0 = 0x00002000,
131 MODE_COS3 = 0x00004000, 131 MODE_COS3 = 0x00004000,
@@ -241,7 +241,8 @@ static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
241 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); 241 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
242 242
243 /* set/clear queue bit in command-queue bit map 243 /* set/clear queue bit in command-queue bit map
244 (E2/E3A0 only, valid COS values are 0/1) */ 244 * (E2/E3A0 only, valid COS values are 0/1)
245 */
245 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { 246 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
246 reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); 247 reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
247 reg_bit_map = REG_RD(bp, reg_addr); 248 reg_bit_map = REG_RD(bp, reg_addr);
@@ -277,7 +278,215 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
277} 278}
278 279
279 280
280/* Returns the index of start or end of a specific block stage in ops array*/ 281/* congestion managment port init api description
282 * the api works as follows:
283 * the driver should pass the cmng_init_input struct, the port_init function
284 * will prepare the required internal ram structure which will be passed back
285 * to the driver (cmng_init) that will write it into the internal ram.
286 *
287 * IMPORTANT REMARKS:
288 * 1. the cmng_init struct does not represent the contiguous internal ram
289 * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
290 * offset in order to write the port sub struct and the
291 * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
292 * words - don't use memcpy!).
293 * 2. although the cmng_init struct is filled for the maximal vnic number
294 * possible, the driver should only write the valid vnics into the internal
295 * ram according to the appropriate port mode.
296 */
297#define BITS_TO_BYTES(x) ((x)/8)
298
299/* CMNG constants, as derived from system spec calculations */
300
301/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */
302#define DEF_MIN_RATE 100
303
304/* resolution of the rate shaping timer - 400 usec */
305#define RS_PERIODIC_TIMEOUT_USEC 400
306
307/* number of bytes in single QM arbitration cycle -
308 * coefficient for calculating the fairness timer
309 */
310#define QM_ARB_BYTES 160000
311
312/* resolution of Min algorithm 1:100 */
313#define MIN_RES 100
314
315/* how many bytes above threshold for
316 * the minimal credit of Min algorithm
317 */
318#define MIN_ABOVE_THRESH 32768
319
320/* Fairness algorithm integration time coefficient -
321 * for calculating the actual Tfair
322 */
323#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
324
325/* Memory of fairness algorithm - 2 cycles */
326#define FAIR_MEM 2
327#define SAFC_TIMEOUT_USEC 52
328
329#define SDM_TICKS 4
330
331
332static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
333 u32 r_param, struct cmng_init *ram_data)
334{
335 u32 vnic;
336 struct cmng_vnic *vdata = &ram_data->vnic;
337 struct cmng_struct_per_port *pdata = &ram_data->port;
338 /* rate shaping per-port variables
339 * 100 micro seconds in SDM ticks = 25
340 * since each tick is 4 microSeconds
341 */
342
343 pdata->rs_vars.rs_periodic_timeout =
344 RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
345
346 /* this is the threshold below which no timer arming will occur.
347 * 1.25 coefficient is for the threshold to be a little bigger
348 * then the real time to compensate for timer in-accuracy
349 */
350 pdata->rs_vars.rs_threshold =
351 (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
352
353 /* rate shaping per-vnic variables */
354 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
355 /* global vnic counter */
356 vdata->vnic_max_rate[vnic].vn_counter.rate =
357 input_data->vnic_max_rate[vnic];
358 /* maximal Mbps for this vnic
359 * the quota in each timer period - number of bytes
360 * transmitted in this period
361 */
362 vdata->vnic_max_rate[vnic].vn_counter.quota =
363 RS_PERIODIC_TIMEOUT_USEC *
364 (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
365 }
366
367}
368
369static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
370 u32 r_param, struct cmng_init *ram_data)
371{
372 u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
373 struct cmng_vnic *vdata = &ram_data->vnic;
374 struct cmng_struct_per_port *pdata = &ram_data->port;
375
376 /* this is the resolution of the fairness timer */
377 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
378
379 /* fairness per-port variables
380 * for 10G it is 1000usec. for 1G it is 10000usec.
381 */
382 tFair = T_FAIR_COEF / input_data->port_rate;
383
384 /* this is the threshold below which we won't arm the timer anymore */
385 pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
386
387 /* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
388 * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
389 */
390 pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
391
392 /* since each tick is 4 microSeconds */
393 pdata->fair_vars.fairness_timeout =
394 fair_periodic_timeout_usec / SDM_TICKS;
395
396 /* calculate sum of weights */
397 vnicWeightSum = 0;
398
399 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
400 vnicWeightSum += input_data->vnic_min_rate[vnic];
401
402 /* global vnic counter */
403 if (vnicWeightSum > 0) {
404 /* fairness per-vnic variables */
405 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
406 /* this is the credit for each period of the fairness
407 * algorithm - number of bytes in T_FAIR (this vnic
408 * share of the port rate)
409 */
410 vdata->vnic_min_rate[vnic].vn_credit_delta =
411 (u32)input_data->vnic_min_rate[vnic] * 100 *
412 (T_FAIR_COEF / (8 * 100 * vnicWeightSum));
413 if (vdata->vnic_min_rate[vnic].vn_credit_delta <
414 pdata->fair_vars.fair_threshold +
415 MIN_ABOVE_THRESH) {
416 vdata->vnic_min_rate[vnic].vn_credit_delta =
417 pdata->fair_vars.fair_threshold +
418 MIN_ABOVE_THRESH;
419 }
420 }
421 }
422}
423
424static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
425 u32 r_param, struct cmng_init *ram_data)
426{
427 u32 vnic, cos;
428 u32 cosWeightSum = 0;
429 struct cmng_vnic *vdata = &ram_data->vnic;
430 struct cmng_struct_per_port *pdata = &ram_data->port;
431
432 for (cos = 0; cos < MAX_COS_NUMBER; cos++)
433 cosWeightSum += input_data->cos_min_rate[cos];
434
435 if (cosWeightSum > 0) {
436
437 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
438 /* Since cos and vnic shouldn't work together the rate
439 * to divide between the coses is the port rate.
440 */
441 u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
442 for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
443 /* this is the credit for each period of
444 * the fairness algorithm - number of bytes
445 * in T_FAIR (this cos share of the vnic rate)
446 */
447 ccd[cos] =
448 (u32)input_data->cos_min_rate[cos] * 100 *
449 (T_FAIR_COEF / (8 * 100 * cosWeightSum));
450 if (ccd[cos] < pdata->fair_vars.fair_threshold
451 + MIN_ABOVE_THRESH) {
452 ccd[cos] =
453 pdata->fair_vars.fair_threshold +
454 MIN_ABOVE_THRESH;
455 }
456 }
457 }
458 }
459}
460
461static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
462 struct cmng_init *ram_data)
463{
464 /* in microSeconds */
465 ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
466}
467
468/* Congestion management port init */
469static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
470 struct cmng_init *ram_data)
471{
472 u32 r_param;
473 memset(ram_data, 0, sizeof(struct cmng_init));
474
475 ram_data->port.flags = input_data->flags;
476
477 /* number of bytes transmitted in a rate of 10Gbps
478 * in one usec = 1.25KB.
479 */
480 r_param = BITS_TO_BYTES(input_data->port_rate);
481 bnx2x_init_max(input_data, r_param, ram_data);
482 bnx2x_init_min(input_data, r_param, ram_data);
483 bnx2x_init_fw_wrr(input_data, r_param, ram_data);
484 bnx2x_init_safc(input_data, ram_data);
485}
486
487
488
489/* Returns the index of start or end of a specific block stage in ops array */
281#define BLOCK_OPS_IDX(block, stage, end) \ 490#define BLOCK_OPS_IDX(block, stage, end) \
282 (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) 491 (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
283 492
@@ -499,9 +708,7 @@ static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
499 bnx2x_set_mcp_parity(bp, false); 708 bnx2x_set_mcp_parity(bp, false);
500} 709}
501 710
502/** 711/* Clear the parity error status registers. */
503 * Clear the parity error status registers.
504 */
505static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) 712static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
506{ 713{
507 int i; 714 int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index ad95324dc042..a3fb7215cd89 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -138,7 +138,6 @@
138 138
139 139
140 140
141/* */
142#define SFP_EEPROM_CON_TYPE_ADDR 0x2 141#define SFP_EEPROM_CON_TYPE_ADDR 0x2
143 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 142 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
144 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 143 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
@@ -404,8 +403,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
404 403
405 DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); 404 DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
406 405
407 /* 406 /* mapping between entry priority to client number (0,1,2 -debug and
408 * mapping between entry priority to client number (0,1,2 -debug and
409 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 407 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
410 * 3bits client num. 408 * 3bits client num.
411 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 409 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -413,8 +411,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
413 */ 411 */
414 412
415 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); 413 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
416 /* 414 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
417 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
418 * as strict. Bits 0,1,2 - debug and management entries, 3 - 415 * as strict. Bits 0,1,2 - debug and management entries, 3 -
419 * COS0 entry, 4 - COS1 entry. 416 * COS0 entry, 4 - COS1 entry.
420 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT 417 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -425,13 +422,11 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
425 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 422 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
426 /* defines which entries (clients) are subjected to WFQ arbitration */ 423 /* defines which entries (clients) are subjected to WFQ arbitration */
427 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 424 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
428 /* 425 /* For strict priority entries defines the number of consecutive
429 * For strict priority entries defines the number of consecutive
430 * slots for the highest priority. 426 * slots for the highest priority.
431 */ 427 */
432 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 428 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
433 /* 429 /* mapping between the CREDIT_WEIGHT registers and actual client
434 * mapping between the CREDIT_WEIGHT registers and actual client
435 * numbers 430 * numbers
436 */ 431 */
437 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); 432 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
@@ -443,8 +438,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
443 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); 438 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
444 /* ETS mode disable */ 439 /* ETS mode disable */
445 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); 440 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
446 /* 441 /* If ETS mode is enabled (there is no strict priority) defines a WFQ
447 * If ETS mode is enabled (there is no strict priority) defines a WFQ
448 * weight for COS0/COS1. 442 * weight for COS0/COS1.
449 */ 443 */
450 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710); 444 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
@@ -471,10 +465,9 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
471 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; 465 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
472 } else 466 } else
473 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; 467 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
474 /** 468 /* If the link isn't up (static configuration for example ) The
475 * If the link isn't up (static configuration for example ) The 469 * link will be according to 20GBPS.
476 * link will be according to 20GBPS. 470 */
477 */
478 return min_w_val; 471 return min_w_val;
479} 472}
480/****************************************************************************** 473/******************************************************************************
@@ -538,8 +531,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
538 struct bnx2x *bp = params->bp; 531 struct bnx2x *bp = params->bp;
539 const u8 port = params->port; 532 const u8 port = params->port;
540 const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); 533 const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
541 /** 534 /* Mapping between entry priority to client number (0,1,2 -debug and
542 * mapping between entry priority to client number (0,1,2 -debug and
543 * management clients, 3 - COS0 client, 4 - COS1, ... 8 - 535 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
544 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by 536 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
545 * reset value or init tool 537 * reset value or init tool
@@ -551,18 +543,14 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
551 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); 543 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
552 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); 544 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
553 } 545 }
554 /** 546 /* For strict priority entries defines the number of consecutive
555 * For strict priority entries defines the number of consecutive 547 * slots for the highest priority.
556 * slots for the highest priority. 548 */
557 */
558 /* TODO_ETS - Should be done by reset value or init tool */
559 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : 549 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
560 NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 550 NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
561 /** 551 /* Mapping between the CREDIT_WEIGHT registers and actual client
562 * mapping between the CREDIT_WEIGHT registers and actual client
563 * numbers 552 * numbers
564 */ 553 */
565 /* TODO_ETS - Should be done by reset value or init tool */
566 if (port) { 554 if (port) {
567 /*Port 1 has 6 COS*/ 555 /*Port 1 has 6 COS*/
568 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); 556 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
@@ -574,8 +562,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
574 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); 562 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
575 } 563 }
576 564
577 /** 565 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
578 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
579 * as strict. Bits 0,1,2 - debug and management entries, 3 - 566 * as strict. Bits 0,1,2 - debug and management entries, 3 -
580 * COS0 entry, 4 - COS1 entry. 567 * COS0 entry, 4 - COS1 entry.
581 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT 568 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -590,13 +577,12 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
590 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : 577 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
591 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 578 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
592 579
593 /** 580 /* Please notice the register address are note continuous and a
594 * Please notice the register address are note continuous and a 581 * for here is note appropriate.In 2 port mode port0 only COS0-5
595 * for here is note appropriate.In 2 port mode port0 only COS0-5 582 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
596 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 583 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
597 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT 584 * are never used for WFQ
598 * are never used for WFQ 585 */
599 */
600 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : 586 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
601 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0); 587 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
602 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : 588 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
@@ -633,10 +619,9 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
633 u32 base_upper_bound = 0; 619 u32 base_upper_bound = 0;
634 u8 max_cos = 0; 620 u8 max_cos = 0;
635 u8 i = 0; 621 u8 i = 0;
636 /** 622 /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
637 * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 623 * port mode port1 has COS0-2 that can be used for WFQ.
638 * port mode port1 has COS0-2 that can be used for WFQ. 624 */
639 */
640 if (!port) { 625 if (!port) {
641 base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; 626 base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
642 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; 627 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -666,8 +651,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
666 u32 base_weight = 0; 651 u32 base_weight = 0;
667 u8 max_cos = 0; 652 u8 max_cos = 0;
668 653
669 /** 654 /* Mapping between entry priority to client number 0 - COS0
670 * mapping between entry priority to client number 0 - COS0
671 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. 655 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
672 * TODO_ETS - Should be done by reset value or init tool 656 * TODO_ETS - Should be done by reset value or init tool
673 */ 657 */
@@ -695,10 +679,9 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
695 679
696 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : 680 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
697 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0); 681 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
698 /** 682 /* In 2 port mode port0 has COS0-5 that can be used for WFQ.
699 * In 2 port mode port0 has COS0-5 that can be used for WFQ. 683 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
700 * In 4 port mode port1 has COS0-2 that can be used for WFQ. 684 */
701 */
702 if (!port) { 685 if (!port) {
703 base_weight = PBF_REG_COS0_WEIGHT_P0; 686 base_weight = PBF_REG_COS0_WEIGHT_P0;
704 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; 687 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -738,7 +721,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
738/****************************************************************************** 721/******************************************************************************
739* Description: 722* Description:
740* Disable will return basicly the values to init values. 723* Disable will return basicly the values to init values.
741*. 724*
742******************************************************************************/ 725******************************************************************************/
743int bnx2x_ets_disabled(struct link_params *params, 726int bnx2x_ets_disabled(struct link_params *params,
744 struct link_vars *vars) 727 struct link_vars *vars)
@@ -867,7 +850,7 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
867/****************************************************************************** 850/******************************************************************************
868* Description: 851* Description:
869* Calculate the total BW.A value of 0 isn't legal. 852* Calculate the total BW.A value of 0 isn't legal.
870*. 853*
871******************************************************************************/ 854******************************************************************************/
872static int bnx2x_ets_e3b0_get_total_bw( 855static int bnx2x_ets_e3b0_get_total_bw(
873 const struct link_params *params, 856 const struct link_params *params,
@@ -879,7 +862,6 @@ static int bnx2x_ets_e3b0_get_total_bw(
879 u8 is_bw_cos_exist = 0; 862 u8 is_bw_cos_exist = 0;
880 863
881 *total_bw = 0 ; 864 *total_bw = 0 ;
882
883 /* Calculate total BW requested */ 865 /* Calculate total BW requested */
884 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { 866 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
885 if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) { 867 if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
@@ -887,10 +869,9 @@ static int bnx2x_ets_e3b0_get_total_bw(
887 if (!ets_params->cos[cos_idx].params.bw_params.bw) { 869 if (!ets_params->cos[cos_idx].params.bw_params.bw) {
888 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" 870 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
889 "was set to 0\n"); 871 "was set to 0\n");
890 /* 872 /* This is to prevent a state when ramrods
891 * This is to prevent a state when ramrods
892 * can't be sent 873 * can't be sent
893 */ 874 */
894 ets_params->cos[cos_idx].params.bw_params.bw 875 ets_params->cos[cos_idx].params.bw_params.bw
895 = 1; 876 = 1;
896 } 877 }
@@ -908,8 +889,7 @@ static int bnx2x_ets_e3b0_get_total_bw(
908 } 889 }
909 DP(NETIF_MSG_LINK, 890 DP(NETIF_MSG_LINK,
910 "bnx2x_ets_E3B0_config total BW should be 100\n"); 891 "bnx2x_ets_E3B0_config total BW should be 100\n");
911 /* 892 /* We can handle a case whre the BW isn't 100 this can happen
912 * We can handle a case whre the BW isn't 100 this can happen
913 * if the TC are joined. 893 * if the TC are joined.
914 */ 894 */
915 } 895 }
@@ -919,7 +899,7 @@ static int bnx2x_ets_e3b0_get_total_bw(
919/****************************************************************************** 899/******************************************************************************
920* Description: 900* Description:
921* Invalidate all the sp_pri_to_cos. 901* Invalidate all the sp_pri_to_cos.
922*. 902*
923******************************************************************************/ 903******************************************************************************/
924static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) 904static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
925{ 905{
@@ -931,7 +911,7 @@ static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
931* Description: 911* Description:
932* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers 912* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
933* according to sp_pri_to_cos. 913* according to sp_pri_to_cos.
934*. 914*
935******************************************************************************/ 915******************************************************************************/
936static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, 916static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
937 u8 *sp_pri_to_cos, const u8 pri, 917 u8 *sp_pri_to_cos, const u8 pri,
@@ -942,6 +922,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
942 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : 922 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
943 DCBX_E3B0_MAX_NUM_COS_PORT0; 923 DCBX_E3B0_MAX_NUM_COS_PORT0;
944 924
925 if (pri >= max_num_of_cos) {
926 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
927 "parameter Illegal strict priority\n");
928 return -EINVAL;
929 }
930
945 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { 931 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
946 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " 932 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
947 "parameter There can't be two COS's with " 933 "parameter There can't be two COS's with "
@@ -949,12 +935,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
949 return -EINVAL; 935 return -EINVAL;
950 } 936 }
951 937
952 if (pri > max_num_of_cos) {
953 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
954 "parameter Illegal strict priority\n");
955 return -EINVAL;
956 }
957
958 sp_pri_to_cos[pri] = cos_entry; 938 sp_pri_to_cos[pri] = cos_entry;
959 return 0; 939 return 0;
960 940
@@ -964,7 +944,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
964* Description: 944* Description:
965* Returns the correct value according to COS and priority in 945* Returns the correct value according to COS and priority in
966* the sp_pri_cli register. 946* the sp_pri_cli register.
967*. 947*
968******************************************************************************/ 948******************************************************************************/
969static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, 949static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
970 const u8 pri_set, 950 const u8 pri_set,
@@ -981,7 +961,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
981* Description: 961* Description:
982* Returns the correct value according to COS and priority in the 962* Returns the correct value according to COS and priority in the
983* sp_pri_cli register for NIG. 963* sp_pri_cli register for NIG.
984*. 964*
985******************************************************************************/ 965******************************************************************************/
986static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) 966static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
987{ 967{
@@ -997,7 +977,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
997* Description: 977* Description:
998* Returns the correct value according to COS and priority in the 978* Returns the correct value according to COS and priority in the
999* sp_pri_cli register for PBF. 979* sp_pri_cli register for PBF.
1000*. 980*
1001******************************************************************************/ 981******************************************************************************/
1002static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) 982static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
1003{ 983{
@@ -1013,7 +993,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
1013* Description: 993* Description:
1014* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers 994* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
1015* according to sp_pri_to_cos.(which COS has higher priority) 995* according to sp_pri_to_cos.(which COS has higher priority)
1016*. 996*
1017******************************************************************************/ 997******************************************************************************/
1018static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, 998static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
1019 u8 *sp_pri_to_cos) 999 u8 *sp_pri_to_cos)
@@ -1149,8 +1129,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
1149 return -EINVAL; 1129 return -EINVAL;
1150 } 1130 }
1151 1131
1152 /* 1132 /* Upper bound is set according to current link speed (min_w_val
1153 * Upper bound is set according to current link speed (min_w_val
1154 * should be the same for upper bound and COS credit val). 1133 * should be the same for upper bound and COS credit val).
1155 */ 1134 */
1156 bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); 1135 bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
@@ -1160,8 +1139,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
1160 for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { 1139 for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
1161 if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { 1140 if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
1162 cos_bw_bitmap |= (1 << cos_entry); 1141 cos_bw_bitmap |= (1 << cos_entry);
1163 /* 1142 /* The function also sets the BW in HW(not the mappin
1164 * The function also sets the BW in HW(not the mappin
1165 * yet) 1143 * yet)
1166 */ 1144 */
1167 bnx2x_status = bnx2x_ets_e3b0_set_cos_bw( 1145 bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
@@ -1217,14 +1195,12 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params)
1217 /* ETS disabled configuration */ 1195 /* ETS disabled configuration */
1218 struct bnx2x *bp = params->bp; 1196 struct bnx2x *bp = params->bp;
1219 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); 1197 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
1220 /* 1198 /* Defines which entries (clients) are subjected to WFQ arbitration
1221 * defines which entries (clients) are subjected to WFQ arbitration
1222 * COS0 0x8 1199 * COS0 0x8
1223 * COS1 0x10 1200 * COS1 0x10
1224 */ 1201 */
1225 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); 1202 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
1226 /* 1203 /* Mapping between the ARB_CREDIT_WEIGHT registers and actual
1227 * mapping between the ARB_CREDIT_WEIGHT registers and actual
1228 * client numbers (WEIGHT_0 does not actually have to represent 1204 * client numbers (WEIGHT_0 does not actually have to represent
1229 * client 0) 1205 * client 0)
1230 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 1206 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -1242,8 +1218,7 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params)
1242 1218
1243 /* Defines the number of consecutive slots for the strict priority */ 1219 /* Defines the number of consecutive slots for the strict priority */
1244 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 1220 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
1245 /* 1221 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
1246 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
1247 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 1222 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
1248 * entry, 4 - COS1 entry. 1223 * entry, 4 - COS1 entry.
1249 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT 1224 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1298,8 +1273,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1298 u32 val = 0; 1273 u32 val = 0;
1299 1274
1300 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); 1275 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
1301 /* 1276 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
1302 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
1303 * as strict. Bits 0,1,2 - debug and management entries, 1277 * as strict. Bits 0,1,2 - debug and management entries,
1304 * 3 - COS0 entry, 4 - COS1 entry. 1278 * 3 - COS0 entry, 4 - COS1 entry.
1305 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT 1279 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1307,8 +1281,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1307 * MCP and debug are strict 1281 * MCP and debug are strict
1308 */ 1282 */
1309 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); 1283 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
1310 /* 1284 /* For strict priority entries defines the number of consecutive slots
1311 * For strict priority entries defines the number of consecutive slots
1312 * for the highest priority. 1285 * for the highest priority.
1313 */ 1286 */
1314 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 1287 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
@@ -1320,8 +1293,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1320 /* Defines the number of consecutive slots for the strict priority */ 1293 /* Defines the number of consecutive slots for the strict priority */
1321 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); 1294 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
1322 1295
1323 /* 1296 /* Mapping between entry priority to client number (0,1,2 -debug and
1324 * mapping between entry priority to client number (0,1,2 -debug and
1325 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 1297 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
1326 * 3bits client num. 1298 * 3bits client num.
1327 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 1299 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -1356,15 +1328,12 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
1356 if (!(params->feature_config_flags & 1328 if (!(params->feature_config_flags &
1357 FEATURE_CONFIG_PFC_ENABLED)) { 1329 FEATURE_CONFIG_PFC_ENABLED)) {
1358 1330
1359 /* 1331 /* RX flow control - Process pause frame in receive direction
1360 * RX flow control - Process pause frame in receive direction
1361 */ 1332 */
1362 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) 1333 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1363 pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; 1334 pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
1364 1335
1365 /* 1336 /* TX flow control - Send pause packet when buffer is full */
1366 * TX flow control - Send pause packet when buffer is full
1367 */
1368 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 1337 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1369 pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; 1338 pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
1370 } else {/* PFC support */ 1339 } else {/* PFC support */
@@ -1457,8 +1426,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
1457static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) 1426static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
1458{ 1427{
1459 u32 mode, emac_base; 1428 u32 mode, emac_base;
1460 /** 1429 /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1461 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1462 * (a value of 49==0x31) and make sure that the AUTO poll is off 1430 * (a value of 49==0x31) and make sure that the AUTO poll is off
1463 */ 1431 */
1464 1432
@@ -1578,15 +1546,6 @@ static void bnx2x_umac_enable(struct link_params *params,
1578 1546
1579 DP(NETIF_MSG_LINK, "enabling UMAC\n"); 1547 DP(NETIF_MSG_LINK, "enabling UMAC\n");
1580 1548
1581 /**
1582 * This register determines on which events the MAC will assert
1583 * error on the i/f to the NIG along w/ EOP.
1584 */
1585
1586 /**
1587 * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
1588 * params->port*0x14, 0xfffff.
1589 */
1590 /* This register opens the gate for the UMAC despite its name */ 1549 /* This register opens the gate for the UMAC despite its name */
1591 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); 1550 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
1592 1551
@@ -1649,8 +1608,7 @@ static void bnx2x_umac_enable(struct link_params *params,
1649 val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; 1608 val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
1650 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); 1609 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1651 1610
1652 /* 1611 /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
1653 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
1654 * length used by the MAC receive logic to check frames. 1612 * length used by the MAC receive logic to check frames.
1655 */ 1613 */
1656 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); 1614 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -1666,8 +1624,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1666 struct bnx2x *bp = params->bp; 1624 struct bnx2x *bp = params->bp;
1667 u32 is_port4mode = bnx2x_is_4_port_mode(bp); 1625 u32 is_port4mode = bnx2x_is_4_port_mode(bp);
1668 1626
1669 /* 1627 /* In 4-port mode, need to set the mode only once, so if XMAC is
1670 * In 4-port mode, need to set the mode only once, so if XMAC is
1671 * already out of reset, it means the mode has already been set, 1628 * already out of reset, it means the mode has already been set,
1672 * and it must not* reset the XMAC again, since it controls both 1629 * and it must not* reset the XMAC again, since it controls both
1673 * ports of the path 1630 * ports of the path
@@ -1691,13 +1648,13 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1691 if (is_port4mode) { 1648 if (is_port4mode) {
1692 DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); 1649 DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
1693 1650
1694 /* Set the number of ports on the system side to up to 2 */ 1651 /* Set the number of ports on the system side to up to 2 */
1695 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); 1652 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
1696 1653
1697 /* Set the number of ports on the Warp Core to 10G */ 1654 /* Set the number of ports on the Warp Core to 10G */
1698 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); 1655 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
1699 } else { 1656 } else {
1700 /* Set the number of ports on the system side to 1 */ 1657 /* Set the number of ports on the system side to 1 */
1701 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); 1658 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
1702 if (max_speed == SPEED_10000) { 1659 if (max_speed == SPEED_10000) {
1703 DP(NETIF_MSG_LINK, 1660 DP(NETIF_MSG_LINK,
@@ -1729,8 +1686,7 @@ static void bnx2x_xmac_disable(struct link_params *params)
1729 1686
1730 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 1687 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
1731 MISC_REGISTERS_RESET_REG_2_XMAC) { 1688 MISC_REGISTERS_RESET_REG_2_XMAC) {
1732 /* 1689 /* Send an indication to change the state in the NIG back to XON
1733 * Send an indication to change the state in the NIG back to XON
1734 * Clearing this bit enables the next set of this bit to get 1690 * Clearing this bit enables the next set of this bit to get
1735 * rising edge 1691 * rising edge
1736 */ 1692 */
@@ -1755,13 +1711,11 @@ static int bnx2x_xmac_enable(struct link_params *params,
1755 1711
1756 bnx2x_xmac_init(params, vars->line_speed); 1712 bnx2x_xmac_init(params, vars->line_speed);
1757 1713
1758 /* 1714 /* This register determines on which events the MAC will assert
1759 * This register determines on which events the MAC will assert
1760 * error on the i/f to the NIG along w/ EOP. 1715 * error on the i/f to the NIG along w/ EOP.
1761 */ 1716 */
1762 1717
1763 /* 1718 /* This register tells the NIG whether to send traffic to UMAC
1764 * This register tells the NIG whether to send traffic to UMAC
1765 * or XMAC 1719 * or XMAC
1766 */ 1720 */
1767 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); 1721 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
@@ -1863,8 +1817,7 @@ static int bnx2x_emac_enable(struct link_params *params,
1863 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 1817 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
1864 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 1818 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
1865 1819
1866 /* 1820 /* Setting this bit causes MAC control frames (except for pause
1867 * Setting this bit causes MAC control frames (except for pause
1868 * frames) to be passed on for processing. This setting has no 1821 * frames) to be passed on for processing. This setting has no
1869 * affect on the operation of the pause frames. This bit effects 1822 * affect on the operation of the pause frames. This bit effects
1870 * all packets regardless of RX Parser packet sorting logic. 1823 * all packets regardless of RX Parser packet sorting logic.
@@ -1963,8 +1916,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
1963 struct link_vars *vars, 1916 struct link_vars *vars,
1964 u8 is_lb) 1917 u8 is_lb)
1965{ 1918{
1966 /* 1919 /* Set rx control: Strip CRC and enable BigMAC to relay
1967 * Set rx control: Strip CRC and enable BigMAC to relay
1968 * control packets to the system as well 1920 * control packets to the system as well
1969 */ 1921 */
1970 u32 wb_data[2]; 1922 u32 wb_data[2];
@@ -2016,8 +1968,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
2016 1968
2017 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); 1969 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
2018 1970
2019 /* 1971 /* Set Time (based unit is 512 bit time) between automatic
2020 * Set Time (based unit is 512 bit time) between automatic
2021 * re-sending of PP packets amd enable automatic re-send of 1972 * re-sending of PP packets amd enable automatic re-send of
2022 * Per-Priroity Packet as long as pp_gen is asserted and 1973 * Per-Priroity Packet as long as pp_gen is asserted and
2023 * pp_disable is low. 1974 * pp_disable is low.
@@ -2086,7 +2037,7 @@ static int bnx2x_pfc_brb_get_config_params(
2086 config_val->default_class1.full_xon = 0; 2037 config_val->default_class1.full_xon = 0;
2087 2038
2088 if (CHIP_IS_E2(bp)) { 2039 if (CHIP_IS_E2(bp)) {
2089 /* class0 defaults */ 2040 /* Class0 defaults */
2090 config_val->default_class0.pause_xoff = 2041 config_val->default_class0.pause_xoff =
2091 DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR; 2042 DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
2092 config_val->default_class0.pause_xon = 2043 config_val->default_class0.pause_xon =
@@ -2095,7 +2046,7 @@ static int bnx2x_pfc_brb_get_config_params(
2095 DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR; 2046 DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
2096 config_val->default_class0.full_xon = 2047 config_val->default_class0.full_xon =
2097 DEFAULT0_E2_BRB_MAC_FULL_XON_THR; 2048 DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
2098 /* pause able*/ 2049 /* Pause able*/
2099 config_val->pauseable_th.pause_xoff = 2050 config_val->pauseable_th.pause_xoff =
2100 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; 2051 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2101 config_val->pauseable_th.pause_xon = 2052 config_val->pauseable_th.pause_xon =
@@ -2114,7 +2065,7 @@ static int bnx2x_pfc_brb_get_config_params(
2114 config_val->non_pauseable_th.full_xon = 2065 config_val->non_pauseable_th.full_xon =
2115 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; 2066 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2116 } else if (CHIP_IS_E3A0(bp)) { 2067 } else if (CHIP_IS_E3A0(bp)) {
2117 /* class0 defaults */ 2068 /* Class0 defaults */
2118 config_val->default_class0.pause_xoff = 2069 config_val->default_class0.pause_xoff =
2119 DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR; 2070 DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
2120 config_val->default_class0.pause_xon = 2071 config_val->default_class0.pause_xon =
@@ -2123,7 +2074,7 @@ static int bnx2x_pfc_brb_get_config_params(
2123 DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR; 2074 DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
2124 config_val->default_class0.full_xon = 2075 config_val->default_class0.full_xon =
2125 DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR; 2076 DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
2126 /* pause able */ 2077 /* Pause able */
2127 config_val->pauseable_th.pause_xoff = 2078 config_val->pauseable_th.pause_xoff =
2128 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; 2079 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2129 config_val->pauseable_th.pause_xon = 2080 config_val->pauseable_th.pause_xon =
@@ -2142,7 +2093,7 @@ static int bnx2x_pfc_brb_get_config_params(
2142 config_val->non_pauseable_th.full_xon = 2093 config_val->non_pauseable_th.full_xon =
2143 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; 2094 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2144 } else if (CHIP_IS_E3B0(bp)) { 2095 } else if (CHIP_IS_E3B0(bp)) {
2145 /* class0 defaults */ 2096 /* Class0 defaults */
2146 config_val->default_class0.pause_xoff = 2097 config_val->default_class0.pause_xoff =
2147 DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR; 2098 DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
2148 config_val->default_class0.pause_xon = 2099 config_val->default_class0.pause_xon =
@@ -2305,27 +2256,23 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2305 reg_th_config = &config_val.non_pauseable_th; 2256 reg_th_config = &config_val.non_pauseable_th;
2306 } else 2257 } else
2307 reg_th_config = &config_val.default_class0; 2258 reg_th_config = &config_val.default_class0;
2308 /* 2259 /* The number of free blocks below which the pause signal to class 0
2309 * The number of free blocks below which the pause signal to class 0
2310 * of MAC #n is asserted. n=0,1 2260 * of MAC #n is asserted. n=0,1
2311 */ 2261 */
2312 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : 2262 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
2313 BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , 2263 BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
2314 reg_th_config->pause_xoff); 2264 reg_th_config->pause_xoff);
2315 /* 2265 /* The number of free blocks above which the pause signal to class 0
2316 * The number of free blocks above which the pause signal to class 0
2317 * of MAC #n is de-asserted. n=0,1 2266 * of MAC #n is de-asserted. n=0,1
2318 */ 2267 */
2319 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : 2268 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
2320 BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); 2269 BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
2321 /* 2270 /* The number of free blocks below which the full signal to class 0
2322 * The number of free blocks below which the full signal to class 0
2323 * of MAC #n is asserted. n=0,1 2271 * of MAC #n is asserted. n=0,1
2324 */ 2272 */
2325 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : 2273 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
2326 BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); 2274 BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
2327 /* 2275 /* The number of free blocks above which the full signal to class 0
2328 * The number of free blocks above which the full signal to class 0
2329 * of MAC #n is de-asserted. n=0,1 2276 * of MAC #n is de-asserted. n=0,1
2330 */ 2277 */
2331 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : 2278 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
@@ -2339,30 +2286,26 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2339 reg_th_config = &config_val.non_pauseable_th; 2286 reg_th_config = &config_val.non_pauseable_th;
2340 } else 2287 } else
2341 reg_th_config = &config_val.default_class1; 2288 reg_th_config = &config_val.default_class1;
2342 /* 2289 /* The number of free blocks below which the pause signal to
2343 * The number of free blocks below which the pause signal to
2344 * class 1 of MAC #n is asserted. n=0,1 2290 * class 1 of MAC #n is asserted. n=0,1
2345 */ 2291 */
2346 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : 2292 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
2347 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, 2293 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
2348 reg_th_config->pause_xoff); 2294 reg_th_config->pause_xoff);
2349 2295
2350 /* 2296 /* The number of free blocks above which the pause signal to
2351 * The number of free blocks above which the pause signal to
2352 * class 1 of MAC #n is de-asserted. n=0,1 2297 * class 1 of MAC #n is de-asserted. n=0,1
2353 */ 2298 */
2354 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : 2299 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
2355 BRB1_REG_PAUSE_1_XON_THRESHOLD_0, 2300 BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
2356 reg_th_config->pause_xon); 2301 reg_th_config->pause_xon);
2357 /* 2302 /* The number of free blocks below which the full signal to
2358 * The number of free blocks below which the full signal to
2359 * class 1 of MAC #n is asserted. n=0,1 2303 * class 1 of MAC #n is asserted. n=0,1
2360 */ 2304 */
2361 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : 2305 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
2362 BRB1_REG_FULL_1_XOFF_THRESHOLD_0, 2306 BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
2363 reg_th_config->full_xoff); 2307 reg_th_config->full_xoff);
2364 /* 2308 /* The number of free blocks above which the full signal to
2365 * The number of free blocks above which the full signal to
2366 * class 1 of MAC #n is de-asserted. n=0,1 2309 * class 1 of MAC #n is de-asserted. n=0,1
2367 */ 2310 */
2368 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : 2311 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
@@ -2379,49 +2322,41 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2379 REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE, 2322 REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
2380 e3b0_val.per_class_guaranty_mode); 2323 e3b0_val.per_class_guaranty_mode);
2381 2324
2382 /* 2325 /* The hysteresis on the guarantied buffer space for the Lb
2383 * The hysteresis on the guarantied buffer space for the Lb
2384 * port before signaling XON. 2326 * port before signaling XON.
2385 */ 2327 */
2386 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 2328 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
2387 e3b0_val.lb_guarantied_hyst); 2329 e3b0_val.lb_guarantied_hyst);
2388 2330
2389 /* 2331 /* The number of free blocks below which the full signal to the
2390 * The number of free blocks below which the full signal to the
2391 * LB port is asserted. 2332 * LB port is asserted.
2392 */ 2333 */
2393 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 2334 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
2394 e3b0_val.full_lb_xoff_th); 2335 e3b0_val.full_lb_xoff_th);
2395 /* 2336 /* The number of free blocks above which the full signal to the
2396 * The number of free blocks above which the full signal to the
2397 * LB port is de-asserted. 2337 * LB port is de-asserted.
2398 */ 2338 */
2399 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 2339 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
2400 e3b0_val.full_lb_xon_threshold); 2340 e3b0_val.full_lb_xon_threshold);
2401 /* 2341 /* The number of blocks guarantied for the MAC #n port. n=0,1
2402 * The number of blocks guarantied for the MAC #n port. n=0,1
2403 */ 2342 */
2404 2343
2405 /* The number of blocks guarantied for the LB port.*/ 2344 /* The number of blocks guarantied for the LB port. */
2406 REG_WR(bp, BRB1_REG_LB_GUARANTIED, 2345 REG_WR(bp, BRB1_REG_LB_GUARANTIED,
2407 e3b0_val.lb_guarantied); 2346 e3b0_val.lb_guarantied);
2408 2347
2409 /* 2348 /* The number of blocks guarantied for the MAC #n port. */
2410 * The number of blocks guarantied for the MAC #n port.
2411 */
2412 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, 2349 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
2413 2 * e3b0_val.mac_0_class_t_guarantied); 2350 2 * e3b0_val.mac_0_class_t_guarantied);
2414 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, 2351 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
2415 2 * e3b0_val.mac_1_class_t_guarantied); 2352 2 * e3b0_val.mac_1_class_t_guarantied);
2416 /* 2353 /* The number of blocks guarantied for class #t in MAC0. t=0,1
2417 * The number of blocks guarantied for class #t in MAC0. t=0,1
2418 */ 2354 */
2419 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, 2355 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
2420 e3b0_val.mac_0_class_t_guarantied); 2356 e3b0_val.mac_0_class_t_guarantied);
2421 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, 2357 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
2422 e3b0_val.mac_0_class_t_guarantied); 2358 e3b0_val.mac_0_class_t_guarantied);
2423 /* 2359 /* The hysteresis on the guarantied buffer space for class in
2424 * The hysteresis on the guarantied buffer space for class in
2425 * MAC0. t=0,1 2360 * MAC0. t=0,1
2426 */ 2361 */
2427 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, 2362 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
@@ -2429,15 +2364,13 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2429 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, 2364 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
2430 e3b0_val.mac_0_class_t_guarantied_hyst); 2365 e3b0_val.mac_0_class_t_guarantied_hyst);
2431 2366
2432 /* 2367 /* The number of blocks guarantied for class #t in MAC1.t=0,1
2433 * The number of blocks guarantied for class #t in MAC1.t=0,1
2434 */ 2368 */
2435 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, 2369 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
2436 e3b0_val.mac_1_class_t_guarantied); 2370 e3b0_val.mac_1_class_t_guarantied);
2437 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, 2371 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
2438 e3b0_val.mac_1_class_t_guarantied); 2372 e3b0_val.mac_1_class_t_guarantied);
2439 /* 2373 /* The hysteresis on the guarantied buffer space for class #t
2440 * The hysteresis on the guarantied buffer space for class #t
2441 * in MAC1. t=0,1 2374 * in MAC1. t=0,1
2442 */ 2375 */
2443 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, 2376 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
@@ -2520,15 +2453,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2520 FEATURE_CONFIG_PFC_ENABLED; 2453 FEATURE_CONFIG_PFC_ENABLED;
2521 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); 2454 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
2522 2455
2523 /* 2456 /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
2524 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
2525 * MAC control frames (that are not pause packets) 2457 * MAC control frames (that are not pause packets)
2526 * will be forwarded to the XCM. 2458 * will be forwarded to the XCM.
2527 */ 2459 */
2528 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : 2460 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
2529 NIG_REG_LLH0_XCM_MASK); 2461 NIG_REG_LLH0_XCM_MASK);
2530 /* 2462 /* NIG params will override non PFC params, since it's possible to
2531 * nig params will override non PFC params, since it's possible to
2532 * do transition from PFC to SAFC 2463 * do transition from PFC to SAFC
2533 */ 2464 */
2534 if (set_pfc) { 2465 if (set_pfc) {
@@ -2548,7 +2479,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2548 llfc_out_en = nig_params->llfc_out_en; 2479 llfc_out_en = nig_params->llfc_out_en;
2549 llfc_enable = nig_params->llfc_enable; 2480 llfc_enable = nig_params->llfc_enable;
2550 pause_enable = nig_params->pause_enable; 2481 pause_enable = nig_params->pause_enable;
2551 } else /*defaul non PFC mode - PAUSE */ 2482 } else /* Default non PFC mode - PAUSE */
2552 pause_enable = 1; 2483 pause_enable = 1;
2553 2484
2554 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : 2485 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
@@ -2608,8 +2539,7 @@ int bnx2x_update_pfc(struct link_params *params,
2608 struct link_vars *vars, 2539 struct link_vars *vars,
2609 struct bnx2x_nig_brb_pfc_port_params *pfc_params) 2540 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
2610{ 2541{
2611 /* 2542 /* The PFC and pause are orthogonal to one another, meaning when
2612 * The PFC and pause are orthogonal to one another, meaning when
2613 * PFC is enabled, the pause are disabled, and when PFC is 2543 * PFC is enabled, the pause are disabled, and when PFC is
2614 * disabled, pause are set according to the pause result. 2544 * disabled, pause are set according to the pause result.
2615 */ 2545 */
@@ -3148,7 +3078,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3148 EMAC_MDIO_STATUS_10MB); 3078 EMAC_MDIO_STATUS_10MB);
3149 3079
3150 /* address */ 3080 /* address */
3151
3152 tmp = ((phy->addr << 21) | (devad << 16) | reg | 3081 tmp = ((phy->addr << 21) | (devad << 16) | reg |
3153 EMAC_MDIO_COMM_COMMAND_ADDRESS | 3082 EMAC_MDIO_COMM_COMMAND_ADDRESS |
3154 EMAC_MDIO_COMM_START_BUSY); 3083 EMAC_MDIO_COMM_START_BUSY);
@@ -3337,8 +3266,7 @@ int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
3337 u8 devad, u16 reg, u16 *ret_val) 3266 u8 devad, u16 reg, u16 *ret_val)
3338{ 3267{
3339 u8 phy_index; 3268 u8 phy_index;
3340 /* 3269 /* Probe for the phy according to the given phy_addr, and execute
3341 * Probe for the phy according to the given phy_addr, and execute
3342 * the read request on it 3270 * the read request on it
3343 */ 3271 */
3344 for (phy_index = 0; phy_index < params->num_phys; phy_index++) { 3272 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3355,8 +3283,7 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
3355 u8 devad, u16 reg, u16 val) 3283 u8 devad, u16 reg, u16 val)
3356{ 3284{
3357 u8 phy_index; 3285 u8 phy_index;
3358 /* 3286 /* Probe for the phy according to the given phy_addr, and execute
3359 * Probe for the phy according to the given phy_addr, and execute
3360 * the write request on it 3287 * the write request on it
3361 */ 3288 */
3362 for (phy_index = 0; phy_index < params->num_phys; phy_index++) { 3289 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3382,7 +3309,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3382 if (bnx2x_is_4_port_mode(bp)) { 3309 if (bnx2x_is_4_port_mode(bp)) {
3383 u32 port_swap, port_swap_ovr; 3310 u32 port_swap, port_swap_ovr;
3384 3311
3385 /*figure out path swap value */ 3312 /* Figure out path swap value */
3386 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); 3313 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
3387 if (path_swap_ovr & 0x1) 3314 if (path_swap_ovr & 0x1)
3388 path_swap = (path_swap_ovr & 0x2); 3315 path_swap = (path_swap_ovr & 0x2);
@@ -3392,7 +3319,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3392 if (path_swap) 3319 if (path_swap)
3393 path = path ^ 1; 3320 path = path ^ 1;
3394 3321
3395 /*figure out port swap value */ 3322 /* Figure out port swap value */
3396 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); 3323 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
3397 if (port_swap_ovr & 0x1) 3324 if (port_swap_ovr & 0x1)
3398 port_swap = (port_swap_ovr & 0x2); 3325 port_swap = (port_swap_ovr & 0x2);
@@ -3405,7 +3332,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3405 lane = (port<<1) + path; 3332 lane = (port<<1) + path;
3406 } else { /* two port mode - no port swap */ 3333 } else { /* two port mode - no port swap */
3407 3334
3408 /*figure out path swap value */ 3335 /* Figure out path swap value */
3409 path_swap_ovr = 3336 path_swap_ovr =
3410 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); 3337 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
3411 if (path_swap_ovr & 0x1) { 3338 if (path_swap_ovr & 0x1) {
@@ -3437,8 +3364,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
3437 3364
3438 if (USES_WARPCORE(bp)) { 3365 if (USES_WARPCORE(bp)) {
3439 aer_val = bnx2x_get_warpcore_lane(phy, params); 3366 aer_val = bnx2x_get_warpcore_lane(phy, params);
3440 /* 3367 /* In Dual-lane mode, two lanes are joined together,
3441 * In Dual-lane mode, two lanes are joined together,
3442 * so in order to configure them, the AER broadcast method is 3368 * so in order to configure them, the AER broadcast method is
3443 * used here. 3369 * used here.
3444 * 0x200 is the broadcast address for lanes 0,1 3370 * 0x200 is the broadcast address for lanes 0,1
@@ -3518,8 +3444,7 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
3518{ 3444{
3519 struct bnx2x *bp = params->bp; 3445 struct bnx2x *bp = params->bp;
3520 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 3446 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3521 /** 3447 /* Resolve pause mode and advertisement Please refer to Table
3522 * resolve pause mode and advertisement Please refer to Table
3523 * 28B-3 of the 802.3ab-1999 spec 3448 * 28B-3 of the 802.3ab-1999 spec
3524 */ 3449 */
3525 3450
@@ -3642,6 +3567,7 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
3642 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; 3567 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
3643 if (pause_result & (1<<1)) 3568 if (pause_result & (1<<1))
3644 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; 3569 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
3570
3645} 3571}
3646 3572
3647static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, 3573static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
@@ -3698,6 +3624,7 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
3698 bnx2x_pause_resolve(vars, pause_result); 3624 bnx2x_pause_resolve(vars, pause_result);
3699 3625
3700} 3626}
3627
3701static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, 3628static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3702 struct link_params *params, 3629 struct link_params *params,
3703 struct link_vars *vars) 3630 struct link_vars *vars)
@@ -3819,9 +3746,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3819 3746
3820 /* Advertise pause */ 3747 /* Advertise pause */
3821 bnx2x_ext_phy_set_pause(params, phy, vars); 3748 bnx2x_ext_phy_set_pause(params, phy, vars);
3822 3749 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
3823 /*
3824 * Set KR Autoneg Work-Around flag for Warpcore version older than D108
3825 */ 3750 */
3826 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3751 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3827 MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); 3752 MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
@@ -3829,7 +3754,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3829 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); 3754 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
3830 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 3755 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3831 } 3756 }
3832
3833 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3757 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3834 MDIO_WC_REG_DIGITAL5_MISC7, &val16); 3758 MDIO_WC_REG_DIGITAL5_MISC7, &val16);
3835 3759
@@ -3903,7 +3827,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3903 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3827 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3904 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); 3828 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
3905 3829
3906 /*Enable encoded forced speed */ 3830 /* Enable encoded forced speed */
3907 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3831 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3908 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); 3832 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
3909 3833
@@ -4265,8 +4189,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
4265 PORT_HW_CFG_E3_MOD_ABS_MASK) >> 4189 PORT_HW_CFG_E3_MOD_ABS_MASK) >>
4266 PORT_HW_CFG_E3_MOD_ABS_SHIFT; 4190 PORT_HW_CFG_E3_MOD_ABS_SHIFT;
4267 4191
4268 /* 4192 /* Should not happen. This function called upon interrupt
4269 * Should not happen. This function called upon interrupt
4270 * triggered by GPIO ( since EPIO can only generate interrupts 4193 * triggered by GPIO ( since EPIO can only generate interrupts
4271 * to MCP). 4194 * to MCP).
4272 * So if this function was called and none of the GPIOs was set, 4195 * So if this function was called and none of the GPIOs was set,
@@ -4366,7 +4289,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4366 "link up, rx_tx_asic_rst 0x%x\n", 4289 "link up, rx_tx_asic_rst 0x%x\n",
4367 vars->rx_tx_asic_rst); 4290 vars->rx_tx_asic_rst);
4368 } else { 4291 } else {
4369 /*reset the lane to see if link comes up.*/ 4292 /* Reset the lane to see if link comes up.*/
4370 bnx2x_warpcore_reset_lane(bp, phy, 1); 4293 bnx2x_warpcore_reset_lane(bp, phy, 1);
4371 bnx2x_warpcore_reset_lane(bp, phy, 0); 4294 bnx2x_warpcore_reset_lane(bp, phy, 0);
4372 4295
@@ -4387,7 +4310,6 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4387 } /*params->rx_tx_asic_rst*/ 4310 } /*params->rx_tx_asic_rst*/
4388 4311
4389} 4312}
4390
4391static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, 4313static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4392 struct link_params *params, 4314 struct link_params *params,
4393 struct link_vars *vars) 4315 struct link_vars *vars)
@@ -4545,7 +4467,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
4545 /* Update those 1-copy registers */ 4467 /* Update those 1-copy registers */
4546 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4468 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4547 MDIO_AER_BLOCK_AER_REG, 0); 4469 MDIO_AER_BLOCK_AER_REG, 0);
4548 /* Enable 1G MDIO (1-copy) */ 4470 /* Enable 1G MDIO (1-copy) */
4549 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4471 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4550 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, 4472 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4551 &val16); 4473 &val16);
@@ -4624,43 +4546,43 @@ void bnx2x_sync_link(struct link_params *params,
4624 vars->duplex = DUPLEX_FULL; 4546 vars->duplex = DUPLEX_FULL;
4625 switch (vars->link_status & 4547 switch (vars->link_status &
4626 LINK_STATUS_SPEED_AND_DUPLEX_MASK) { 4548 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
4627 case LINK_10THD: 4549 case LINK_10THD:
4628 vars->duplex = DUPLEX_HALF; 4550 vars->duplex = DUPLEX_HALF;
4629 /* fall thru */ 4551 /* Fall thru */
4630 case LINK_10TFD: 4552 case LINK_10TFD:
4631 vars->line_speed = SPEED_10; 4553 vars->line_speed = SPEED_10;
4632 break; 4554 break;
4633 4555
4634 case LINK_100TXHD: 4556 case LINK_100TXHD:
4635 vars->duplex = DUPLEX_HALF; 4557 vars->duplex = DUPLEX_HALF;
4636 /* fall thru */ 4558 /* Fall thru */
4637 case LINK_100T4: 4559 case LINK_100T4:
4638 case LINK_100TXFD: 4560 case LINK_100TXFD:
4639 vars->line_speed = SPEED_100; 4561 vars->line_speed = SPEED_100;
4640 break; 4562 break;
4641 4563
4642 case LINK_1000THD: 4564 case LINK_1000THD:
4643 vars->duplex = DUPLEX_HALF; 4565 vars->duplex = DUPLEX_HALF;
4644 /* fall thru */ 4566 /* Fall thru */
4645 case LINK_1000TFD: 4567 case LINK_1000TFD:
4646 vars->line_speed = SPEED_1000; 4568 vars->line_speed = SPEED_1000;
4647 break; 4569 break;
4648 4570
4649 case LINK_2500THD: 4571 case LINK_2500THD:
4650 vars->duplex = DUPLEX_HALF; 4572 vars->duplex = DUPLEX_HALF;
4651 /* fall thru */ 4573 /* Fall thru */
4652 case LINK_2500TFD: 4574 case LINK_2500TFD:
4653 vars->line_speed = SPEED_2500; 4575 vars->line_speed = SPEED_2500;
4654 break; 4576 break;
4655 4577
4656 case LINK_10GTFD: 4578 case LINK_10GTFD:
4657 vars->line_speed = SPEED_10000; 4579 vars->line_speed = SPEED_10000;
4658 break; 4580 break;
4659 case LINK_20GTFD: 4581 case LINK_20GTFD:
4660 vars->line_speed = SPEED_20000; 4582 vars->line_speed = SPEED_20000;
4661 break; 4583 break;
4662 default: 4584 default:
4663 break; 4585 break;
4664 } 4586 }
4665 vars->flow_ctrl = 0; 4587 vars->flow_ctrl = 0;
4666 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) 4588 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
@@ -4835,9 +4757,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
4835 struct bnx2x_phy *phy) 4757 struct bnx2x_phy *phy)
4836{ 4758{
4837 struct bnx2x *bp = params->bp; 4759 struct bnx2x *bp = params->bp;
4838 /* 4760 /* Each two bits represents a lane number:
4839 * Each two bits represents a lane number: 4761 * No swap is 0123 => 0x1b no need to enable the swap
4840 * No swap is 0123 => 0x1b no need to enable the swap
4841 */ 4762 */
4842 u16 rx_lane_swap, tx_lane_swap; 4763 u16 rx_lane_swap, tx_lane_swap;
4843 4764
@@ -5051,8 +4972,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
5051 MDIO_REG_BANK_COMBO_IEEE0, 4972 MDIO_REG_BANK_COMBO_IEEE0,
5052 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 4973 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
5053 4974
5054 /* 4975 /* Program speed
5055 * program speed
5056 * - needed only if the speed is greater than 1G (2.5G or 10G) 4976 * - needed only if the speed is greater than 1G (2.5G or 10G)
5057 */ 4977 */
5058 CL22_RD_OVER_CL45(bp, phy, 4978 CL22_RD_OVER_CL45(bp, phy,
@@ -5087,8 +5007,6 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
5087 struct bnx2x *bp = params->bp; 5007 struct bnx2x *bp = params->bp;
5088 u16 val = 0; 5008 u16 val = 0;
5089 5009
5090 /* configure the 48 bits for BAM AN */
5091
5092 /* set extended capabilities */ 5010 /* set extended capabilities */
5093 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 5011 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
5094 val |= MDIO_OVER_1G_UP1_2_5G; 5012 val |= MDIO_OVER_1G_UP1_2_5G;
@@ -5234,11 +5152,8 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5234 } 5152 }
5235} 5153}
5236 5154
5237 5155/* Link management
5238/*
5239 * link management
5240 */ 5156 */
5241
5242static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, 5157static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
5243 struct link_params *params) 5158 struct link_params *params)
5244{ 5159{
@@ -5383,8 +5298,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
5383 "ustat_val(0x8371) = 0x%x\n", ustat_val); 5298 "ustat_val(0x8371) = 0x%x\n", ustat_val);
5384 return; 5299 return;
5385 } 5300 }
5386 /* 5301 /* Step 3: Check CL37 Message Pages received to indicate LP
5387 * Step 3: Check CL37 Message Pages received to indicate LP
5388 * supports only CL37 5302 * supports only CL37
5389 */ 5303 */
5390 CL22_RD_OVER_CL45(bp, phy, 5304 CL22_RD_OVER_CL45(bp, phy,
@@ -5401,8 +5315,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
5401 cl37_fsm_received); 5315 cl37_fsm_received);
5402 return; 5316 return;
5403 } 5317 }
5404 /* 5318 /* The combined cl37/cl73 fsm state information indicating that
5405 * The combined cl37/cl73 fsm state information indicating that
5406 * we are connected to a device which does not support cl73, but 5319 * we are connected to a device which does not support cl73, but
5407 * does support cl37 BAM. In this case we disable cl73 and 5320 * does support cl37 BAM. In this case we disable cl73 and
5408 * restart cl37 auto-neg 5321 * restart cl37 auto-neg
@@ -5973,8 +5886,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
5973{ 5886{
5974 u32 latch_status = 0; 5887 u32 latch_status = 0;
5975 5888
5976 /* 5889 /* Disable the MI INT ( external phy int ) by writing 1 to the
5977 * Disable the MI INT ( external phy int ) by writing 1 to the
5978 * status register. Link down indication is high-active-signal, 5890 * status register. Link down indication is high-active-signal,
5979 * so in this case we need to write the status to clear the XOR 5891 * so in this case we need to write the status to clear the XOR
5980 */ 5892 */
@@ -6009,8 +5921,7 @@ static void bnx2x_link_int_ack(struct link_params *params,
6009 struct bnx2x *bp = params->bp; 5921 struct bnx2x *bp = params->bp;
6010 u8 port = params->port; 5922 u8 port = params->port;
6011 u32 mask; 5923 u32 mask;
6012 /* 5924 /* First reset all status we assume only one line will be
6013 * First reset all status we assume only one line will be
6014 * change at a time 5925 * change at a time
6015 */ 5926 */
6016 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 5927 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -6024,8 +5935,7 @@ static void bnx2x_link_int_ack(struct link_params *params,
6024 if (is_10g_plus) 5935 if (is_10g_plus)
6025 mask = NIG_STATUS_XGXS0_LINK10G; 5936 mask = NIG_STATUS_XGXS0_LINK10G;
6026 else if (params->switch_cfg == SWITCH_CFG_10G) { 5937 else if (params->switch_cfg == SWITCH_CFG_10G) {
6027 /* 5938 /* Disable the link interrupt by writing 1 to
6028 * Disable the link interrupt by writing 1 to
6029 * the relevant lane in the status register 5939 * the relevant lane in the status register
6030 */ 5940 */
6031 u32 ser_lane = 5941 u32 ser_lane =
@@ -6227,8 +6137,7 @@ int bnx2x_set_led(struct link_params *params,
6227 break; 6137 break;
6228 6138
6229 case LED_MODE_OPER: 6139 case LED_MODE_OPER:
6230 /* 6140 /* For all other phys, OPER mode is same as ON, so in case
6231 * For all other phys, OPER mode is same as ON, so in case
6232 * link is down, do nothing 6141 * link is down, do nothing
6233 */ 6142 */
6234 if (!vars->link_up) 6143 if (!vars->link_up)
@@ -6239,9 +6148,7 @@ int bnx2x_set_led(struct link_params *params,
6239 (params->phy[EXT_PHY1].type == 6148 (params->phy[EXT_PHY1].type ==
6240 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && 6149 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
6241 CHIP_IS_E2(bp) && params->num_phys == 2) { 6150 CHIP_IS_E2(bp) && params->num_phys == 2) {
6242 /* 6151 /* This is a work-around for E2+8727 Configurations */
6243 * This is a work-around for E2+8727 Configurations
6244 */
6245 if (mode == LED_MODE_ON || 6152 if (mode == LED_MODE_ON ||
6246 speed == SPEED_10000){ 6153 speed == SPEED_10000){
6247 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 6154 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -6250,8 +6157,7 @@ int bnx2x_set_led(struct link_params *params,
6250 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 6157 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
6251 EMAC_WR(bp, EMAC_REG_EMAC_LED, 6158 EMAC_WR(bp, EMAC_REG_EMAC_LED,
6252 (tmp | EMAC_LED_OVERRIDE)); 6159 (tmp | EMAC_LED_OVERRIDE));
6253 /* 6160 /* Return here without enabling traffic
6254 * return here without enabling traffic
6255 * LED blink and setting rate in ON mode. 6161 * LED blink and setting rate in ON mode.
6256 * In oper mode, enabling LED blink 6162 * In oper mode, enabling LED blink
6257 * and setting rate is needed. 6163 * and setting rate is needed.
@@ -6260,8 +6166,7 @@ int bnx2x_set_led(struct link_params *params,
6260 return rc; 6166 return rc;
6261 } 6167 }
6262 } else if (SINGLE_MEDIA_DIRECT(params)) { 6168 } else if (SINGLE_MEDIA_DIRECT(params)) {
6263 /* 6169 /* This is a work-around for HW issue found when link
6264 * This is a work-around for HW issue found when link
6265 * is up in CL73 6170 * is up in CL73
6266 */ 6171 */
6267 if ((!CHIP_IS_E3(bp)) || 6172 if ((!CHIP_IS_E3(bp)) ||
@@ -6310,10 +6215,7 @@ int bnx2x_set_led(struct link_params *params,
6310 (speed == SPEED_1000) || 6215 (speed == SPEED_1000) ||
6311 (speed == SPEED_100) || 6216 (speed == SPEED_100) ||
6312 (speed == SPEED_10))) { 6217 (speed == SPEED_10))) {
6313 /* 6218 /* For speeds less than 10G LED scheme is different */
6314 * On Everest 1 Ax chip versions for speeds less than
6315 * 10G LED scheme is different
6316 */
6317 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 6219 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
6318 + port*4, 1); 6220 + port*4, 1);
6319 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + 6221 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
@@ -6333,8 +6235,7 @@ int bnx2x_set_led(struct link_params *params,
6333 6235
6334} 6236}
6335 6237
6336/* 6238/* This function comes to reflect the actual link state read DIRECTLY from the
6337 * This function comes to reflect the actual link state read DIRECTLY from the
6338 * HW 6239 * HW
6339 */ 6240 */
6340int bnx2x_test_link(struct link_params *params, struct link_vars *vars, 6241int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
@@ -6422,16 +6323,14 @@ static int bnx2x_link_initialize(struct link_params *params,
6422 int rc = 0; 6323 int rc = 0;
6423 u8 phy_index, non_ext_phy; 6324 u8 phy_index, non_ext_phy;
6424 struct bnx2x *bp = params->bp; 6325 struct bnx2x *bp = params->bp;
6425 /* 6326 /* In case of external phy existence, the line speed would be the
6426 * In case of external phy existence, the line speed would be the
6427 * line speed linked up by the external phy. In case it is direct 6327 * line speed linked up by the external phy. In case it is direct
6428 * only, then the line_speed during initialization will be 6328 * only, then the line_speed during initialization will be
6429 * equal to the req_line_speed 6329 * equal to the req_line_speed
6430 */ 6330 */
6431 vars->line_speed = params->phy[INT_PHY].req_line_speed; 6331 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6432 6332
6433 /* 6333 /* Initialize the internal phy in case this is a direct board
6434 * Initialize the internal phy in case this is a direct board
6435 * (no external phys), or this board has external phy which requires 6334 * (no external phys), or this board has external phy which requires
6436 * to first. 6335 * to first.
6437 */ 6336 */
@@ -6463,8 +6362,7 @@ static int bnx2x_link_initialize(struct link_params *params,
6463 } else { 6362 } else {
6464 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 6363 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6465 phy_index++) { 6364 phy_index++) {
6466 /* 6365 /* No need to initialize second phy in case of first
6467 * No need to initialize second phy in case of first
6468 * phy only selection. In case of second phy, we do 6366 * phy only selection. In case of second phy, we do
6469 * need to initialize the first phy, since they are 6367 * need to initialize the first phy, since they are
6470 * connected. 6368 * connected.
@@ -6492,7 +6390,6 @@ static int bnx2x_link_initialize(struct link_params *params,
6492 NIG_STATUS_XGXS0_LINK_STATUS | 6390 NIG_STATUS_XGXS0_LINK_STATUS |
6493 NIG_STATUS_SERDES0_LINK_STATUS | 6391 NIG_STATUS_SERDES0_LINK_STATUS |
6494 NIG_MASK_MI_INT)); 6392 NIG_MASK_MI_INT));
6495 bnx2x_update_mng(params, vars->link_status);
6496 return rc; 6393 return rc;
6497} 6394}
6498 6395
@@ -6577,7 +6474,7 @@ static int bnx2x_update_link_up(struct link_params *params,
6577 u8 link_10g) 6474 u8 link_10g)
6578{ 6475{
6579 struct bnx2x *bp = params->bp; 6476 struct bnx2x *bp = params->bp;
6580 u8 port = params->port; 6477 u8 phy_idx, port = params->port;
6581 int rc = 0; 6478 int rc = 0;
6582 6479
6583 vars->link_status |= (LINK_STATUS_LINK_UP | 6480 vars->link_status |= (LINK_STATUS_LINK_UP |
@@ -6641,11 +6538,18 @@ static int bnx2x_update_link_up(struct link_params *params,
6641 6538
6642 /* update shared memory */ 6539 /* update shared memory */
6643 bnx2x_update_mng(params, vars->link_status); 6540 bnx2x_update_mng(params, vars->link_status);
6541
6542 /* Check remote fault */
6543 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
6544 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
6545 bnx2x_check_half_open_conn(params, vars, 0);
6546 break;
6547 }
6548 }
6644 msleep(20); 6549 msleep(20);
6645 return rc; 6550 return rc;
6646} 6551}
6647/* 6552/* The bnx2x_link_update function should be called upon link
6648 * The bnx2x_link_update function should be called upon link
6649 * interrupt. 6553 * interrupt.
6650 * Link is considered up as follows: 6554 * Link is considered up as follows:
6651 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs 6555 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
@@ -6702,8 +6606,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6702 if (!CHIP_IS_E3(bp)) 6606 if (!CHIP_IS_E3(bp))
6703 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 6607 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6704 6608
6705 /* 6609 /* Step 1:
6706 * Step 1:
6707 * Check external link change only for external phys, and apply 6610 * Check external link change only for external phys, and apply
6708 * priority selection between them in case the link on both phys 6611 * priority selection between them in case the link on both phys
6709 * is up. Note that instead of the common vars, a temporary 6612 * is up. Note that instead of the common vars, a temporary
@@ -6734,23 +6637,20 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6734 switch (bnx2x_phy_selection(params)) { 6637 switch (bnx2x_phy_selection(params)) {
6735 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 6638 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
6736 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 6639 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
6737 /* 6640 /* In this option, the first PHY makes sure to pass the
6738 * In this option, the first PHY makes sure to pass the
6739 * traffic through itself only. 6641 * traffic through itself only.
6740 * Its not clear how to reset the link on the second phy 6642 * Its not clear how to reset the link on the second phy
6741 */ 6643 */
6742 active_external_phy = EXT_PHY1; 6644 active_external_phy = EXT_PHY1;
6743 break; 6645 break;
6744 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 6646 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
6745 /* 6647 /* In this option, the first PHY makes sure to pass the
6746 * In this option, the first PHY makes sure to pass the
6747 * traffic through the second PHY. 6648 * traffic through the second PHY.
6748 */ 6649 */
6749 active_external_phy = EXT_PHY2; 6650 active_external_phy = EXT_PHY2;
6750 break; 6651 break;
6751 default: 6652 default:
6752 /* 6653 /* Link indication on both PHYs with the following cases
6753 * Link indication on both PHYs with the following cases
6754 * is invalid: 6654 * is invalid:
6755 * - FIRST_PHY means that second phy wasn't initialized, 6655 * - FIRST_PHY means that second phy wasn't initialized,
6756 * hence its link is expected to be down 6656 * hence its link is expected to be down
@@ -6767,8 +6667,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6767 } 6667 }
6768 } 6668 }
6769 prev_line_speed = vars->line_speed; 6669 prev_line_speed = vars->line_speed;
6770 /* 6670 /* Step 2:
6771 * Step 2:
6772 * Read the status of the internal phy. In case of 6671 * Read the status of the internal phy. In case of
6773 * DIRECT_SINGLE_MEDIA board, this link is the external link, 6672 * DIRECT_SINGLE_MEDIA board, this link is the external link,
6774 * otherwise this is the link between the 577xx and the first 6673 * otherwise this is the link between the 577xx and the first
@@ -6778,8 +6677,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6778 params->phy[INT_PHY].read_status( 6677 params->phy[INT_PHY].read_status(
6779 &params->phy[INT_PHY], 6678 &params->phy[INT_PHY],
6780 params, vars); 6679 params, vars);
6781 /* 6680 /* The INT_PHY flow control reside in the vars. This include the
6782 * The INT_PHY flow control reside in the vars. This include the
6783 * case where the speed or flow control are not set to AUTO. 6681 * case where the speed or flow control are not set to AUTO.
6784 * Otherwise, the active external phy flow control result is set 6682 * Otherwise, the active external phy flow control result is set
6785 * to the vars. The ext_phy_line_speed is needed to check if the 6683 * to the vars. The ext_phy_line_speed is needed to check if the
@@ -6788,14 +6686,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6788 */ 6686 */
6789 if (active_external_phy > INT_PHY) { 6687 if (active_external_phy > INT_PHY) {
6790 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; 6688 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
6791 /* 6689 /* Link speed is taken from the XGXS. AN and FC result from
6792 * Link speed is taken from the XGXS. AN and FC result from
6793 * the external phy. 6690 * the external phy.
6794 */ 6691 */
6795 vars->link_status |= phy_vars[active_external_phy].link_status; 6692 vars->link_status |= phy_vars[active_external_phy].link_status;
6796 6693
6797 /* 6694 /* if active_external_phy is first PHY and link is up - disable
6798 * if active_external_phy is first PHY and link is up - disable
6799 * disable TX on second external PHY 6695 * disable TX on second external PHY
6800 */ 6696 */
6801 if (active_external_phy == EXT_PHY1) { 6697 if (active_external_phy == EXT_PHY1) {
@@ -6832,8 +6728,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6832 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," 6728 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
6833 " ext_phy_line_speed = %d\n", vars->flow_ctrl, 6729 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
6834 vars->link_status, ext_phy_line_speed); 6730 vars->link_status, ext_phy_line_speed);
6835 /* 6731 /* Upon link speed change set the NIG into drain mode. Comes to
6836 * Upon link speed change set the NIG into drain mode. Comes to
6837 * deals with possible FIFO glitch due to clk change when speed 6732 * deals with possible FIFO glitch due to clk change when speed
6838 * is decreased without link down indicator 6733 * is decreased without link down indicator
6839 */ 6734 */
@@ -6858,8 +6753,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6858 6753
6859 bnx2x_link_int_ack(params, vars, link_10g_plus); 6754 bnx2x_link_int_ack(params, vars, link_10g_plus);
6860 6755
6861 /* 6756 /* In case external phy link is up, and internal link is down
6862 * In case external phy link is up, and internal link is down
6863 * (not initialized yet probably after link initialization, it 6757 * (not initialized yet probably after link initialization, it
6864 * needs to be initialized. 6758 * needs to be initialized.
6865 * Note that after link down-up as result of cable plug, the xgxs 6759 * Note that after link down-up as result of cable plug, the xgxs
@@ -6887,8 +6781,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6887 vars); 6781 vars);
6888 } 6782 }
6889 } 6783 }
6890 /* 6784 /* Link is up only if both local phy and external phy (in case of
6891 * Link is up only if both local phy and external phy (in case of
6892 * non-direct board) are up and no fault detected on active PHY. 6785 * non-direct board) are up and no fault detected on active PHY.
6893 */ 6786 */
6894 vars->link_up = (vars->phy_link_up && 6787 vars->link_up = (vars->phy_link_up &&
@@ -6907,6 +6800,10 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6907 else 6800 else
6908 rc = bnx2x_update_link_down(params, vars); 6801 rc = bnx2x_update_link_down(params, vars);
6909 6802
6803 /* Update MCP link status was changed */
6804 if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
6805 bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
6806
6910 return rc; 6807 return rc;
6911} 6808}
6912 6809
@@ -7120,8 +7017,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7120 } 7017 }
7121 /* XAUI workaround in 8073 A0: */ 7018 /* XAUI workaround in 8073 A0: */
7122 7019
7123 /* 7020 /* After loading the boot ROM and restarting Autoneg, poll
7124 * After loading the boot ROM and restarting Autoneg, poll
7125 * Dev1, Reg $C820: 7021 * Dev1, Reg $C820:
7126 */ 7022 */
7127 7023
@@ -7130,8 +7026,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7130 MDIO_PMA_DEVAD, 7026 MDIO_PMA_DEVAD,
7131 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 7027 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
7132 &val); 7028 &val);
7133 /* 7029 /* If bit [14] = 0 or bit [13] = 0, continue on with
7134 * If bit [14] = 0 or bit [13] = 0, continue on with
7135 * system initialization (XAUI work-around not required, as 7030 * system initialization (XAUI work-around not required, as
7136 * these bits indicate 2.5G or 1G link up). 7031 * these bits indicate 2.5G or 1G link up).
7137 */ 7032 */
@@ -7140,8 +7035,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7140 return 0; 7035 return 0;
7141 } else if (!(val & (1<<15))) { 7036 } else if (!(val & (1<<15))) {
7142 DP(NETIF_MSG_LINK, "bit 15 went off\n"); 7037 DP(NETIF_MSG_LINK, "bit 15 went off\n");
7143 /* 7038 /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's
7144 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
7145 * MSB (bit15) goes to 1 (indicating that the XAUI 7039 * MSB (bit15) goes to 1 (indicating that the XAUI
7146 * workaround has completed), then continue on with 7040 * workaround has completed), then continue on with
7147 * system initialization. 7041 * system initialization.
@@ -7291,8 +7185,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7291 val = (1<<7); 7185 val = (1<<7);
7292 } else if (phy->req_line_speed == SPEED_2500) { 7186 } else if (phy->req_line_speed == SPEED_2500) {
7293 val = (1<<5); 7187 val = (1<<5);
7294 /* 7188 /* Note that 2.5G works only when used with 1G
7295 * Note that 2.5G works only when used with 1G
7296 * advertisement 7189 * advertisement
7297 */ 7190 */
7298 } else 7191 } else
@@ -7343,8 +7236,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7343 /* Add support for CL37 (passive mode) III */ 7236 /* Add support for CL37 (passive mode) III */
7344 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 7237 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
7345 7238
7346 /* 7239 /* The SNR will improve about 2db by changing BW and FEE main
7347 * The SNR will improve about 2db by changing BW and FEE main
7348 * tap. Rest commands are executed after link is up 7240 * tap. Rest commands are executed after link is up
7349 * Change FFE main cursor to 5 in EDC register 7241 * Change FFE main cursor to 5 in EDC register
7350 */ 7242 */
@@ -7431,8 +7323,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7431 7323
7432 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); 7324 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
7433 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { 7325 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
7434 /* 7326 /* The SNR will improve about 2dbby changing the BW and FEE main
7435 * The SNR will improve about 2dbby changing the BW and FEE main
7436 * tap. The 1st write to change FFE main tap is set before 7327 * tap. The 1st write to change FFE main tap is set before
7437 * restart AN. Change PLL Bandwidth in EDC register 7328 * restart AN. Change PLL Bandwidth in EDC register
7438 */ 7329 */
@@ -7479,8 +7370,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7479 bnx2x_cl45_read(bp, phy, 7370 bnx2x_cl45_read(bp, phy,
7480 MDIO_XS_DEVAD, 7371 MDIO_XS_DEVAD,
7481 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); 7372 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
7482 /* 7373 /* Set bit 3 to invert Rx in 1G mode and clear this bit
7483 * Set bit 3 to invert Rx in 1G mode and clear this bit
7484 * when it`s in 10G mode. 7374 * when it`s in 10G mode.
7485 */ 7375 */
7486 if (vars->line_speed == SPEED_1000) { 7376 if (vars->line_speed == SPEED_1000) {
@@ -7602,8 +7492,7 @@ static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
7602 u8 pmd_dis) 7492 u8 pmd_dis)
7603{ 7493{
7604 struct bnx2x *bp = params->bp; 7494 struct bnx2x *bp = params->bp;
7605 /* 7495 /* Disable transmitter only for bootcodes which can enable it afterwards
7606 * Disable transmitter only for bootcodes which can enable it afterwards
7607 * (for D3 link) 7496 * (for D3 link)
7608 */ 7497 */
7609 if (pmd_dis) { 7498 if (pmd_dis) {
@@ -7780,9 +7669,6 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7780 u32 data_array[4]; 7669 u32 data_array[4];
7781 u16 addr32; 7670 u16 addr32;
7782 struct bnx2x *bp = params->bp; 7671 struct bnx2x *bp = params->bp;
7783 /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
7784 " addr %d, cnt %d\n",
7785 addr, byte_cnt);*/
7786 if (byte_cnt > 16) { 7672 if (byte_cnt > 16) {
7787 DP(NETIF_MSG_LINK, 7673 DP(NETIF_MSG_LINK,
7788 "Reading from eeprom is limited to 16 bytes\n"); 7674 "Reading from eeprom is limited to 16 bytes\n");
@@ -7847,8 +7733,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7847 MDIO_PMA_DEVAD, 7733 MDIO_PMA_DEVAD,
7848 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 7734 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
7849 0x8002); 7735 0x8002);
7850 /* 7736 /* Wait appropriate time for two-wire command to finish before
7851 * Wait appropriate time for two-wire command to finish before
7852 * polling the status register 7737 * polling the status register
7853 */ 7738 */
7854 msleep(1); 7739 msleep(1);
@@ -7941,8 +7826,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7941 { 7826 {
7942 u8 copper_module_type; 7827 u8 copper_module_type;
7943 phy->media_type = ETH_PHY_DA_TWINAX; 7828 phy->media_type = ETH_PHY_DA_TWINAX;
7944 /* 7829 /* Check if its active cable (includes SFP+ module)
7945 * Check if its active cable (includes SFP+ module)
7946 * of passive cable 7830 * of passive cable
7947 */ 7831 */
7948 if (bnx2x_read_sfp_module_eeprom(phy, 7832 if (bnx2x_read_sfp_module_eeprom(phy,
@@ -8019,8 +7903,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8019 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 7903 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
8020 return 0; 7904 return 0;
8021} 7905}
8022/* 7906/* This function read the relevant field from the module (SFP+), and verify it
8023 * This function read the relevant field from the module (SFP+), and verify it
8024 * is compliant with this board 7907 * is compliant with this board
8025 */ 7908 */
8026static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, 7909static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
@@ -8102,8 +7985,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
8102 u8 val; 7985 u8 val;
8103 struct bnx2x *bp = params->bp; 7986 struct bnx2x *bp = params->bp;
8104 u16 timeout; 7987 u16 timeout;
8105 /* 7988 /* Initialization time after hot-plug may take up to 300ms for
8106 * Initialization time after hot-plug may take up to 300ms for
8107 * some phys type ( e.g. JDSU ) 7989 * some phys type ( e.g. JDSU )
8108 */ 7990 */
8109 7991
@@ -8125,8 +8007,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
8125 u8 is_power_up) { 8007 u8 is_power_up) {
8126 /* Make sure GPIOs are not using for LED mode */ 8008 /* Make sure GPIOs are not using for LED mode */
8127 u16 val; 8009 u16 val;
8128 /* 8010 /* In the GPIO register, bit 4 is use to determine if the GPIOs are
8129 * In the GPIO register, bit 4 is use to determine if the GPIOs are
8130 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for 8011 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
8131 * output 8012 * output
8132 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 8013 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
@@ -8142,8 +8023,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
8142 if (is_power_up) 8023 if (is_power_up)
8143 val = (1<<4); 8024 val = (1<<4);
8144 else 8025 else
8145 /* 8026 /* Set GPIO control to OUTPUT, and set the power bit
8146 * Set GPIO control to OUTPUT, and set the power bit
8147 * to according to the is_power_up 8027 * to according to the is_power_up
8148 */ 8028 */
8149 val = (1<<1); 8029 val = (1<<1);
@@ -8177,8 +8057,7 @@ static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
8177 8057
8178 DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); 8058 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
8179 8059
8180 /* 8060 /* Changing to LRM mode takes quite few seconds. So do it only
8181 * Changing to LRM mode takes quite few seconds. So do it only
8182 * if current mode is limiting (default is LRM) 8061 * if current mode is limiting (default is LRM)
8183 */ 8062 */
8184 if (cur_limiting_mode != EDC_MODE_LIMITING) 8063 if (cur_limiting_mode != EDC_MODE_LIMITING)
@@ -8313,8 +8192,7 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
8313 struct bnx2x *bp = params->bp; 8192 struct bnx2x *bp = params->bp;
8314 DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); 8193 DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
8315 if (CHIP_IS_E3(bp)) { 8194 if (CHIP_IS_E3(bp)) {
8316 /* 8195 /* Low ==> if SFP+ module is supported otherwise
8317 * Low ==> if SFP+ module is supported otherwise
8318 * High ==> if SFP+ module is not on the approved vendor list 8196 * High ==> if SFP+ module is not on the approved vendor list
8319 */ 8197 */
8320 bnx2x_set_e3_module_fault_led(params, gpio_mode); 8198 bnx2x_set_e3_module_fault_led(params, gpio_mode);
@@ -8339,8 +8217,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
8339 return; 8217 return;
8340 DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", 8218 DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
8341 power, pin_cfg); 8219 power, pin_cfg);
8342 /* 8220 /* Low ==> corresponding SFP+ module is powered
8343 * Low ==> corresponding SFP+ module is powered
8344 * high ==> the SFP+ module is powered down 8221 * high ==> the SFP+ module is powered down
8345 */ 8222 */
8346 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); 8223 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
@@ -8474,14 +8351,12 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
8474 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); 8351 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
8475 } 8352 }
8476 8353
8477 /* 8354 /* Check and set limiting mode / LRM mode on 8726. On 8727 it
8478 * Check and set limiting mode / LRM mode on 8726. On 8727 it
8479 * is done automatically 8355 * is done automatically
8480 */ 8356 */
8481 bnx2x_set_limiting_mode(params, phy, edc_mode); 8357 bnx2x_set_limiting_mode(params, phy, edc_mode);
8482 8358
8483 /* 8359 /* Enable transmit for this module if the module is approved, or
8484 * Enable transmit for this module if the module is approved, or
8485 * if unapproved modules should also enable the Tx laser 8360 * if unapproved modules should also enable the Tx laser
8486 */ 8361 */
8487 if (rc == 0 || 8362 if (rc == 0 ||
@@ -8536,8 +8411,7 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8536 bnx2x_set_gpio_int(bp, gpio_num, 8411 bnx2x_set_gpio_int(bp, gpio_num,
8537 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 8412 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
8538 gpio_port); 8413 gpio_port);
8539 /* 8414 /* Module was plugged out.
8540 * Module was plugged out.
8541 * Disable transmit for this module 8415 * Disable transmit for this module
8542 */ 8416 */
8543 phy->media_type = ETH_PHY_NOT_PRESENT; 8417 phy->media_type = ETH_PHY_NOT_PRESENT;
@@ -8607,8 +8481,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
8607 8481
8608 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" 8482 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
8609 " link_status 0x%x\n", rx_sd, pcs_status, val2); 8483 " link_status 0x%x\n", rx_sd, pcs_status, val2);
8610 /* 8484 /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
8611 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
8612 * are set, or if the autoneg bit 1 is set 8485 * are set, or if the autoneg bit 1 is set
8613 */ 8486 */
8614 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); 8487 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
@@ -8722,8 +8595,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
8722 } 8595 }
8723 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 8596 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
8724 8597
8725 /* 8598 /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
8726 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
8727 * power mode, if TX Laser is disabled 8599 * power mode, if TX Laser is disabled
8728 */ 8600 */
8729 8601
@@ -8833,8 +8705,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
8833 8705
8834 bnx2x_8726_external_rom_boot(phy, params); 8706 bnx2x_8726_external_rom_boot(phy, params);
8835 8707
8836 /* 8708 /* Need to call module detected on initialization since the module
8837 * Need to call module detected on initialization since the module
8838 * detection triggered by actual module insertion might occur before 8709 * detection triggered by actual module insertion might occur before
8839 * driver is loaded, and when driver is loaded, it reset all 8710 * driver is loaded, and when driver is loaded, it reset all
8840 * registers, including the transmitter 8711 * registers, including the transmitter
@@ -8871,8 +8742,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
8871 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 8742 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
8872 bnx2x_cl45_write(bp, phy, 8743 bnx2x_cl45_write(bp, phy,
8873 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); 8744 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
8874 /* 8745 /* Enable RX-ALARM control to receive interrupt for 1G speed
8875 * Enable RX-ALARM control to receive interrupt for 1G speed
8876 * change 8746 * change
8877 */ 8747 */
8878 bnx2x_cl45_write(bp, phy, 8748 bnx2x_cl45_write(bp, phy,
@@ -8973,8 +8843,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
8973 struct link_params *params) { 8843 struct link_params *params) {
8974 u32 swap_val, swap_override; 8844 u32 swap_val, swap_override;
8975 u8 port; 8845 u8 port;
8976 /* 8846 /* The PHY reset is controlled by GPIO 1. Fake the port number
8977 * The PHY reset is controlled by GPIO 1. Fake the port number
8978 * to cancel the swap done in set_gpio() 8847 * to cancel the swap done in set_gpio()
8979 */ 8848 */
8980 struct bnx2x *bp = params->bp; 8849 struct bnx2x *bp = params->bp;
@@ -9012,14 +8881,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9012 bnx2x_cl45_write(bp, phy, 8881 bnx2x_cl45_write(bp, phy,
9013 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); 8882 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
9014 8883
9015 /* 8884 /* Initially configure MOD_ABS to interrupt when module is
9016 * Initially configure MOD_ABS to interrupt when module is
9017 * presence( bit 8) 8885 * presence( bit 8)
9018 */ 8886 */
9019 bnx2x_cl45_read(bp, phy, 8887 bnx2x_cl45_read(bp, phy,
9020 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); 8888 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
9021 /* 8889 /* Set EDC off by setting OPTXLOS signal input to low (bit 9).
9022 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
9023 * When the EDC is off it locks onto a reference clock and avoids 8890 * When the EDC is off it locks onto a reference clock and avoids
9024 * becoming 'lost' 8891 * becoming 'lost'
9025 */ 8892 */
@@ -9040,8 +8907,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9040 if (phy->flags & FLAGS_NOC) 8907 if (phy->flags & FLAGS_NOC)
9041 val |= (3<<5); 8908 val |= (3<<5);
9042 8909
9043 /* 8910 /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
9044 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
9045 * status which reflect SFP+ module over-current 8911 * status which reflect SFP+ module over-current
9046 */ 8912 */
9047 if (!(phy->flags & FLAGS_NOC)) 8913 if (!(phy->flags & FLAGS_NOC))
@@ -9067,8 +8933,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9067 bnx2x_cl45_read(bp, phy, 8933 bnx2x_cl45_read(bp, phy,
9068 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); 8934 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
9069 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); 8935 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
9070 /* 8936 /* Power down the XAUI until link is up in case of dual-media
9071 * Power down the XAUI until link is up in case of dual-media
9072 * and 1G 8937 * and 1G
9073 */ 8938 */
9074 if (DUAL_MEDIA(params)) { 8939 if (DUAL_MEDIA(params)) {
@@ -9093,8 +8958,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9093 bnx2x_cl45_write(bp, phy, 8958 bnx2x_cl45_write(bp, phy,
9094 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); 8959 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
9095 } else { 8960 } else {
9096 /* 8961 /* Since the 8727 has only single reset pin, need to set the 10G
9097 * Since the 8727 has only single reset pin, need to set the 10G
9098 * registers although it is default 8962 * registers although it is default
9099 */ 8963 */
9100 bnx2x_cl45_write(bp, phy, 8964 bnx2x_cl45_write(bp, phy,
@@ -9109,8 +8973,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9109 0x0008); 8973 0x0008);
9110 } 8974 }
9111 8975
9112 /* 8976 /* Set 2-wire transfer rate of SFP+ module EEPROM
9113 * Set 2-wire transfer rate of SFP+ module EEPROM
9114 * to 100Khz since some DACs(direct attached cables) do 8977 * to 100Khz since some DACs(direct attached cables) do
9115 * not work at 400Khz. 8978 * not work at 400Khz.
9116 */ 8979 */
@@ -9133,8 +8996,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9133 phy->tx_preemphasis[1]); 8996 phy->tx_preemphasis[1]);
9134 } 8997 }
9135 8998
9136 /* 8999 /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
9137 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
9138 * power mode, if TX Laser is disabled 9000 * power mode, if TX Laser is disabled
9139 */ 9001 */
9140 tx_en_mode = REG_RD(bp, params->shmem_base + 9002 tx_en_mode = REG_RD(bp, params->shmem_base +
@@ -9180,8 +9042,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9180 DP(NETIF_MSG_LINK, 9042 DP(NETIF_MSG_LINK,
9181 "MOD_ABS indication show module is absent\n"); 9043 "MOD_ABS indication show module is absent\n");
9182 phy->media_type = ETH_PHY_NOT_PRESENT; 9044 phy->media_type = ETH_PHY_NOT_PRESENT;
9183 /* 9045 /* 1. Set mod_abs to detect next module
9184 * 1. Set mod_abs to detect next module
9185 * presence event 9046 * presence event
9186 * 2. Set EDC off by setting OPTXLOS signal input to low 9047 * 2. Set EDC off by setting OPTXLOS signal input to low
9187 * (bit 9). 9048 * (bit 9).
@@ -9195,8 +9056,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9195 MDIO_PMA_DEVAD, 9056 MDIO_PMA_DEVAD,
9196 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9057 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
9197 9058
9198 /* 9059 /* Clear RX alarm since it stays up as long as
9199 * Clear RX alarm since it stays up as long as
9200 * the mod_abs wasn't changed 9060 * the mod_abs wasn't changed
9201 */ 9061 */
9202 bnx2x_cl45_read(bp, phy, 9062 bnx2x_cl45_read(bp, phy,
@@ -9207,8 +9067,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9207 /* Module is present */ 9067 /* Module is present */
9208 DP(NETIF_MSG_LINK, 9068 DP(NETIF_MSG_LINK,
9209 "MOD_ABS indication show module is present\n"); 9069 "MOD_ABS indication show module is present\n");
9210 /* 9070 /* First disable transmitter, and if the module is ok, the
9211 * First disable transmitter, and if the module is ok, the
9212 * module_detection will enable it 9071 * module_detection will enable it
9213 * 1. Set mod_abs to detect next module absent event ( bit 8) 9072 * 1. Set mod_abs to detect next module absent event ( bit 8)
9214 * 2. Restore the default polarity of the OPRXLOS signal and 9073 * 2. Restore the default polarity of the OPRXLOS signal and
@@ -9222,8 +9081,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9222 MDIO_PMA_DEVAD, 9081 MDIO_PMA_DEVAD,
9223 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9082 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
9224 9083
9225 /* 9084 /* Clear RX alarm since it stays up as long as the mod_abs
9226 * Clear RX alarm since it stays up as long as the mod_abs
9227 * wasn't changed. This is need to be done before calling the 9085 * wasn't changed. This is need to be done before calling the
9228 * module detection, otherwise it will clear* the link update 9086 * module detection, otherwise it will clear* the link update
9229 * alarm 9087 * alarm
@@ -9284,8 +9142,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9284 bnx2x_cl45_read(bp, phy, 9142 bnx2x_cl45_read(bp, phy,
9285 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); 9143 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
9286 9144
9287 /* 9145 /* If a module is present and there is need to check
9288 * If a module is present and there is need to check
9289 * for over current 9146 * for over current
9290 */ 9147 */
9291 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) { 9148 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
@@ -9350,8 +9207,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9350 MDIO_PMA_DEVAD, 9207 MDIO_PMA_DEVAD,
9351 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); 9208 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
9352 9209
9353 /* 9210 /* Bits 0..2 --> speed detected,
9354 * Bits 0..2 --> speed detected,
9355 * Bits 13..15--> link is down 9211 * Bits 13..15--> link is down
9356 */ 9212 */
9357 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { 9213 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
@@ -9394,8 +9250,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9394 bnx2x_cl45_read(bp, phy, 9250 bnx2x_cl45_read(bp, phy,
9395 MDIO_PMA_DEVAD, 9251 MDIO_PMA_DEVAD,
9396 MDIO_PMA_REG_8727_PCS_GP, &val1); 9252 MDIO_PMA_REG_8727_PCS_GP, &val1);
9397 /* 9253 /* In case of dual-media board and 1G, power up the XAUI side,
9398 * In case of dual-media board and 1G, power up the XAUI side,
9399 * otherwise power it down. For 10G it is done automatically 9254 * otherwise power it down. For 10G it is done automatically
9400 */ 9255 */
9401 if (link_up) 9256 if (link_up)
@@ -9561,8 +9416,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9561 /* Save spirom version */ 9416 /* Save spirom version */
9562 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9417 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9563 } 9418 }
9564 /* 9419 /* This phy uses the NIG latch mechanism since link indication
9565 * This phy uses the NIG latch mechanism since link indication
9566 * arrives through its LED4 and not via its LASI signal, so we 9420 * arrives through its LED4 and not via its LASI signal, so we
9567 * get steady signal instead of clear on read 9421 * get steady signal instead of clear on read
9568 */ 9422 */
@@ -9667,8 +9521,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9667 if (phy->req_duplex == DUPLEX_FULL) 9521 if (phy->req_duplex == DUPLEX_FULL)
9668 autoneg_val |= (1<<8); 9522 autoneg_val |= (1<<8);
9669 9523
9670 /* 9524 /* Always write this if this is not 84833.
9671 * Always write this if this is not 84833.
9672 * For 84833, write it only when it's a forced speed. 9525 * For 84833, write it only when it's a forced speed.
9673 */ 9526 */
9674 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 9527 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
@@ -9916,8 +9769,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9916 /* Wait for GPHY to come out of reset */ 9769 /* Wait for GPHY to come out of reset */
9917 msleep(50); 9770 msleep(50);
9918 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 9771 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
9919 /* 9772 /* BCM84823 requires that XGXS links up first @ 10G for normal
9920 * BCM84823 requires that XGXS links up first @ 10G for normal
9921 * behavior. 9773 * behavior.
9922 */ 9774 */
9923 u16 temp; 9775 u16 temp;
@@ -10393,8 +10245,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10393 break; 10245 break;
10394 } 10246 }
10395 10247
10396 /* 10248 /* This is a workaround for E3+84833 until autoneg
10397 * This is a workaround for E3+84833 until autoneg
10398 * restart is fixed in f/w 10249 * restart is fixed in f/w
10399 */ 10250 */
10400 if (CHIP_IS_E3(bp)) { 10251 if (CHIP_IS_E3(bp)) {
@@ -10418,8 +10269,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10418 DP(NETIF_MSG_LINK, "54618SE cfg init\n"); 10269 DP(NETIF_MSG_LINK, "54618SE cfg init\n");
10419 usleep_range(1000, 1000); 10270 usleep_range(1000, 1000);
10420 10271
10421 /* 10272 /* This works with E3 only, no need to check the chip
10422 * This works with E3 only, no need to check the chip
10423 * before determining the port. 10273 * before determining the port.
10424 */ 10274 */
10425 port = params->port; 10275 port = params->port;
@@ -10441,7 +10291,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10441 MDIO_PMA_REG_CTRL, 0x8000); 10291 MDIO_PMA_REG_CTRL, 0x8000);
10442 bnx2x_wait_reset_complete(bp, phy, params); 10292 bnx2x_wait_reset_complete(bp, phy, params);
10443 10293
10444 /*wait for GPHY to reset */ 10294 /* Wait for GPHY to reset */
10445 msleep(50); 10295 msleep(50);
10446 10296
10447 /* Configure LED4: set to INTR (0x6). */ 10297 /* Configure LED4: set to INTR (0x6). */
@@ -10647,13 +10497,11 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
10647 u32 cfg_pin; 10497 u32 cfg_pin;
10648 u8 port; 10498 u8 port;
10649 10499
10650 /* 10500 /* In case of no EPIO routed to reset the GPHY, put it
10651 * In case of no EPIO routed to reset the GPHY, put it
10652 * in low power mode. 10501 * in low power mode.
10653 */ 10502 */
10654 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); 10503 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
10655 /* 10504 /* This works with E3 only, no need to check the chip
10656 * This works with E3 only, no need to check the chip
10657 * before determining the port. 10505 * before determining the port.
10658 */ 10506 */
10659 port = params->port; 10507 port = params->port;
@@ -10762,7 +10610,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10762 bnx2x_ext_phy_resolve_fc(phy, params, vars); 10610 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10763 10611
10764 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 10612 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
10765 /* report LP advertised speeds */ 10613 /* Report LP advertised speeds */
10766 bnx2x_cl22_read(bp, phy, 0x5, &val); 10614 bnx2x_cl22_read(bp, phy, 0x5, &val);
10767 10615
10768 if (val & (1<<5)) 10616 if (val & (1<<5))
@@ -10827,8 +10675,7 @@ static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
10827 /* This register opens the gate for the UMAC despite its name */ 10675 /* This register opens the gate for the UMAC despite its name */
10828 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); 10676 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
10829 10677
10830 /* 10678 /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
10831 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
10832 * length used by the MAC receive logic to check frames. 10679 * length used by the MAC receive logic to check frames.
10833 */ 10680 */
10834 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); 10681 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -11101,22 +10948,23 @@ static struct bnx2x_phy phy_warpcore = {
11101 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10948 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
11102 .addr = 0xff, 10949 .addr = 0xff,
11103 .def_md_devad = 0, 10950 .def_md_devad = 0,
11104 .flags = FLAGS_HW_LOCK_REQUIRED, 10951 .flags = (FLAGS_HW_LOCK_REQUIRED |
10952 FLAGS_TX_ERROR_CHECK),
11105 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10953 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11106 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10954 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11107 .mdio_ctrl = 0, 10955 .mdio_ctrl = 0,
11108 .supported = (SUPPORTED_10baseT_Half | 10956 .supported = (SUPPORTED_10baseT_Half |
11109 SUPPORTED_10baseT_Full | 10957 SUPPORTED_10baseT_Full |
11110 SUPPORTED_100baseT_Half | 10958 SUPPORTED_100baseT_Half |
11111 SUPPORTED_100baseT_Full | 10959 SUPPORTED_100baseT_Full |
11112 SUPPORTED_1000baseT_Full | 10960 SUPPORTED_1000baseT_Full |
11113 SUPPORTED_10000baseT_Full | 10961 SUPPORTED_10000baseT_Full |
11114 SUPPORTED_20000baseKR2_Full | 10962 SUPPORTED_20000baseKR2_Full |
11115 SUPPORTED_20000baseMLD2_Full | 10963 SUPPORTED_20000baseMLD2_Full |
11116 SUPPORTED_FIBRE | 10964 SUPPORTED_FIBRE |
11117 SUPPORTED_Autoneg | 10965 SUPPORTED_Autoneg |
11118 SUPPORTED_Pause | 10966 SUPPORTED_Pause |
11119 SUPPORTED_Asym_Pause), 10967 SUPPORTED_Asym_Pause),
11120 .media_type = ETH_PHY_UNSPECIFIED, 10968 .media_type = ETH_PHY_UNSPECIFIED,
11121 .ver_addr = 0, 10969 .ver_addr = 0,
11122 .req_flow_ctrl = 0, 10970 .req_flow_ctrl = 0,
@@ -11258,7 +11106,8 @@ static struct bnx2x_phy phy_8726 = {
11258 .addr = 0xff, 11106 .addr = 0xff,
11259 .def_md_devad = 0, 11107 .def_md_devad = 0,
11260 .flags = (FLAGS_HW_LOCK_REQUIRED | 11108 .flags = (FLAGS_HW_LOCK_REQUIRED |
11261 FLAGS_INIT_XGXS_FIRST), 11109 FLAGS_INIT_XGXS_FIRST |
11110 FLAGS_TX_ERROR_CHECK),
11262 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11111 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11263 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11112 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11264 .mdio_ctrl = 0, 11113 .mdio_ctrl = 0,
@@ -11289,7 +11138,8 @@ static struct bnx2x_phy phy_8727 = {
11289 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 11138 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
11290 .addr = 0xff, 11139 .addr = 0xff,
11291 .def_md_devad = 0, 11140 .def_md_devad = 0,
11292 .flags = FLAGS_FAN_FAILURE_DET_REQ, 11141 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11142 FLAGS_TX_ERROR_CHECK),
11293 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11143 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11294 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11144 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11295 .mdio_ctrl = 0, 11145 .mdio_ctrl = 0,
@@ -11354,8 +11204,9 @@ static struct bnx2x_phy phy_84823 = {
11354 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, 11204 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
11355 .addr = 0xff, 11205 .addr = 0xff,
11356 .def_md_devad = 0, 11206 .def_md_devad = 0,
11357 .flags = FLAGS_FAN_FAILURE_DET_REQ | 11207 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11358 FLAGS_REARM_LATCH_SIGNAL, 11208 FLAGS_REARM_LATCH_SIGNAL |
11209 FLAGS_TX_ERROR_CHECK),
11359 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11210 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11360 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11211 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11361 .mdio_ctrl = 0, 11212 .mdio_ctrl = 0,
@@ -11390,8 +11241,9 @@ static struct bnx2x_phy phy_84833 = {
11390 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, 11241 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
11391 .addr = 0xff, 11242 .addr = 0xff,
11392 .def_md_devad = 0, 11243 .def_md_devad = 0,
11393 .flags = FLAGS_FAN_FAILURE_DET_REQ | 11244 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11394 FLAGS_REARM_LATCH_SIGNAL, 11245 FLAGS_REARM_LATCH_SIGNAL |
11246 FLAGS_TX_ERROR_CHECK),
11395 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11247 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11396 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11248 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11397 .mdio_ctrl = 0, 11249 .mdio_ctrl = 0,
@@ -11466,9 +11318,8 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
11466 /* Get the 4 lanes xgxs config rx and tx */ 11318 /* Get the 4 lanes xgxs config rx and tx */
11467 u32 rx = 0, tx = 0, i; 11319 u32 rx = 0, tx = 0, i;
11468 for (i = 0; i < 2; i++) { 11320 for (i = 0; i < 2; i++) {
11469 /* 11321 /* INT_PHY and EXT_PHY1 share the same value location in
11470 * INT_PHY and EXT_PHY1 share the same value location in the 11322 * the shmem. When num_phys is greater than 1, than this value
11471 * shmem. When num_phys is greater than 1, than this value
11472 * applies only to EXT_PHY1 11323 * applies only to EXT_PHY1
11473 */ 11324 */
11474 if (phy_index == INT_PHY || phy_index == EXT_PHY1) { 11325 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
@@ -11546,8 +11397,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11546 offsetof(struct shmem_region, dev_info. 11397 offsetof(struct shmem_region, dev_info.
11547 port_hw_config[port].default_cfg)) & 11398 port_hw_config[port].default_cfg)) &
11548 PORT_HW_CFG_NET_SERDES_IF_MASK); 11399 PORT_HW_CFG_NET_SERDES_IF_MASK);
11549 /* 11400 /* Set the appropriate supported and flags indications per
11550 * Set the appropriate supported and flags indications per
11551 * interface type of the chip 11401 * interface type of the chip
11552 */ 11402 */
11553 switch (serdes_net_if) { 11403 switch (serdes_net_if) {
@@ -11605,8 +11455,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11605 break; 11455 break;
11606 } 11456 }
11607 11457
11608 /* 11458 /* Enable MDC/MDIO work-around for E3 A0 since free running MDC
11609 * Enable MDC/MDIO work-around for E3 A0 since free running MDC
11610 * was not set as expected. For B0, ECO will be enabled so there 11459 * was not set as expected. For B0, ECO will be enabled so there
11611 * won't be an issue there 11460 * won't be an issue there
11612 */ 11461 */
@@ -11719,8 +11568,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11719 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); 11568 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
11720 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); 11569 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
11721 11570
11722 /* 11571 /* The shmem address of the phy version is located on different
11723 * The shmem address of the phy version is located on different
11724 * structures. In case this structure is too old, do not set 11572 * structures. In case this structure is too old, do not set
11725 * the address 11573 * the address
11726 */ 11574 */
@@ -11754,8 +11602,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11754 11602
11755 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && 11603 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
11756 (phy->ver_addr)) { 11604 (phy->ver_addr)) {
11757 /* 11605 /* Remove 100Mb link supported for BCM84833 when phy fw
11758 * Remove 100Mb link supported for BCM84833 when phy fw
11759 * version lower than or equal to 1.39 11606 * version lower than or equal to 1.39
11760 */ 11607 */
11761 u32 raw_ver = REG_RD(bp, phy->ver_addr); 11608 u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11765,8 +11612,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11765 SUPPORTED_100baseT_Full); 11612 SUPPORTED_100baseT_Full);
11766 } 11613 }
11767 11614
11768 /* 11615 /* In case mdc/mdio_access of the external phy is different than the
11769 * In case mdc/mdio_access of the external phy is different than the
11770 * mdc/mdio access of the XGXS, a HW lock must be taken in each access 11616 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
11771 * to prevent one port interfere with another port's CL45 operations. 11617 * to prevent one port interfere with another port's CL45 operations.
11772 */ 11618 */
@@ -11936,13 +11782,16 @@ int bnx2x_phy_probe(struct link_params *params)
11936 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) 11782 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
11937 break; 11783 break;
11938 11784
11785 if (params->feature_config_flags &
11786 FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
11787 phy->flags &= ~FLAGS_TX_ERROR_CHECK;
11788
11939 sync_offset = params->shmem_base + 11789 sync_offset = params->shmem_base +
11940 offsetof(struct shmem_region, 11790 offsetof(struct shmem_region,
11941 dev_info.port_hw_config[params->port].media_type); 11791 dev_info.port_hw_config[params->port].media_type);
11942 media_types = REG_RD(bp, sync_offset); 11792 media_types = REG_RD(bp, sync_offset);
11943 11793
11944 /* 11794 /* Update media type for non-PMF sync only for the first time
11945 * Update media type for non-PMF sync only for the first time
11946 * In case the media type changes afterwards, it will be updated 11795 * In case the media type changes afterwards, it will be updated
11947 * using the update_status function 11796 * using the update_status function
11948 */ 11797 */
@@ -12016,8 +11865,7 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
12016 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 11865 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
12017 vars->mac_type = MAC_TYPE_XMAC; 11866 vars->mac_type = MAC_TYPE_XMAC;
12018 vars->phy_flags = PHY_XGXS_FLAG; 11867 vars->phy_flags = PHY_XGXS_FLAG;
12019 /* 11868 /* Set WC to loopback mode since link is required to provide clock
12020 * Set WC to loopback mode since link is required to provide clock
12021 * to the XMAC in 20G mode 11869 * to the XMAC in 20G mode
12022 */ 11870 */
12023 bnx2x_set_aer_mmd(params, &params->phy[0]); 11871 bnx2x_set_aer_mmd(params, &params->phy[0]);
@@ -12162,6 +12010,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12162 bnx2x_link_int_enable(params); 12010 bnx2x_link_int_enable(params);
12163 break; 12011 break;
12164 } 12012 }
12013 bnx2x_update_mng(params, vars->link_status);
12165 return 0; 12014 return 0;
12166} 12015}
12167 12016
@@ -12302,7 +12151,8 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12302 NIG_MASK_MI_INT)); 12151 NIG_MASK_MI_INT));
12303 12152
12304 /* Need to take the phy out of low power mode in order 12153 /* Need to take the phy out of low power mode in order
12305 to write to access its registers */ 12154 * to write to access its registers
12155 */
12306 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 12156 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
12307 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 12157 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
12308 port); 12158 port);
@@ -12350,8 +12200,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12350 (val | 1<<10)); 12200 (val | 1<<10));
12351 } 12201 }
12352 12202
12353 /* 12203 /* Toggle Transmitter: Power down and then up with 600ms delay
12354 * Toggle Transmitter: Power down and then up with 600ms delay
12355 * between 12204 * between
12356 */ 12205 */
12357 msleep(600); 12206 msleep(600);
@@ -12494,8 +12343,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
12494 reset_gpio = MISC_REGISTERS_GPIO_1; 12343 reset_gpio = MISC_REGISTERS_GPIO_1;
12495 port = 1; 12344 port = 1;
12496 12345
12497 /* 12346 /* Retrieve the reset gpio/port which control the reset.
12498 * Retrieve the reset gpio/port which control the reset.
12499 * Default is GPIO1, PORT1 12347 * Default is GPIO1, PORT1
12500 */ 12348 */
12501 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], 12349 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
@@ -12670,8 +12518,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12670 break; 12518 break;
12671 12519
12672 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 12520 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
12673 /* 12521 /* GPIO1 affects both ports, so there's need to pull
12674 * GPIO1 affects both ports, so there's need to pull
12675 * it for single port alone 12522 * it for single port alone
12676 */ 12523 */
12677 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, 12524 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
@@ -12679,8 +12526,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12679 phy_index, chip_id); 12526 phy_index, chip_id);
12680 break; 12527 break;
12681 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: 12528 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
12682 /* 12529 /* GPIO3's are linked, and so both need to be toggled
12683 * GPIO3's are linked, and so both need to be toggled
12684 * to obtain required 2us pulse. 12530 * to obtain required 2us pulse.
12685 */ 12531 */
12686 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, 12532 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
@@ -12779,7 +12625,8 @@ static void bnx2x_check_over_curr(struct link_params *params,
12779} 12625}
12780 12626
12781static void bnx2x_analyze_link_error(struct link_params *params, 12627static void bnx2x_analyze_link_error(struct link_params *params,
12782 struct link_vars *vars, u32 lss_status) 12628 struct link_vars *vars, u32 lss_status,
12629 u8 notify)
12783{ 12630{
12784 struct bnx2x *bp = params->bp; 12631 struct bnx2x *bp = params->bp;
12785 /* Compare new value with previous value */ 12632 /* Compare new value with previous value */
@@ -12793,8 +12640,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12793 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, 12640 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
12794 half_open_conn, lss_status); 12641 half_open_conn, lss_status);
12795 12642
12796 /* 12643 /* a. Update shmem->link_status accordingly
12797 * a. Update shmem->link_status accordingly
12798 * b. Update link_vars->link_up 12644 * b. Update link_vars->link_up
12799 */ 12645 */
12800 if (lss_status) { 12646 if (lss_status) {
@@ -12802,8 +12648,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12802 vars->link_status &= ~LINK_STATUS_LINK_UP; 12648 vars->link_status &= ~LINK_STATUS_LINK_UP;
12803 vars->link_up = 0; 12649 vars->link_up = 0;
12804 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 12650 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
12805 /* 12651
12806 * Set LED mode to off since the PHY doesn't know about these 12652 /* activate nig drain */
12653 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
12654 /* Set LED mode to off since the PHY doesn't know about these
12807 * errors 12655 * errors
12808 */ 12656 */
12809 led_mode = LED_MODE_OFF; 12657 led_mode = LED_MODE_OFF;
@@ -12813,7 +12661,11 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12813 vars->link_up = 1; 12661 vars->link_up = 1;
12814 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; 12662 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
12815 led_mode = LED_MODE_OPER; 12663 led_mode = LED_MODE_OPER;
12664
12665 /* Clear nig drain */
12666 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12816 } 12667 }
12668 bnx2x_sync_link(params, vars);
12817 /* Update the LED according to the link state */ 12669 /* Update the LED according to the link state */
12818 bnx2x_set_led(params, vars, led_mode, SPEED_10000); 12670 bnx2x_set_led(params, vars, led_mode, SPEED_10000);
12819 12671
@@ -12822,7 +12674,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12822 12674
12823 /* C. Trigger General Attention */ 12675 /* C. Trigger General Attention */
12824 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; 12676 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
12825 bnx2x_notify_link_changed(bp); 12677 if (notify)
12678 bnx2x_notify_link_changed(bp);
12826} 12679}
12827 12680
12828/****************************************************************************** 12681/******************************************************************************
@@ -12834,22 +12687,23 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12834* a fault, for example, due to break in the TX side of fiber. 12687* a fault, for example, due to break in the TX side of fiber.
12835* 12688*
12836******************************************************************************/ 12689******************************************************************************/
12837static void bnx2x_check_half_open_conn(struct link_params *params, 12690int bnx2x_check_half_open_conn(struct link_params *params,
12838 struct link_vars *vars) 12691 struct link_vars *vars,
12692 u8 notify)
12839{ 12693{
12840 struct bnx2x *bp = params->bp; 12694 struct bnx2x *bp = params->bp;
12841 u32 lss_status = 0; 12695 u32 lss_status = 0;
12842 u32 mac_base; 12696 u32 mac_base;
12843 /* In case link status is physically up @ 10G do */ 12697 /* In case link status is physically up @ 10G do */
12844 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) 12698 if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
12845 return; 12699 (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
12700 return 0;
12846 12701
12847 if (CHIP_IS_E3(bp) && 12702 if (CHIP_IS_E3(bp) &&
12848 (REG_RD(bp, MISC_REG_RESET_REG_2) & 12703 (REG_RD(bp, MISC_REG_RESET_REG_2) &
12849 (MISC_REGISTERS_RESET_REG_2_XMAC))) { 12704 (MISC_REGISTERS_RESET_REG_2_XMAC))) {
12850 /* Check E3 XMAC */ 12705 /* Check E3 XMAC */
12851 /* 12706 /* Note that link speed cannot be queried here, since it may be
12852 * Note that link speed cannot be queried here, since it may be
12853 * zero while link is down. In case UMAC is active, LSS will 12707 * zero while link is down. In case UMAC is active, LSS will
12854 * simply not be set 12708 * simply not be set
12855 */ 12709 */
@@ -12863,7 +12717,7 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
12863 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) 12717 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
12864 lss_status = 1; 12718 lss_status = 1;
12865 12719
12866 bnx2x_analyze_link_error(params, vars, lss_status); 12720 bnx2x_analyze_link_error(params, vars, lss_status, notify);
12867 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & 12721 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
12868 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { 12722 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
12869 /* Check E1X / E2 BMAC */ 12723 /* Check E1X / E2 BMAC */
@@ -12880,18 +12734,21 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
12880 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); 12734 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
12881 lss_status = (wb_data[0] > 0); 12735 lss_status = (wb_data[0] > 0);
12882 12736
12883 bnx2x_analyze_link_error(params, vars, lss_status); 12737 bnx2x_analyze_link_error(params, vars, lss_status, notify);
12884 } 12738 }
12739 return 0;
12885} 12740}
12886 12741
12887void bnx2x_period_func(struct link_params *params, struct link_vars *vars) 12742void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12888{ 12743{
12889 struct bnx2x *bp = params->bp;
12890 u16 phy_idx; 12744 u16 phy_idx;
12745 struct bnx2x *bp = params->bp;
12891 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { 12746 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
12892 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { 12747 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
12893 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]); 12748 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
12894 bnx2x_check_half_open_conn(params, vars); 12749 if (bnx2x_check_half_open_conn(params, vars, 1) !=
12750 0)
12751 DP(NETIF_MSG_LINK, "Fault detection failed\n");
12895 break; 12752 break;
12896 } 12753 }
12897 } 12754 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 763535ee4832..ea4371f4335f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -254,8 +254,10 @@ struct link_params {
254#define FEATURE_CONFIG_PFC_ENABLED (1<<1) 254#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
255#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 255#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
256#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) 256#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
257#define FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
257#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) 258#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
258#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) 259#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
260#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
259 /* Will be populated during common init */ 261 /* Will be populated during common init */
260 struct bnx2x_phy phy[MAX_PHYS]; 262 struct bnx2x_phy phy[MAX_PHYS];
261 263
@@ -495,4 +497,6 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
495 497
496void bnx2x_period_func(struct link_params *params, struct link_vars *vars); 498void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
497 499
500int bnx2x_check_half_open_conn(struct link_params *params,
501 struct link_vars *vars, u8 notify);
498#endif /* BNX2X_LINK_H */ 502#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e077d2508727..0708cb803335 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -39,7 +39,6 @@
39#include <linux/time.h> 39#include <linux/time.h>
40#include <linux/ethtool.h> 40#include <linux/ethtool.h>
41#include <linux/mii.h> 41#include <linux/mii.h>
42#include <linux/if.h>
43#include <linux/if_vlan.h> 42#include <linux/if_vlan.h>
44#include <net/ip.h> 43#include <net/ip.h>
45#include <net/ipv6.h> 44#include <net/ipv6.h>
@@ -93,15 +92,11 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
93MODULE_FIRMWARE(FW_FILE_NAME_E1H); 92MODULE_FIRMWARE(FW_FILE_NAME_E1H);
94MODULE_FIRMWARE(FW_FILE_NAME_E2); 93MODULE_FIRMWARE(FW_FILE_NAME_E2);
95 94
96static int multi_mode = 1;
97module_param(multi_mode, int, 0);
98MODULE_PARM_DESC(multi_mode, " Multi queue mode "
99 "(0 Disable; 1 Enable (default))");
100 95
101int num_queues; 96int num_queues;
102module_param(num_queues, int, 0); 97module_param(num_queues, int, 0);
103MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" 98MODULE_PARM_DESC(num_queues,
104 " (default is as a number of CPUs)"); 99 " Set number of queues (default is as a number of CPUs)");
105 100
106static int disable_tpa; 101static int disable_tpa;
107module_param(disable_tpa, int, 0); 102module_param(disable_tpa, int, 0);
@@ -141,7 +136,9 @@ enum bnx2x_board_type {
141 BCM57810, 136 BCM57810,
142 BCM57810_MF, 137 BCM57810_MF,
143 BCM57840, 138 BCM57840,
144 BCM57840_MF 139 BCM57840_MF,
140 BCM57811,
141 BCM57811_MF
145}; 142};
146 143
147/* indexed by board_type, above */ 144/* indexed by board_type, above */
@@ -158,8 +155,9 @@ static struct {
158 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, 155 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
159 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, 156 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
160 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, 157 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
161 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " 158 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
162 "Ethernet Multi Function"} 159 { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
160 { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
163}; 161};
164 162
165#ifndef PCI_DEVICE_ID_NX2_57710 163#ifndef PCI_DEVICE_ID_NX2_57710
@@ -195,6 +193,12 @@ static struct {
195#ifndef PCI_DEVICE_ID_NX2_57840_MF 193#ifndef PCI_DEVICE_ID_NX2_57840_MF
196#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF 194#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
197#endif 195#endif
196#ifndef PCI_DEVICE_ID_NX2_57811
197#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
198#endif
199#ifndef PCI_DEVICE_ID_NX2_57811_MF
200#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
201#endif
198static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { 202static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
199 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 203 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
200 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 204 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -207,6 +211,8 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
207 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, 211 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
208 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, 212 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
209 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, 213 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
214 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
215 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
210 { 0 } 216 { 0 }
211}; 217};
212 218
@@ -309,67 +315,6 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
309#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" 315#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
310#define DMAE_DP_DST_NONE "dst_addr [none]" 316#define DMAE_DP_DST_NONE "dst_addr [none]"
311 317
312static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
313 int msglvl)
314{
315 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
316
317 switch (dmae->opcode & DMAE_COMMAND_DST) {
318 case DMAE_CMD_DST_PCI:
319 if (src_type == DMAE_CMD_SRC_PCI)
320 DP(msglvl, "DMAE: opcode 0x%08x\n"
321 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
322 "comp_addr [%x:%08x], comp_val 0x%08x\n",
323 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
324 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
325 dmae->comp_addr_hi, dmae->comp_addr_lo,
326 dmae->comp_val);
327 else
328 DP(msglvl, "DMAE: opcode 0x%08x\n"
329 "src [%08x], len [%d*4], dst [%x:%08x]\n"
330 "comp_addr [%x:%08x], comp_val 0x%08x\n",
331 dmae->opcode, dmae->src_addr_lo >> 2,
332 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
333 dmae->comp_addr_hi, dmae->comp_addr_lo,
334 dmae->comp_val);
335 break;
336 case DMAE_CMD_DST_GRC:
337 if (src_type == DMAE_CMD_SRC_PCI)
338 DP(msglvl, "DMAE: opcode 0x%08x\n"
339 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
340 "comp_addr [%x:%08x], comp_val 0x%08x\n",
341 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
342 dmae->len, dmae->dst_addr_lo >> 2,
343 dmae->comp_addr_hi, dmae->comp_addr_lo,
344 dmae->comp_val);
345 else
346 DP(msglvl, "DMAE: opcode 0x%08x\n"
347 "src [%08x], len [%d*4], dst [%08x]\n"
348 "comp_addr [%x:%08x], comp_val 0x%08x\n",
349 dmae->opcode, dmae->src_addr_lo >> 2,
350 dmae->len, dmae->dst_addr_lo >> 2,
351 dmae->comp_addr_hi, dmae->comp_addr_lo,
352 dmae->comp_val);
353 break;
354 default:
355 if (src_type == DMAE_CMD_SRC_PCI)
356 DP(msglvl, "DMAE: opcode 0x%08x\n"
357 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
358 "comp_addr [%x:%08x] comp_val 0x%08x\n",
359 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
360 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
361 dmae->comp_val);
362 else
363 DP(msglvl, "DMAE: opcode 0x%08x\n"
364 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
365 "comp_addr [%x:%08x] comp_val 0x%08x\n",
366 dmae->opcode, dmae->src_addr_lo >> 2,
367 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
368 dmae->comp_val);
369 break;
370 }
371
372}
373 318
374/* copy command into DMAE command memory and set DMAE command go */ 319/* copy command into DMAE command memory and set DMAE command go */
375void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) 320void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -506,8 +451,6 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
506 dmae.dst_addr_hi = 0; 451 dmae.dst_addr_hi = 0;
507 dmae.len = len32; 452 dmae.len = len32;
508 453
509 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
510
511 /* issue the command and wait for completion */ 454 /* issue the command and wait for completion */
512 bnx2x_issue_dmae_with_comp(bp, &dmae); 455 bnx2x_issue_dmae_with_comp(bp, &dmae);
513} 456}
@@ -540,8 +483,6 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
540 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); 483 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
541 dmae.len = len32; 484 dmae.len = len32;
542 485
543 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
544
545 /* issue the command and wait for completion */ 486 /* issue the command and wait for completion */
546 bnx2x_issue_dmae_with_comp(bp, &dmae); 487 bnx2x_issue_dmae_with_comp(bp, &dmae);
547} 488}
@@ -562,27 +503,6 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
562 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); 503 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
563} 504}
564 505
565/* used only for slowpath so not inlined */
566static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
567{
568 u32 wb_write[2];
569
570 wb_write[0] = val_hi;
571 wb_write[1] = val_lo;
572 REG_WR_DMAE(bp, reg, wb_write, 2);
573}
574
575#ifdef USE_WB_RD
576static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
577{
578 u32 wb_data[2];
579
580 REG_RD_DMAE(bp, reg, wb_data, 2);
581
582 return HILO_U64(wb_data[0], wb_data[1]);
583}
584#endif
585
586static int bnx2x_mc_assert(struct bnx2x *bp) 506static int bnx2x_mc_assert(struct bnx2x *bp)
587{ 507{
588 char last_idx; 508 char last_idx;
@@ -1425,8 +1345,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1425static void bnx2x_igu_int_enable(struct bnx2x *bp) 1345static void bnx2x_igu_int_enable(struct bnx2x *bp)
1426{ 1346{
1427 u32 val; 1347 u32 val;
1428 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1348 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1429 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; 1349 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1350 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1430 1351
1431 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 1352 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1432 1353
@@ -1436,6 +1357,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1436 val |= (IGU_PF_CONF_FUNC_EN | 1357 val |= (IGU_PF_CONF_FUNC_EN |
1437 IGU_PF_CONF_MSI_MSIX_EN | 1358 IGU_PF_CONF_MSI_MSIX_EN |
1438 IGU_PF_CONF_ATTN_BIT_EN); 1359 IGU_PF_CONF_ATTN_BIT_EN);
1360
1361 if (single_msix)
1362 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1439 } else if (msi) { 1363 } else if (msi) {
1440 val &= ~IGU_PF_CONF_INT_LINE_EN; 1364 val &= ~IGU_PF_CONF_INT_LINE_EN;
1441 val |= (IGU_PF_CONF_FUNC_EN | 1365 val |= (IGU_PF_CONF_FUNC_EN |
@@ -1455,6 +1379,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1455 1379
1456 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1380 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1457 1381
1382 if (val & IGU_PF_CONF_INT_LINE_EN)
1383 pci_intx(bp->pdev, true);
1384
1458 barrier(); 1385 barrier();
1459 1386
1460 /* init leading/trailing edge */ 1387 /* init leading/trailing edge */
@@ -1719,6 +1646,27 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1719 1646
1720 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); 1647 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1721 1648
1649 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1650 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1651 /* if Q update ramrod is completed for last Q in AFEX vif set
1652 * flow, then ACK MCP at the end
1653 *
1654 * mark pending ACK to MCP bit.
1655 * prevent case that both bits are cleared.
1656 * At the end of load/unload driver checks that
1657 * sp_state is cleaerd, and this order prevents
1658 * races
1659 */
1660 smp_mb__before_clear_bit();
1661 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1662 wmb();
1663 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1664 smp_mb__after_clear_bit();
1665
1666 /* schedule workqueue to send ack to MCP */
1667 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1668 }
1669
1722 return; 1670 return;
1723} 1671}
1724 1672
@@ -2229,40 +2177,6 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2229 return rc; 2177 return rc;
2230} 2178}
2231 2179
2232static void bnx2x_init_port_minmax(struct bnx2x *bp)
2233{
2234 u32 r_param = bp->link_vars.line_speed / 8;
2235 u32 fair_periodic_timeout_usec;
2236 u32 t_fair;
2237
2238 memset(&(bp->cmng.rs_vars), 0,
2239 sizeof(struct rate_shaping_vars_per_port));
2240 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2241
2242 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2243 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2244
2245 /* this is the threshold below which no timer arming will occur
2246 1.25 coefficient is for the threshold to be a little bigger
2247 than the real time, to compensate for timer in-accuracy */
2248 bp->cmng.rs_vars.rs_threshold =
2249 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2250
2251 /* resolution of fairness timer */
2252 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2253 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2254 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2255
2256 /* this is the threshold below which we won't arm the timer anymore */
2257 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2258
2259 /* we multiply by 1e3/8 to get bytes/msec.
2260 We don't want the credits to pass a credit
2261 of the t_fair*FAIR_MEM (algorithm resolution) */
2262 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2263 /* since each tick is 4 usec */
2264 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2265}
2266 2180
2267/* Calculates the sum of vn_min_rates. 2181/* Calculates the sum of vn_min_rates.
2268 It's needed for further normalizing of the min_rates. 2182 It's needed for further normalizing of the min_rates.
@@ -2273,12 +2187,12 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
2273 In the later case fainess algorithm should be deactivated. 2187 In the later case fainess algorithm should be deactivated.
2274 If not all min_rates are zero then those that are zeroes will be set to 1. 2188 If not all min_rates are zero then those that are zeroes will be set to 1.
2275 */ 2189 */
2276static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) 2190static void bnx2x_calc_vn_min(struct bnx2x *bp,
2191 struct cmng_init_input *input)
2277{ 2192{
2278 int all_zero = 1; 2193 int all_zero = 1;
2279 int vn; 2194 int vn;
2280 2195
2281 bp->vn_weight_sum = 0;
2282 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2196 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2283 u32 vn_cfg = bp->mf_config[vn]; 2197 u32 vn_cfg = bp->mf_config[vn];
2284 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2198 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
@@ -2286,106 +2200,56 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2286 2200
2287 /* Skip hidden vns */ 2201 /* Skip hidden vns */
2288 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2202 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2289 continue; 2203 vn_min_rate = 0;
2290
2291 /* If min rate is zero - set it to 1 */ 2204 /* If min rate is zero - set it to 1 */
2292 if (!vn_min_rate) 2205 else if (!vn_min_rate)
2293 vn_min_rate = DEF_MIN_RATE; 2206 vn_min_rate = DEF_MIN_RATE;
2294 else 2207 else
2295 all_zero = 0; 2208 all_zero = 0;
2296 2209
2297 bp->vn_weight_sum += vn_min_rate; 2210 input->vnic_min_rate[vn] = vn_min_rate;
2298 } 2211 }
2299 2212
2300 /* if ETS or all min rates are zeros - disable fairness */ 2213 /* if ETS or all min rates are zeros - disable fairness */
2301 if (BNX2X_IS_ETS_ENABLED(bp)) { 2214 if (BNX2X_IS_ETS_ENABLED(bp)) {
2302 bp->cmng.flags.cmng_enables &= 2215 input->flags.cmng_enables &=
2303 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2216 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2304 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); 2217 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2305 } else if (all_zero) { 2218 } else if (all_zero) {
2306 bp->cmng.flags.cmng_enables &= 2219 input->flags.cmng_enables &=
2307 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2220 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2308 DP(NETIF_MSG_IFUP, "All MIN values are zeroes" 2221 DP(NETIF_MSG_IFUP,
2309 " fairness will be disabled\n"); 2222 "All MIN values are zeroes fairness will be disabled\n");
2310 } else 2223 } else
2311 bp->cmng.flags.cmng_enables |= 2224 input->flags.cmng_enables |=
2312 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2225 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2313} 2226}
2314 2227
2315static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) 2228static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2229 struct cmng_init_input *input)
2316{ 2230{
2317 struct rate_shaping_vars_per_vn m_rs_vn; 2231 u16 vn_max_rate;
2318 struct fairness_vars_per_vn m_fair_vn;
2319 u32 vn_cfg = bp->mf_config[vn]; 2232 u32 vn_cfg = bp->mf_config[vn];
2320 int func = func_by_vn(bp, vn);
2321 u16 vn_min_rate, vn_max_rate;
2322 int i;
2323 2233
2324 /* If function is hidden - set min and max to zeroes */ 2234 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2325 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2326 vn_min_rate = 0;
2327 vn_max_rate = 0; 2235 vn_max_rate = 0;
2328 2236 else {
2329 } else {
2330 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); 2237 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2331 2238
2332 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2239 if (IS_MF_SI(bp)) {
2333 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2334 /* If fairness is enabled (not all min rates are zeroes) and
2335 if current min rate is zero - set it to 1.
2336 This is a requirement of the algorithm. */
2337 if (bp->vn_weight_sum && (vn_min_rate == 0))
2338 vn_min_rate = DEF_MIN_RATE;
2339
2340 if (IS_MF_SI(bp))
2341 /* maxCfg in percents of linkspeed */ 2240 /* maxCfg in percents of linkspeed */
2342 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; 2241 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2343 else 2242 } else /* SD modes */
2344 /* maxCfg is absolute in 100Mb units */ 2243 /* maxCfg is absolute in 100Mb units */
2345 vn_max_rate = maxCfg * 100; 2244 vn_max_rate = maxCfg * 100;
2346 } 2245 }
2347 2246
2348 DP(NETIF_MSG_IFUP, 2247 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2349 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", 2248
2350 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); 2249 input->vnic_max_rate[vn] = vn_max_rate;
2351
2352 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2353 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2354
2355 /* global vn counter - maximal Mbps for this vn */
2356 m_rs_vn.vn_counter.rate = vn_max_rate;
2357
2358 /* quota - number of bytes transmitted in this period */
2359 m_rs_vn.vn_counter.quota =
2360 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2361
2362 if (bp->vn_weight_sum) {
2363 /* credit for each period of the fairness algorithm:
2364 number of bytes in T_FAIR (the vn share the port rate).
2365 vn_weight_sum should not be larger than 10000, thus
2366 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2367 than zero */
2368 m_fair_vn.vn_credit_delta =
2369 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2370 (8 * bp->vn_weight_sum))),
2371 (bp->cmng.fair_vars.fair_threshold +
2372 MIN_ABOVE_THRESH));
2373 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2374 m_fair_vn.vn_credit_delta);
2375 }
2376
2377 /* Store it to internal memory */
2378 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2379 REG_WR(bp, BAR_XSTRORM_INTMEM +
2380 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2381 ((u32 *)(&m_rs_vn))[i]);
2382
2383 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2384 REG_WR(bp, BAR_XSTRORM_INTMEM +
2385 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2386 ((u32 *)(&m_fair_vn))[i]);
2387} 2250}
2388 2251
2252
2389static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) 2253static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2390{ 2254{
2391 if (CHIP_REV_IS_SLOW(bp)) 2255 if (CHIP_REV_IS_SLOW(bp))
@@ -2423,38 +2287,42 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
2423 bp->mf_config[vn] = 2287 bp->mf_config[vn] =
2424 MF_CFG_RD(bp, func_mf_config[func].config); 2288 MF_CFG_RD(bp, func_mf_config[func].config);
2425 } 2289 }
2290 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2291 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2292 bp->flags |= MF_FUNC_DIS;
2293 } else {
2294 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2295 bp->flags &= ~MF_FUNC_DIS;
2296 }
2426} 2297}
2427 2298
2428static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) 2299static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2429{ 2300{
2301 struct cmng_init_input input;
2302 memset(&input, 0, sizeof(struct cmng_init_input));
2303
2304 input.port_rate = bp->link_vars.line_speed;
2430 2305
2431 if (cmng_type == CMNG_FNS_MINMAX) { 2306 if (cmng_type == CMNG_FNS_MINMAX) {
2432 int vn; 2307 int vn;
2433 2308
2434 /* clear cmng_enables */
2435 bp->cmng.flags.cmng_enables = 0;
2436
2437 /* read mf conf from shmem */ 2309 /* read mf conf from shmem */
2438 if (read_cfg) 2310 if (read_cfg)
2439 bnx2x_read_mf_cfg(bp); 2311 bnx2x_read_mf_cfg(bp);
2440 2312
2441 /* Init rate shaping and fairness contexts */
2442 bnx2x_init_port_minmax(bp);
2443
2444 /* vn_weight_sum and enable fairness if not 0 */ 2313 /* vn_weight_sum and enable fairness if not 0 */
2445 bnx2x_calc_vn_weight_sum(bp); 2314 bnx2x_calc_vn_min(bp, &input);
2446 2315
2447 /* calculate and set min-max rate for each vn */ 2316 /* calculate and set min-max rate for each vn */
2448 if (bp->port.pmf) 2317 if (bp->port.pmf)
2449 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2318 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2450 bnx2x_init_vn_minmax(bp, vn); 2319 bnx2x_calc_vn_max(bp, vn, &input);
2451 2320
2452 /* always enable rate shaping and fairness */ 2321 /* always enable rate shaping and fairness */
2453 bp->cmng.flags.cmng_enables |= 2322 input.flags.cmng_enables |=
2454 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 2323 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2455 if (!bp->vn_weight_sum) 2324
2456 DP(NETIF_MSG_IFUP, "All MIN values are zeroes" 2325 bnx2x_init_cmng(&input, &bp->cmng);
2457 " fairness will be disabled\n");
2458 return; 2326 return;
2459 } 2327 }
2460 2328
@@ -2535,6 +2403,190 @@ void bnx2x__link_status_update(struct bnx2x *bp)
2535 bnx2x_link_report(bp); 2403 bnx2x_link_report(bp);
2536} 2404}
2537 2405
2406static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2407 u16 vlan_val, u8 allowed_prio)
2408{
2409 struct bnx2x_func_state_params func_params = {0};
2410 struct bnx2x_func_afex_update_params *f_update_params =
2411 &func_params.params.afex_update;
2412
2413 func_params.f_obj = &bp->func_obj;
2414 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2415
2416 /* no need to wait for RAMROD completion, so don't
2417 * set RAMROD_COMP_WAIT flag
2418 */
2419
2420 f_update_params->vif_id = vifid;
2421 f_update_params->afex_default_vlan = vlan_val;
2422 f_update_params->allowed_priorities = allowed_prio;
2423
2424 /* if ramrod can not be sent, response to MCP immediately */
2425 if (bnx2x_func_state_change(bp, &func_params) < 0)
2426 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2427
2428 return 0;
2429}
2430
2431static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2432 u16 vif_index, u8 func_bit_map)
2433{
2434 struct bnx2x_func_state_params func_params = {0};
2435 struct bnx2x_func_afex_viflists_params *update_params =
2436 &func_params.params.afex_viflists;
2437 int rc;
2438 u32 drv_msg_code;
2439
2440 /* validate only LIST_SET and LIST_GET are received from switch */
2441 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2442 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2443 cmd_type);
2444
2445 func_params.f_obj = &bp->func_obj;
2446 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2447
2448 /* set parameters according to cmd_type */
2449 update_params->afex_vif_list_command = cmd_type;
2450 update_params->vif_list_index = cpu_to_le16(vif_index);
2451 update_params->func_bit_map =
2452 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2453 update_params->func_to_clear = 0;
2454 drv_msg_code =
2455 (cmd_type == VIF_LIST_RULE_GET) ?
2456 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2457 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2458
2459 /* if ramrod can not be sent, respond to MCP immediately for
2460 * SET and GET requests (other are not triggered from MCP)
2461 */
2462 rc = bnx2x_func_state_change(bp, &func_params);
2463 if (rc < 0)
2464 bnx2x_fw_command(bp, drv_msg_code, 0);
2465
2466 return 0;
2467}
2468
2469static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2470{
2471 struct afex_stats afex_stats;
2472 u32 func = BP_ABS_FUNC(bp);
2473 u32 mf_config;
2474 u16 vlan_val;
2475 u32 vlan_prio;
2476 u16 vif_id;
2477 u8 allowed_prio;
2478 u8 vlan_mode;
2479 u32 addr_to_write, vifid, addrs, stats_type, i;
2480
2481 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2482 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2483 DP(BNX2X_MSG_MCP,
2484 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2485 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2486 }
2487
2488 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2489 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2490 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2491 DP(BNX2X_MSG_MCP,
2492 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2493 vifid, addrs);
2494 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2495 addrs);
2496 }
2497
2498 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2499 addr_to_write = SHMEM2_RD(bp,
2500 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2501 stats_type = SHMEM2_RD(bp,
2502 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2503
2504 DP(BNX2X_MSG_MCP,
2505 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2506 addr_to_write);
2507
2508 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2509
2510 /* write response to scratchpad, for MCP */
2511 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2512 REG_WR(bp, addr_to_write + i*sizeof(u32),
2513 *(((u32 *)(&afex_stats))+i));
2514
2515 /* send ack message to MCP */
2516 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2517 }
2518
2519 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2520 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2521 bp->mf_config[BP_VN(bp)] = mf_config;
2522 DP(BNX2X_MSG_MCP,
2523 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2524 mf_config);
2525
2526 /* if VIF_SET is "enabled" */
2527 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2528 /* set rate limit directly to internal RAM */
2529 struct cmng_init_input cmng_input;
2530 struct rate_shaping_vars_per_vn m_rs_vn;
2531 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2532 u32 addr = BAR_XSTRORM_INTMEM +
2533 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2534
2535 bp->mf_config[BP_VN(bp)] = mf_config;
2536
2537 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2538 m_rs_vn.vn_counter.rate =
2539 cmng_input.vnic_max_rate[BP_VN(bp)];
2540 m_rs_vn.vn_counter.quota =
2541 (m_rs_vn.vn_counter.rate *
2542 RS_PERIODIC_TIMEOUT_USEC) / 8;
2543
2544 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2545
2546 /* read relevant values from mf_cfg struct in shmem */
2547 vif_id =
2548 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2549 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2550 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2551 vlan_val =
2552 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2553 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2554 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2555 vlan_prio = (mf_config &
2556 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2557 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2558 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2559 vlan_mode =
2560 (MF_CFG_RD(bp,
2561 func_mf_config[func].afex_config) &
2562 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2563 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2564 allowed_prio =
2565 (MF_CFG_RD(bp,
2566 func_mf_config[func].afex_config) &
2567 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2568 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2569
2570 /* send ramrod to FW, return in case of failure */
2571 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2572 allowed_prio))
2573 return;
2574
2575 bp->afex_def_vlan_tag = vlan_val;
2576 bp->afex_vlan_mode = vlan_mode;
2577 } else {
2578 /* notify link down because BP->flags is disabled */
2579 bnx2x_link_report(bp);
2580
2581 /* send INVALID VIF ramrod to FW */
2582 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2583
2584 /* Reset the default afex VLAN */
2585 bp->afex_def_vlan_tag = -1;
2586 }
2587 }
2588}
2589
2538static void bnx2x_pmf_update(struct bnx2x *bp) 2590static void bnx2x_pmf_update(struct bnx2x *bp)
2539{ 2591{
2540 int port = BP_PORT(bp); 2592 int port = BP_PORT(bp);
@@ -2680,8 +2732,11 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2680 if (IS_MF_SD(bp)) 2732 if (IS_MF_SD(bp))
2681 __set_bit(BNX2X_Q_FLG_OV, &flags); 2733 __set_bit(BNX2X_Q_FLG_OV, &flags);
2682 2734
2683 if (IS_FCOE_FP(fp)) 2735 if (IS_FCOE_FP(fp)) {
2684 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 2736 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
2737 /* For FCoE - force usage of default priority (for afex) */
2738 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
2739 }
2685 2740
2686 if (!fp->disable_tpa) { 2741 if (!fp->disable_tpa) {
2687 __set_bit(BNX2X_Q_FLG_TPA, &flags); 2742 __set_bit(BNX2X_Q_FLG_TPA, &flags);
@@ -2698,6 +2753,10 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2698 /* Always set HW VLAN stripping */ 2753 /* Always set HW VLAN stripping */
2699 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 2754 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
2700 2755
2756 /* configure silent vlan removal */
2757 if (IS_MF_AFEX(bp))
2758 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
2759
2701 2760
2702 return flags | bnx2x_get_common_flags(bp, fp, true); 2761 return flags | bnx2x_get_common_flags(bp, fp, true);
2703} 2762}
@@ -2800,6 +2859,13 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2800 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; 2859 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2801 else 2860 else
2802 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 2861 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
2862 /* configure silent vlan removal
2863 * if multi function mode is afex, then mask default vlan
2864 */
2865 if (IS_MF_AFEX(bp)) {
2866 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
2867 rxq_init->silent_removal_mask = VLAN_VID_MASK;
2868 }
2803} 2869}
2804 2870
2805static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, 2871static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
@@ -3606,6 +3672,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3606 int func = BP_FUNC(bp); 3672 int func = BP_FUNC(bp);
3607 3673
3608 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3674 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3675 bnx2x_read_mf_cfg(bp);
3609 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 3676 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3610 func_mf_config[BP_ABS_FUNC(bp)].config); 3677 func_mf_config[BP_ABS_FUNC(bp)].config);
3611 val = SHMEM_RD(bp, 3678 val = SHMEM_RD(bp,
@@ -3628,6 +3695,9 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3628 /* start dcbx state machine */ 3695 /* start dcbx state machine */
3629 bnx2x_dcbx_set_params(bp, 3696 bnx2x_dcbx_set_params(bp,
3630 BNX2X_DCBX_STATE_NEG_RECEIVED); 3697 BNX2X_DCBX_STATE_NEG_RECEIVED);
3698 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3699 bnx2x_handle_afex_cmd(bp,
3700 val & DRV_STATUS_AFEX_EVENT_MASK);
3631 if (bp->link_vars.periodic_flags & 3701 if (bp->link_vars.periodic_flags &
3632 PERIODIC_FLAGS_LINK_EVENT) { 3702 PERIODIC_FLAGS_LINK_EVENT) {
3633 /* sync with link */ 3703 /* sync with link */
@@ -4555,6 +4625,93 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4555 netif_addr_unlock_bh(bp->dev); 4625 netif_addr_unlock_bh(bp->dev);
4556} 4626}
4557 4627
4628static inline void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
4629 union event_ring_elem *elem)
4630{
4631 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
4632 DP(BNX2X_MSG_SP,
4633 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
4634 elem->message.data.vif_list_event.func_bit_map);
4635 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
4636 elem->message.data.vif_list_event.func_bit_map);
4637 } else if (elem->message.data.vif_list_event.echo ==
4638 VIF_LIST_RULE_SET) {
4639 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
4640 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
4641 }
4642}
4643
4644/* called with rtnl_lock */
4645static inline void bnx2x_after_function_update(struct bnx2x *bp)
4646{
4647 int q, rc;
4648 struct bnx2x_fastpath *fp;
4649 struct bnx2x_queue_state_params queue_params = {NULL};
4650 struct bnx2x_queue_update_params *q_update_params =
4651 &queue_params.params.update;
4652
4653 /* Send Q update command with afex vlan removal values for all Qs */
4654 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
4655
4656 /* set silent vlan removal values according to vlan mode */
4657 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4658 &q_update_params->update_flags);
4659 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
4660 &q_update_params->update_flags);
4661 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4662
4663 /* in access mode mark mask and value are 0 to strip all vlans */
4664 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
4665 q_update_params->silent_removal_value = 0;
4666 q_update_params->silent_removal_mask = 0;
4667 } else {
4668 q_update_params->silent_removal_value =
4669 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4670 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4671 }
4672
4673 for_each_eth_queue(bp, q) {
4674 /* Set the appropriate Queue object */
4675 fp = &bp->fp[q];
4676 queue_params.q_obj = &fp->q_obj;
4677
4678 /* send the ramrod */
4679 rc = bnx2x_queue_state_change(bp, &queue_params);
4680 if (rc < 0)
4681 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4682 q);
4683 }
4684
4685#ifdef BCM_CNIC
4686 if (!NO_FCOE(bp)) {
4687 fp = &bp->fp[FCOE_IDX];
4688 queue_params.q_obj = &fp->q_obj;
4689
4690 /* clear pending completion bit */
4691 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4692
4693 /* mark latest Q bit */
4694 smp_mb__before_clear_bit();
4695 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4696 smp_mb__after_clear_bit();
4697
4698 /* send Q update ramrod for FCoE Q */
4699 rc = bnx2x_queue_state_change(bp, &queue_params);
4700 if (rc < 0)
4701 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4702 q);
4703 } else {
4704 /* If no FCoE ring - ACK MCP now */
4705 bnx2x_link_report(bp);
4706 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4707 }
4708#else
4709 /* If no FCoE ring - ACK MCP now */
4710 bnx2x_link_report(bp);
4711 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4712#endif /* BCM_CNIC */
4713}
4714
4558static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 4715static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4559 struct bnx2x *bp, u32 cid) 4716 struct bnx2x *bp, u32 cid)
4560{ 4717{
@@ -4653,6 +4810,28 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4653 break; 4810 break;
4654 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4811 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4655 goto next_spqe; 4812 goto next_spqe;
4813 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
4814 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
4815 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4816 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
4817
4818 /* We will perform the Queues update from sp_rtnl task
4819 * as all Queue SP operations should run under
4820 * rtnl_lock.
4821 */
4822 smp_mb__before_clear_bit();
4823 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
4824 &bp->sp_rtnl_state);
4825 smp_mb__after_clear_bit();
4826
4827 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4828 goto next_spqe;
4829
4830 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
4831 f_obj->complete_cmd(bp, f_obj,
4832 BNX2X_F_CMD_AFEX_VIFLISTS);
4833 bnx2x_after_afex_vif_lists(bp, elem);
4834 goto next_spqe;
4656 case EVENT_RING_OPCODE_FUNCTION_START: 4835 case EVENT_RING_OPCODE_FUNCTION_START:
4657 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4836 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4658 "got FUNC_START ramrod\n"); 4837 "got FUNC_START ramrod\n");
@@ -4784,6 +4963,13 @@ static void bnx2x_sp_task(struct work_struct *work)
4784 4963
4785 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 4964 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
4786 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 4965 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
4966
4967 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
4968 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
4969 &bp->sp_state)) {
4970 bnx2x_link_report(bp);
4971 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4972 }
4787} 4973}
4788 4974
4789irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 4975irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -6255,12 +6441,24 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6255 if (!CHIP_IS_E1(bp)) 6441 if (!CHIP_IS_E1(bp))
6256 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 6442 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6257 6443
6258 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) 6444 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6259 /* Bit-map indicating which L2 hdrs may appear 6445 if (IS_MF_AFEX(bp)) {
6260 * after the basic Ethernet header 6446 /* configure that VNTag and VLAN headers must be
6261 */ 6447 * received in afex mode
6262 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 6448 */
6263 bp->path_has_ovlan ? 7 : 6); 6449 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6450 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6451 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6452 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6453 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6454 } else {
6455 /* Bit-map indicating which L2 hdrs may appear
6456 * after the basic Ethernet header
6457 */
6458 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6459 bp->path_has_ovlan ? 7 : 6);
6460 }
6461 }
6264 6462
6265 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 6463 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6266 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 6464 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
@@ -6294,9 +6492,21 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6294 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 6492 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6295 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 6493 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6296 6494
6297 if (!CHIP_IS_E1x(bp)) 6495 if (!CHIP_IS_E1x(bp)) {
6298 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 6496 if (IS_MF_AFEX(bp)) {
6299 bp->path_has_ovlan ? 7 : 6); 6497 /* configure that VNTag and VLAN headers must be
6498 * sent in afex mode
6499 */
6500 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6501 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6502 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6503 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6504 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6505 } else {
6506 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6507 bp->path_has_ovlan ? 7 : 6);
6508 }
6509 }
6300 6510
6301 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6511 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6302 6512
@@ -6514,15 +6724,29 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6514 6724
6515 6725
6516 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 6726 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
6517 if (CHIP_IS_E3B0(bp)) 6727 if (CHIP_IS_E3B0(bp)) {
6518 /* Ovlan exists only if we are in multi-function + 6728 if (IS_MF_AFEX(bp)) {
6519 * switch-dependent mode, in switch-independent there 6729 /* configure headers for AFEX mode */
6520 * is no ovlan headers 6730 REG_WR(bp, BP_PORT(bp) ?
6521 */ 6731 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6522 REG_WR(bp, BP_PORT(bp) ? 6732 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
6523 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 6733 REG_WR(bp, BP_PORT(bp) ?
6524 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 6734 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
6525 (bp->path_has_ovlan ? 7 : 6)); 6735 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
6736 REG_WR(bp, BP_PORT(bp) ?
6737 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
6738 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
6739 } else {
6740 /* Ovlan exists only if we are in multi-function +
6741 * switch-dependent mode, in switch-independent there
6742 * is no ovlan headers
6743 */
6744 REG_WR(bp, BP_PORT(bp) ?
6745 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6746 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
6747 (bp->path_has_ovlan ? 7 : 6));
6748 }
6749 }
6526 6750
6527 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 6751 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
6528 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 6752 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
@@ -6584,10 +6808,15 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6584 /* Bit-map indicating which L2 hdrs may appear after the 6808 /* Bit-map indicating which L2 hdrs may appear after the
6585 * basic Ethernet header 6809 * basic Ethernet header
6586 */ 6810 */
6587 REG_WR(bp, BP_PORT(bp) ? 6811 if (IS_MF_AFEX(bp))
6588 NIG_REG_P1_HDRS_AFTER_BASIC : 6812 REG_WR(bp, BP_PORT(bp) ?
6589 NIG_REG_P0_HDRS_AFTER_BASIC, 6813 NIG_REG_P1_HDRS_AFTER_BASIC :
6590 IS_MF_SD(bp) ? 7 : 6); 6814 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
6815 else
6816 REG_WR(bp, BP_PORT(bp) ?
6817 NIG_REG_P1_HDRS_AFTER_BASIC :
6818 NIG_REG_P0_HDRS_AFTER_BASIC,
6819 IS_MF_SD(bp) ? 7 : 6);
6591 6820
6592 if (CHIP_IS_E3(bp)) 6821 if (CHIP_IS_E3(bp))
6593 REG_WR(bp, BP_PORT(bp) ? 6822 REG_WR(bp, BP_PORT(bp) ?
@@ -6609,6 +6838,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6609 val = 1; 6838 val = 1;
6610 break; 6839 break;
6611 case MULTI_FUNCTION_SI: 6840 case MULTI_FUNCTION_SI:
6841 case MULTI_FUNCTION_AFEX:
6612 val = 2; 6842 val = 2;
6613 break; 6843 break;
6614 } 6844 }
@@ -6640,13 +6870,16 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6640static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 6870static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6641{ 6871{
6642 int reg; 6872 int reg;
6873 u32 wb_write[2];
6643 6874
6644 if (CHIP_IS_E1(bp)) 6875 if (CHIP_IS_E1(bp))
6645 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 6876 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6646 else 6877 else
6647 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 6878 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6648 6879
6649 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); 6880 wb_write[0] = ONCHIP_ADDR1(addr);
6881 wb_write[1] = ONCHIP_ADDR2(addr);
6882 REG_WR_DMAE(bp, reg, wb_write, 2);
6650} 6883}
6651 6884
6652static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 6885static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
@@ -7192,7 +7425,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7192 unsigned long ramrod_flags = 0; 7425 unsigned long ramrod_flags = 0;
7193 7426
7194#ifdef BCM_CNIC 7427#ifdef BCM_CNIC
7195 if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) { 7428 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7429 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7196 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 7430 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7197 "Ignoring Zero MAC for STORAGE SD mode\n"); 7431 "Ignoring Zero MAC for STORAGE SD mode\n");
7198 return 0; 7432 return 0;
@@ -7230,7 +7464,7 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7230 BNX2X_DEV_INFO("set number of queues to 1\n"); 7464 BNX2X_DEV_INFO("set number of queues to 1\n");
7231 break; 7465 break;
7232 default: 7466 default:
7233 /* Set number of queues according to bp->multi_mode value */ 7467 /* Set number of queues for MSI-X mode */
7234 bnx2x_set_num_queues(bp); 7468 bnx2x_set_num_queues(bp);
7235 7469
7236 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); 7470 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
@@ -7239,15 +7473,17 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7239 * so try to enable MSI-X with the requested number of fp's 7473 * so try to enable MSI-X with the requested number of fp's
7240 * and fallback to MSI or legacy INTx with one fp 7474 * and fallback to MSI or legacy INTx with one fp
7241 */ 7475 */
7242 if (bnx2x_enable_msix(bp)) { 7476 if (bnx2x_enable_msix(bp) ||
7243 /* failed to enable MSI-X */ 7477 bp->flags & USING_SINGLE_MSIX_FLAG) {
7244 BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n", 7478 /* failed to enable multiple MSI-X */
7479 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7245 bp->num_queues, 1 + NON_ETH_CONTEXT_USE); 7480 bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
7246 7481
7247 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7482 bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
7248 7483
7249 /* Try to enable MSI */ 7484 /* Try to enable MSI */
7250 if (!(bp->flags & DISABLE_MSI_FLAG)) 7485 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
7486 !(bp->flags & DISABLE_MSI_FLAG))
7251 bnx2x_enable_msi(bp); 7487 bnx2x_enable_msi(bp);
7252 } 7488 }
7253 break; 7489 break;
@@ -8727,7 +8963,8 @@ sp_rtnl_not_reset:
8727#endif 8963#endif
8728 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 8964 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
8729 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); 8965 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
8730 8966 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
8967 bnx2x_after_function_update(bp);
8731 /* 8968 /*
8732 * in case of fan failure we need to reset id if the "stop on error" 8969 * in case of fan failure we need to reset id if the "stop on error"
8733 * debug flag is set, since we trying to prevent permanent overheating 8970 * debug flag is set, since we trying to prevent permanent overheating
@@ -9201,6 +9438,17 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9201 id |= (val & 0xf); 9438 id |= (val & 0xf);
9202 bp->common.chip_id = id; 9439 bp->common.chip_id = id;
9203 9440
9441 /* force 57811 according to MISC register */
9442 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
9443 if (CHIP_IS_57810(bp))
9444 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
9445 (bp->common.chip_id & 0x0000FFFF);
9446 else if (CHIP_IS_57810_MF(bp))
9447 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
9448 (bp->common.chip_id & 0x0000FFFF);
9449 bp->common.chip_id |= 0x1;
9450 }
9451
9204 /* Set doorbell size */ 9452 /* Set doorbell size */
9205 bp->db_size = (1 << BNX2X_DB_SHIFT); 9453 bp->db_size = (1 << BNX2X_DB_SHIFT);
9206 9454
@@ -9293,7 +9541,9 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9293 bp->link_params.feature_config_flags |= 9541 bp->link_params.feature_config_flags |=
9294 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 9542 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
9295 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 9543 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
9296 9544 bp->link_params.feature_config_flags |=
9545 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
9546 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
9297 bp->link_params.feature_config_flags |= 9547 bp->link_params.feature_config_flags |=
9298 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 9548 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
9299 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 9549 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
@@ -9925,6 +10175,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
9925 10175
9926 } else 10176 } else
9927 bp->flags |= NO_FCOE_FLAG; 10177 bp->flags |= NO_FCOE_FLAG;
10178
10179 bp->mf_ext_config = cfg;
10180
9928 } else { /* SD MODE */ 10181 } else { /* SD MODE */
9929 if (IS_MF_STORAGE_SD(bp)) { 10182 if (IS_MF_STORAGE_SD(bp)) {
9930 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 10183 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
@@ -9946,6 +10199,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
9946 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10199 memset(bp->dev->dev_addr, 0, ETH_ALEN);
9947 } 10200 }
9948 } 10201 }
10202
10203 if (IS_MF_FCOE_AFEX(bp))
10204 /* use FIP MAC as primary MAC */
10205 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10206
9949#endif 10207#endif
9950 } else { 10208 } else {
9951 /* in SF read MACs from port configuration */ 10209 /* in SF read MACs from port configuration */
@@ -10118,6 +10376,19 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10118 } else 10376 } else
10119 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 10377 BNX2X_DEV_INFO("illegal MAC address for SI\n");
10120 break; 10378 break;
10379 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
10380 if ((!CHIP_IS_E1x(bp)) &&
10381 (MF_CFG_RD(bp, func_mf_config[func].
10382 mac_upper) != 0xffff) &&
10383 (SHMEM2_HAS(bp,
10384 afex_driver_support))) {
10385 bp->mf_mode = MULTI_FUNCTION_AFEX;
10386 bp->mf_config[vn] = MF_CFG_RD(bp,
10387 func_mf_config[func].config);
10388 } else {
10389 BNX2X_DEV_INFO("can not configure afex mode\n");
10390 }
10391 break;
10121 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 10392 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
10122 /* get OV configuration */ 10393 /* get OV configuration */
10123 val = MF_CFG_RD(bp, 10394 val = MF_CFG_RD(bp,
@@ -10158,6 +10429,9 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10158 return -EPERM; 10429 return -EPERM;
10159 } 10430 }
10160 break; 10431 break;
10432 case MULTI_FUNCTION_AFEX:
10433 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
10434 break;
10161 case MULTI_FUNCTION_SI: 10435 case MULTI_FUNCTION_SI:
10162 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 10436 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
10163 func); 10437 func);
@@ -10325,6 +10599,9 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
10325 case MULTI_FUNCTION_SI: 10599 case MULTI_FUNCTION_SI:
10326 SET_FLAGS(flags, MODE_MF_SI); 10600 SET_FLAGS(flags, MODE_MF_SI);
10327 break; 10601 break;
10602 case MULTI_FUNCTION_AFEX:
10603 SET_FLAGS(flags, MODE_MF_AFEX);
10604 break;
10328 } 10605 }
10329 } else 10606 } else
10330 SET_FLAGS(flags, MODE_SF); 10607 SET_FLAGS(flags, MODE_SF);
@@ -10384,12 +10661,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10384 if (BP_NOMCP(bp) && (func == 0)) 10661 if (BP_NOMCP(bp) && (func == 0))
10385 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 10662 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
10386 10663
10387 bp->multi_mode = multi_mode;
10388
10389 bp->disable_tpa = disable_tpa; 10664 bp->disable_tpa = disable_tpa;
10390 10665
10391#ifdef BCM_CNIC 10666#ifdef BCM_CNIC
10392 bp->disable_tpa |= IS_MF_STORAGE_SD(bp); 10667 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
10393#endif 10668#endif
10394 10669
10395 /* Set TPA flags */ 10670 /* Set TPA flags */
@@ -10408,7 +10683,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10408 10683
10409 bp->mrrs = mrrs; 10684 bp->mrrs = mrrs;
10410 10685
10411 bp->tx_ring_size = MAX_TX_AVAIL; 10686 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
10412 10687
10413 /* make sure that the numbers are in the right granularity */ 10688 /* make sure that the numbers are in the right granularity */
10414 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 10689 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -10439,8 +10714,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10439 if (CHIP_IS_E3B0(bp)) 10714 if (CHIP_IS_E3B0(bp))
10440 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 10715 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
10441 10716
10442 bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
10443
10444 return rc; 10717 return rc;
10445} 10718}
10446 10719
@@ -11244,6 +11517,8 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
11244 bnx2x_init_func_obj(bp, &bp->func_obj, 11517 bnx2x_init_func_obj(bp, &bp->func_obj,
11245 bnx2x_sp(bp, func_rdata), 11518 bnx2x_sp(bp, func_rdata),
11246 bnx2x_sp_mapping(bp, func_rdata), 11519 bnx2x_sp_mapping(bp, func_rdata),
11520 bnx2x_sp(bp, func_afex_rdata),
11521 bnx2x_sp_mapping(bp, func_afex_rdata),
11247 &bnx2x_func_sp_drv); 11522 &bnx2x_func_sp_drv);
11248} 11523}
11249 11524
@@ -11325,6 +11600,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11325 case BCM57810_MF: 11600 case BCM57810_MF:
11326 case BCM57840: 11601 case BCM57840:
11327 case BCM57840_MF: 11602 case BCM57840_MF:
11603 case BCM57811:
11604 case BCM57811_MF:
11328 max_cos_est = BNX2X_MULTI_TX_COS_E3B0; 11605 max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
11329 break; 11606 break;
11330 11607
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index c25803b9c0ca..bbd387492a80 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1483,6 +1483,11 @@
1483 starts at 0x0 for the A0 tape-out and increments by one for each 1483 starts at 0x0 for the A0 tape-out and increments by one for each
1484 all-layer tape-out. */ 1484 all-layer tape-out. */
1485#define MISC_REG_CHIP_REV 0xa40c 1485#define MISC_REG_CHIP_REV 0xa40c
1486/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11-
1487 * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72];
1488 * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
1489#define MISC_REG_CHIP_TYPE 0xac60
1490#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
1486/* [RW 32] The following driver registers(1...16) represent 16 drivers and 1491/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1487 32 clients. Each client can be controlled by one driver only. One in each 1492 32 clients. Each client can be controlled by one driver only. One in each
1488 bit represent that this driver control the appropriate client (Ex: bit 5 1493 bit represent that this driver control the appropriate client (Ex: bit 5
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 513573321625..6c14b4a4e82c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -633,14 +633,17 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
633} 633}
634 634
635 635
636static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, 636void bnx2x_set_mac_in_nig(struct bnx2x *bp,
637 bool add, unsigned char *dev_addr, int index) 637 bool add, unsigned char *dev_addr, int index)
638{ 638{
639 u32 wb_data[2]; 639 u32 wb_data[2];
640 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : 640 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
641 NIG_REG_LLH0_FUNC_MEM; 641 NIG_REG_LLH0_FUNC_MEM;
642 642
643 if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE) 643 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
644 return;
645
646 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
644 return; 647 return;
645 648
646 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", 649 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -4090,12 +4093,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4090 rss_mode = ETH_RSS_MODE_DISABLED; 4093 rss_mode = ETH_RSS_MODE_DISABLED;
4091 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) 4094 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4092 rss_mode = ETH_RSS_MODE_REGULAR; 4095 rss_mode = ETH_RSS_MODE_REGULAR;
4093 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4094 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4095 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4096 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4097 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4098 rss_mode = ETH_RSS_MODE_IP_DSCP;
4099 4096
4100 data->rss_mode = rss_mode; 4097 data->rss_mode = rss_mode;
4101 4098
@@ -4404,6 +4401,9 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4404 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); 4401 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4405 tx_data->anti_spoofing_flg = 4402 tx_data->anti_spoofing_flg =
4406 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); 4403 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4404 tx_data->force_default_pri_flg =
4405 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4406
4407 tx_data->tx_status_block_id = params->fw_sb_id; 4407 tx_data->tx_status_block_id = params->fw_sb_id;
4408 tx_data->tx_sb_index_number = params->sb_cq_index; 4408 tx_data->tx_sb_index_number = params->sb_cq_index;
4409 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4409 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5331,6 +5331,17 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5331 case BNX2X_F_STATE_STARTED: 5331 case BNX2X_F_STATE_STARTED:
5332 if (cmd == BNX2X_F_CMD_STOP) 5332 if (cmd == BNX2X_F_CMD_STOP)
5333 next_state = BNX2X_F_STATE_INITIALIZED; 5333 next_state = BNX2X_F_STATE_INITIALIZED;
5334 /* afex ramrods can be sent only in started mode, and only
5335 * if not pending for function_stop ramrod completion
5336 * for these events - next state remained STARTED.
5337 */
5338 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5339 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5340 next_state = BNX2X_F_STATE_STARTED;
5341
5342 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5343 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5344 next_state = BNX2X_F_STATE_STARTED;
5334 else if (cmd == BNX2X_F_CMD_TX_STOP) 5345 else if (cmd == BNX2X_F_CMD_TX_STOP)
5335 next_state = BNX2X_F_STATE_TX_STOPPED; 5346 next_state = BNX2X_F_STATE_TX_STOPPED;
5336 5347
@@ -5618,6 +5629,83 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5618 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5629 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5619} 5630}
5620 5631
5632static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5633 struct bnx2x_func_state_params *params)
5634{
5635 struct bnx2x_func_sp_obj *o = params->f_obj;
5636 struct function_update_data *rdata =
5637 (struct function_update_data *)o->afex_rdata;
5638 dma_addr_t data_mapping = o->afex_rdata_mapping;
5639 struct bnx2x_func_afex_update_params *afex_update_params =
5640 &params->params.afex_update;
5641
5642 memset(rdata, 0, sizeof(*rdata));
5643
5644 /* Fill the ramrod data with provided parameters */
5645 rdata->vif_id_change_flg = 1;
5646 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5647 rdata->afex_default_vlan_change_flg = 1;
5648 rdata->afex_default_vlan =
5649 cpu_to_le16(afex_update_params->afex_default_vlan);
5650 rdata->allowed_priorities_change_flg = 1;
5651 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5652
5653 /* No need for an explicit memory barrier here as long we would
5654 * need to ensure the ordering of writing to the SPQ element
5655 * and updating of the SPQ producer which involves a memory
5656 * read and we will have to put a full memory barrier there
5657 * (inside bnx2x_sp_post()).
5658 */
5659 DP(BNX2X_MSG_SP,
5660 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5661 rdata->vif_id,
5662 rdata->afex_default_vlan, rdata->allowed_priorities);
5663
5664 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5665 U64_HI(data_mapping),
5666 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5667}
5668
5669static
5670inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5671 struct bnx2x_func_state_params *params)
5672{
5673 struct bnx2x_func_sp_obj *o = params->f_obj;
5674 struct afex_vif_list_ramrod_data *rdata =
5675 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5676 struct bnx2x_func_afex_viflists_params *afex_viflist_params =
5677 &params->params.afex_viflists;
5678 u64 *p_rdata = (u64 *)rdata;
5679
5680 memset(rdata, 0, sizeof(*rdata));
5681
5682 /* Fill the ramrod data with provided parameters */
5683 rdata->vif_list_index = afex_viflist_params->vif_list_index;
5684 rdata->func_bit_map = afex_viflist_params->func_bit_map;
5685 rdata->afex_vif_list_command =
5686 afex_viflist_params->afex_vif_list_command;
5687 rdata->func_to_clear = afex_viflist_params->func_to_clear;
5688
5689 /* send in echo type of sub command */
5690 rdata->echo = afex_viflist_params->afex_vif_list_command;
5691
5692 /* No need for an explicit memory barrier here as long we would
5693 * need to ensure the ordering of writing to the SPQ element
5694 * and updating of the SPQ producer which involves a memory
5695 * read and we will have to put a full memory barrier there
5696 * (inside bnx2x_sp_post()).
5697 */
5698
5699 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5700 rdata->afex_vif_list_command, rdata->vif_list_index,
5701 rdata->func_bit_map, rdata->func_to_clear);
5702
5703 /* this ramrod sends data directly and not through DMA mapping */
5704 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5705 U64_HI(*p_rdata), U64_LO(*p_rdata),
5706 NONE_CONNECTION_TYPE);
5707}
5708
5621static inline int bnx2x_func_send_stop(struct bnx2x *bp, 5709static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5622 struct bnx2x_func_state_params *params) 5710 struct bnx2x_func_state_params *params)
5623{ 5711{
@@ -5669,6 +5757,10 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5669 return bnx2x_func_send_stop(bp, params); 5757 return bnx2x_func_send_stop(bp, params);
5670 case BNX2X_F_CMD_HW_RESET: 5758 case BNX2X_F_CMD_HW_RESET:
5671 return bnx2x_func_hw_reset(bp, params); 5759 return bnx2x_func_hw_reset(bp, params);
5760 case BNX2X_F_CMD_AFEX_UPDATE:
5761 return bnx2x_func_send_afex_update(bp, params);
5762 case BNX2X_F_CMD_AFEX_VIFLISTS:
5763 return bnx2x_func_send_afex_viflists(bp, params);
5672 case BNX2X_F_CMD_TX_STOP: 5764 case BNX2X_F_CMD_TX_STOP:
5673 return bnx2x_func_send_tx_stop(bp, params); 5765 return bnx2x_func_send_tx_stop(bp, params);
5674 case BNX2X_F_CMD_TX_START: 5766 case BNX2X_F_CMD_TX_START:
@@ -5682,6 +5774,7 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5682void bnx2x_init_func_obj(struct bnx2x *bp, 5774void bnx2x_init_func_obj(struct bnx2x *bp,
5683 struct bnx2x_func_sp_obj *obj, 5775 struct bnx2x_func_sp_obj *obj,
5684 void *rdata, dma_addr_t rdata_mapping, 5776 void *rdata, dma_addr_t rdata_mapping,
5777 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5685 struct bnx2x_func_sp_drv_ops *drv_iface) 5778 struct bnx2x_func_sp_drv_ops *drv_iface)
5686{ 5779{
5687 memset(obj, 0, sizeof(*obj)); 5780 memset(obj, 0, sizeof(*obj));
@@ -5690,7 +5783,8 @@ void bnx2x_init_func_obj(struct bnx2x *bp,
5690 5783
5691 obj->rdata = rdata; 5784 obj->rdata = rdata;
5692 obj->rdata_mapping = rdata_mapping; 5785 obj->rdata_mapping = rdata_mapping;
5693 5786 obj->afex_rdata = afex_rdata;
5787 obj->afex_rdata_mapping = afex_rdata_mapping;
5694 obj->send_cmd = bnx2x_func_send_cmd; 5788 obj->send_cmd = bnx2x_func_send_cmd;
5695 obj->check_transition = bnx2x_func_chk_transition; 5789 obj->check_transition = bnx2x_func_chk_transition;
5696 obj->complete_cmd = bnx2x_func_comp_cmd; 5790 obj->complete_cmd = bnx2x_func_comp_cmd;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 61a7670adfcd..efd80bdd0dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -62,6 +62,8 @@ enum {
62 BNX2X_FILTER_MCAST_PENDING, 62 BNX2X_FILTER_MCAST_PENDING,
63 BNX2X_FILTER_MCAST_SCHED, 63 BNX2X_FILTER_MCAST_SCHED,
64 BNX2X_FILTER_RSS_CONF_PENDING, 64 BNX2X_FILTER_RSS_CONF_PENDING,
65 BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
66 BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
65}; 67};
66 68
67struct bnx2x_raw_obj { 69struct bnx2x_raw_obj {
@@ -432,6 +434,8 @@ enum {
432 BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 434 BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
433}; 435};
434 436
437void bnx2x_set_mac_in_nig(struct bnx2x *bp,
438 bool add, unsigned char *dev_addr, int index);
435 439
436/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 440/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
437 441
@@ -685,9 +689,6 @@ enum {
685 /* RSS_MODE bits are mutually exclusive */ 689 /* RSS_MODE bits are mutually exclusive */
686 BNX2X_RSS_MODE_DISABLED, 690 BNX2X_RSS_MODE_DISABLED,
687 BNX2X_RSS_MODE_REGULAR, 691 BNX2X_RSS_MODE_REGULAR,
688 BNX2X_RSS_MODE_VLAN_PRI,
689 BNX2X_RSS_MODE_E1HOV_PRI,
690 BNX2X_RSS_MODE_IP_DSCP,
691 692
692 BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ 693 BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
693 694
@@ -801,7 +802,8 @@ enum {
801 BNX2X_Q_FLG_TX_SWITCH, 802 BNX2X_Q_FLG_TX_SWITCH,
802 BNX2X_Q_FLG_TX_SEC, 803 BNX2X_Q_FLG_TX_SEC,
803 BNX2X_Q_FLG_ANTI_SPOOF, 804 BNX2X_Q_FLG_ANTI_SPOOF,
804 BNX2X_Q_FLG_SILENT_VLAN_REM 805 BNX2X_Q_FLG_SILENT_VLAN_REM,
806 BNX2X_Q_FLG_FORCE_DEFAULT_PRI
805}; 807};
806 808
807/* Queue type options: queue type may be a compination of below. */ 809/* Queue type options: queue type may be a compination of below. */
@@ -963,6 +965,11 @@ struct bnx2x_queue_state_params {
963 } params; 965 } params;
964}; 966};
965 967
968struct bnx2x_viflist_params {
969 u8 echo_res;
970 u8 func_bit_map_res;
971};
972
966struct bnx2x_queue_sp_obj { 973struct bnx2x_queue_sp_obj {
967 u32 cids[BNX2X_MULTI_TX_COS]; 974 u32 cids[BNX2X_MULTI_TX_COS];
968 u8 cl_id; 975 u8 cl_id;
@@ -1045,6 +1052,8 @@ enum bnx2x_func_cmd {
1045 BNX2X_F_CMD_START, 1052 BNX2X_F_CMD_START,
1046 BNX2X_F_CMD_STOP, 1053 BNX2X_F_CMD_STOP,
1047 BNX2X_F_CMD_HW_RESET, 1054 BNX2X_F_CMD_HW_RESET,
1055 BNX2X_F_CMD_AFEX_UPDATE,
1056 BNX2X_F_CMD_AFEX_VIFLISTS,
1048 BNX2X_F_CMD_TX_STOP, 1057 BNX2X_F_CMD_TX_STOP,
1049 BNX2X_F_CMD_TX_START, 1058 BNX2X_F_CMD_TX_START,
1050 BNX2X_F_CMD_MAX, 1059 BNX2X_F_CMD_MAX,
@@ -1089,6 +1098,18 @@ struct bnx2x_func_start_params {
1089 u8 network_cos_mode; 1098 u8 network_cos_mode;
1090}; 1099};
1091 1100
1101struct bnx2x_func_afex_update_params {
1102 u16 vif_id;
1103 u16 afex_default_vlan;
1104 u8 allowed_priorities;
1105};
1106
1107struct bnx2x_func_afex_viflists_params {
1108 u16 vif_list_index;
1109 u8 func_bit_map;
1110 u8 afex_vif_list_command;
1111 u8 func_to_clear;
1112};
1092struct bnx2x_func_tx_start_params { 1113struct bnx2x_func_tx_start_params {
1093 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; 1114 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
1094 u8 dcb_enabled; 1115 u8 dcb_enabled;
@@ -1110,6 +1131,8 @@ struct bnx2x_func_state_params {
1110 struct bnx2x_func_hw_init_params hw_init; 1131 struct bnx2x_func_hw_init_params hw_init;
1111 struct bnx2x_func_hw_reset_params hw_reset; 1132 struct bnx2x_func_hw_reset_params hw_reset;
1112 struct bnx2x_func_start_params start; 1133 struct bnx2x_func_start_params start;
1134 struct bnx2x_func_afex_update_params afex_update;
1135 struct bnx2x_func_afex_viflists_params afex_viflists;
1113 struct bnx2x_func_tx_start_params tx_start; 1136 struct bnx2x_func_tx_start_params tx_start;
1114 } params; 1137 } params;
1115}; 1138};
@@ -1154,6 +1177,13 @@ struct bnx2x_func_sp_obj {
1154 void *rdata; 1177 void *rdata;
1155 dma_addr_t rdata_mapping; 1178 dma_addr_t rdata_mapping;
1156 1179
1180 /* Buffer to use as a afex ramrod data and its mapping.
1181 * This can't be same rdata as above because afex ramrod requests
1182 * can arrive to the object in parallel to other ramrod requests.
1183 */
1184 void *afex_rdata;
1185 dma_addr_t afex_rdata_mapping;
1186
1157 /* this mutex validates that when pending flag is taken, the next 1187 /* this mutex validates that when pending flag is taken, the next
1158 * ramrod to be sent will be the one set the pending bit 1188 * ramrod to be sent will be the one set the pending bit
1159 */ 1189 */
@@ -1197,6 +1227,7 @@ union bnx2x_qable_obj {
1197void bnx2x_init_func_obj(struct bnx2x *bp, 1227void bnx2x_init_func_obj(struct bnx2x *bp,
1198 struct bnx2x_func_sp_obj *obj, 1228 struct bnx2x_func_sp_obj *obj,
1199 void *rdata, dma_addr_t rdata_mapping, 1229 void *rdata, dma_addr_t rdata_mapping,
1230 void *afex_rdata, dma_addr_t afex_rdata_mapping,
1200 struct bnx2x_func_sp_drv_ops *drv_iface); 1231 struct bnx2x_func_sp_drv_ops *drv_iface);
1201 1232
1202int bnx2x_func_state_change(struct bnx2x *bp, 1233int bnx2x_func_state_change(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index e1c9310fb07c..7366e92c3fa7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1561,3 +1561,274 @@ void bnx2x_save_statistics(struct bnx2x *bp)
1561 UPDATE_FW_STAT_OLD(mac_discard); 1561 UPDATE_FW_STAT_OLD(mac_discard);
1562 } 1562 }
1563} 1563}
1564
1565void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1566 u32 stats_type)
1567{
1568 int i;
1569 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1570 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1571 struct per_queue_stats *fcoe_q_stats =
1572 &bp->fw_stats_data->queue_stats[FCOE_IDX];
1573
1574 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1575 &fcoe_q_stats->tstorm_queue_statistics;
1576
1577 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1578 &fcoe_q_stats->ustorm_queue_statistics;
1579
1580 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1581 &fcoe_q_stats->xstorm_queue_statistics;
1582
1583 struct fcoe_statistics_params *fw_fcoe_stat =
1584 &bp->fw_stats_data->fcoe;
1585
1586 memset(afex_stats, 0, sizeof(struct afex_stats));
1587
1588 for_each_eth_queue(bp, i) {
1589 struct bnx2x_fastpath *fp = &bp->fp[i];
1590 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1591
1592 ADD_64(afex_stats->rx_unicast_bytes_hi,
1593 qstats->total_unicast_bytes_received_hi,
1594 afex_stats->rx_unicast_bytes_lo,
1595 qstats->total_unicast_bytes_received_lo);
1596
1597 ADD_64(afex_stats->rx_broadcast_bytes_hi,
1598 qstats->total_broadcast_bytes_received_hi,
1599 afex_stats->rx_broadcast_bytes_lo,
1600 qstats->total_broadcast_bytes_received_lo);
1601
1602 ADD_64(afex_stats->rx_multicast_bytes_hi,
1603 qstats->total_multicast_bytes_received_hi,
1604 afex_stats->rx_multicast_bytes_lo,
1605 qstats->total_multicast_bytes_received_lo);
1606
1607 ADD_64(afex_stats->rx_unicast_frames_hi,
1608 qstats->total_unicast_packets_received_hi,
1609 afex_stats->rx_unicast_frames_lo,
1610 qstats->total_unicast_packets_received_lo);
1611
1612 ADD_64(afex_stats->rx_broadcast_frames_hi,
1613 qstats->total_broadcast_packets_received_hi,
1614 afex_stats->rx_broadcast_frames_lo,
1615 qstats->total_broadcast_packets_received_lo);
1616
1617 ADD_64(afex_stats->rx_multicast_frames_hi,
1618 qstats->total_multicast_packets_received_hi,
1619 afex_stats->rx_multicast_frames_lo,
1620 qstats->total_multicast_packets_received_lo);
1621
1622 /* sum to rx_frames_discarded all discraded
1623 * packets due to size, ttl0 and checksum
1624 */
1625 ADD_64(afex_stats->rx_frames_discarded_hi,
1626 qstats->total_packets_received_checksum_discarded_hi,
1627 afex_stats->rx_frames_discarded_lo,
1628 qstats->total_packets_received_checksum_discarded_lo);
1629
1630 ADD_64(afex_stats->rx_frames_discarded_hi,
1631 qstats->total_packets_received_ttl0_discarded_hi,
1632 afex_stats->rx_frames_discarded_lo,
1633 qstats->total_packets_received_ttl0_discarded_lo);
1634
1635 ADD_64(afex_stats->rx_frames_discarded_hi,
1636 qstats->etherstatsoverrsizepkts_hi,
1637 afex_stats->rx_frames_discarded_lo,
1638 qstats->etherstatsoverrsizepkts_lo);
1639
1640 ADD_64(afex_stats->rx_frames_dropped_hi,
1641 qstats->no_buff_discard_hi,
1642 afex_stats->rx_frames_dropped_lo,
1643 qstats->no_buff_discard_lo);
1644
1645 ADD_64(afex_stats->tx_unicast_bytes_hi,
1646 qstats->total_unicast_bytes_transmitted_hi,
1647 afex_stats->tx_unicast_bytes_lo,
1648 qstats->total_unicast_bytes_transmitted_lo);
1649
1650 ADD_64(afex_stats->tx_broadcast_bytes_hi,
1651 qstats->total_broadcast_bytes_transmitted_hi,
1652 afex_stats->tx_broadcast_bytes_lo,
1653 qstats->total_broadcast_bytes_transmitted_lo);
1654
1655 ADD_64(afex_stats->tx_multicast_bytes_hi,
1656 qstats->total_multicast_bytes_transmitted_hi,
1657 afex_stats->tx_multicast_bytes_lo,
1658 qstats->total_multicast_bytes_transmitted_lo);
1659
1660 ADD_64(afex_stats->tx_unicast_frames_hi,
1661 qstats->total_unicast_packets_transmitted_hi,
1662 afex_stats->tx_unicast_frames_lo,
1663 qstats->total_unicast_packets_transmitted_lo);
1664
1665 ADD_64(afex_stats->tx_broadcast_frames_hi,
1666 qstats->total_broadcast_packets_transmitted_hi,
1667 afex_stats->tx_broadcast_frames_lo,
1668 qstats->total_broadcast_packets_transmitted_lo);
1669
1670 ADD_64(afex_stats->tx_multicast_frames_hi,
1671 qstats->total_multicast_packets_transmitted_hi,
1672 afex_stats->tx_multicast_frames_lo,
1673 qstats->total_multicast_packets_transmitted_lo);
1674
1675 ADD_64(afex_stats->tx_frames_dropped_hi,
1676 qstats->total_transmitted_dropped_packets_error_hi,
1677 afex_stats->tx_frames_dropped_lo,
1678 qstats->total_transmitted_dropped_packets_error_lo);
1679 }
1680
1681 /* now add FCoE statistics which are collected separately
1682 * (both offloaded and non offloaded)
1683 */
1684 if (!NO_FCOE(bp)) {
1685 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1686 LE32_0,
1687 afex_stats->rx_unicast_bytes_lo,
1688 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1689
1690 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1691 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1692 afex_stats->rx_unicast_bytes_lo,
1693 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1694
1695 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1696 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1697 afex_stats->rx_broadcast_bytes_lo,
1698 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1699
1700 ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1701 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1702 afex_stats->rx_multicast_bytes_lo,
1703 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1704
1705 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1706 LE32_0,
1707 afex_stats->rx_unicast_frames_lo,
1708 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1709
1710 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1711 LE32_0,
1712 afex_stats->rx_unicast_frames_lo,
1713 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1714
1715 ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1716 LE32_0,
1717 afex_stats->rx_broadcast_frames_lo,
1718 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1719
1720 ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1721 LE32_0,
1722 afex_stats->rx_multicast_frames_lo,
1723 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1724
1725 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1726 LE32_0,
1727 afex_stats->rx_frames_discarded_lo,
1728 fcoe_q_tstorm_stats->checksum_discard);
1729
1730 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1731 LE32_0,
1732 afex_stats->rx_frames_discarded_lo,
1733 fcoe_q_tstorm_stats->pkts_too_big_discard);
1734
1735 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1736 LE32_0,
1737 afex_stats->rx_frames_discarded_lo,
1738 fcoe_q_tstorm_stats->ttl0_discard);
1739
1740 ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1741 LE16_0,
1742 afex_stats->rx_frames_dropped_lo,
1743 fcoe_q_tstorm_stats->no_buff_discard);
1744
1745 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1746 LE32_0,
1747 afex_stats->rx_frames_dropped_lo,
1748 fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1749
1750 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1751 LE32_0,
1752 afex_stats->rx_frames_dropped_lo,
1753 fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1754
1755 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1756 LE32_0,
1757 afex_stats->rx_frames_dropped_lo,
1758 fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1759
1760 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1761 LE32_0,
1762 afex_stats->rx_frames_dropped_lo,
1763 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1764
1765 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1766 LE32_0,
1767 afex_stats->rx_frames_dropped_lo,
1768 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1769
1770 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1771 LE32_0,
1772 afex_stats->tx_unicast_bytes_lo,
1773 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1774
1775 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1776 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1777 afex_stats->tx_unicast_bytes_lo,
1778 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1779
1780 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1781 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1782 afex_stats->tx_broadcast_bytes_lo,
1783 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1784
1785 ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1786 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1787 afex_stats->tx_multicast_bytes_lo,
1788 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1789
1790 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1791 LE32_0,
1792 afex_stats->tx_unicast_frames_lo,
1793 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1794
1795 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1796 LE32_0,
1797 afex_stats->tx_unicast_frames_lo,
1798 fcoe_q_xstorm_stats->ucast_pkts_sent);
1799
1800 ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1801 LE32_0,
1802 afex_stats->tx_broadcast_frames_lo,
1803 fcoe_q_xstorm_stats->bcast_pkts_sent);
1804
1805 ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1806 LE32_0,
1807 afex_stats->tx_multicast_frames_lo,
1808 fcoe_q_xstorm_stats->mcast_pkts_sent);
1809
1810 ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1811 LE32_0,
1812 afex_stats->tx_frames_dropped_lo,
1813 fcoe_q_xstorm_stats->error_drop_pkts);
1814 }
1815
1816 /* if port stats are requested, add them to the PMF
1817 * stats, as anyway they will be accumulated by the
1818 * MCP before sent to the switch
1819 */
1820 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1821 ADD_64(afex_stats->rx_frames_dropped_hi,
1822 0,
1823 afex_stats->rx_frames_dropped_lo,
1824 estats->mac_filter_discard);
1825 ADD_64(afex_stats->rx_frames_dropped_hi,
1826 0,
1827 afex_stats->rx_frames_dropped_lo,
1828 estats->brb_truncate_discard);
1829 ADD_64(afex_stats->rx_frames_discarded_hi,
1830 0,
1831 afex_stats->rx_frames_discarded_lo,
1832 estats->mac_discard);
1833 }
1834}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2b46e1eb7fd1..93e689fdfeda 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -338,6 +338,18 @@ struct bnx2x_fw_port_stats_old {
338 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ 338 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
339 } while (0) 339 } while (0)
340 340
341#define LE32_0 ((__force __le32) 0)
342#define LE16_0 ((__force __le16) 0)
343
344/* The _force is for cases where high value is 0 */
345#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
346 ADD_64(s_hi, le32_to_cpu(a_hi_le), \
347 s_lo, le32_to_cpu(a_lo_le))
348
349#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
350 ADD_64(s_hi, le16_to_cpu(a_hi_le), \
351 s_lo, le16_to_cpu(a_lo_le))
352
341/* difference = minuend - subtrahend */ 353/* difference = minuend - subtrahend */
342#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ 354#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
343 do { \ 355 do { \
@@ -529,4 +541,7 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
529 * @bp: driver handle 541 * @bp: driver handle
530 */ 542 */
531void bnx2x_save_statistics(struct bnx2x *bp); 543void bnx2x_save_statistics(struct bnx2x *bp);
544
545void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
546 u32 stats_type);
532#endif /* BNX2X_STATS_H */ 547#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 062ac333fde6..0c3e7c70ffbc 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12234,6 +12234,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
12234 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 12234 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12235 .get_rxfh_indir = tg3_get_rxfh_indir, 12235 .get_rxfh_indir = tg3_get_rxfh_indir,
12236 .set_rxfh_indir = tg3_set_rxfh_indir, 12236 .set_rxfh_indir = tg3_set_rxfh_indir,
12237 .get_ts_info = ethtool_op_get_ts_info,
12237}; 12238};
12238 12239
12239static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, 12240static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 77977d735dd7..0b640fafbda3 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -70,7 +70,6 @@ static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
70static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); 70static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
71static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc); 71static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
72static void bfa_ioc_recover(struct bfa_ioc *ioc); 72static void bfa_ioc_recover(struct bfa_ioc *ioc);
73static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
74static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); 73static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
75static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); 74static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
76static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); 75static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
@@ -346,8 +345,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
346 switch (event) { 345 switch (event) {
347 case IOC_E_FWRSP_GETATTR: 346 case IOC_E_FWRSP_GETATTR:
348 del_timer(&ioc->ioc_timer); 347 del_timer(&ioc->ioc_timer);
349 bfa_ioc_check_attr_wwns(ioc);
350 bfa_ioc_hb_monitor(ioc);
351 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 348 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
352 break; 349 break;
353 350
@@ -380,6 +377,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
380{ 377{
381 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 378 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
382 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); 379 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
380 bfa_ioc_hb_monitor(ioc);
383} 381}
384 382
385static void 383static void
@@ -1207,27 +1205,62 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1207 writel(1, sem_reg); 1205 writel(1, sem_reg);
1208} 1206}
1209 1207
1208/* Clear fwver hdr */
1209static void
1210bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1211{
1212 u32 pgnum, pgoff, loff = 0;
1213 int i;
1214
1215 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1216 pgoff = PSS_SMEM_PGOFF(loff);
1217 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1218
1219 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1220 writel(0, ioc->ioc_regs.smem_page_start + loff);
1221 loff += sizeof(u32);
1222 }
1223}
1224
1225
1210static void 1226static void
1211bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) 1227bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1212{ 1228{
1213 struct bfi_ioc_image_hdr fwhdr; 1229 struct bfi_ioc_image_hdr fwhdr;
1214 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); 1230 u32 fwstate, r32;
1215 1231
1216 if (fwstate == BFI_IOC_UNINIT) 1232 /* Spin on init semaphore to serialize. */
1233 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1234 while (r32 & 0x1) {
1235 udelay(20);
1236 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1237 }
1238
1239 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1240 if (fwstate == BFI_IOC_UNINIT) {
1241 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1217 return; 1242 return;
1243 }
1218 1244
1219 bfa_nw_ioc_fwver_get(ioc, &fwhdr); 1245 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1220 1246
1221 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) 1247 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1248 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1222 return; 1249 return;
1250 }
1223 1251
1252 bfa_ioc_fwver_clear(ioc);
1224 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 1253 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1254 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
1225 1255
1226 /* 1256 /*
1227 * Try to lock and then unlock the semaphore. 1257 * Try to lock and then unlock the semaphore.
1228 */ 1258 */
1229 readl(ioc->ioc_regs.ioc_sem_reg); 1259 readl(ioc->ioc_regs.ioc_sem_reg);
1230 writel(1, ioc->ioc_regs.ioc_sem_reg); 1260 writel(1, ioc->ioc_regs.ioc_sem_reg);
1261
1262 /* Unlock init semaphore */
1263 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1231} 1264}
1232 1265
1233static void 1266static void
@@ -1585,11 +1618,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1585 u32 i; 1618 u32 i;
1586 u32 asicmode; 1619 u32 asicmode;
1587 1620
1588 /**
1589 * Initialize LMEM first before code download
1590 */
1591 bfa_ioc_lmem_init(ioc);
1592
1593 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); 1621 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1594 1622
1595 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1623 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1914,6 +1942,10 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
1914 bfa_ioc_pll_init_asic(ioc); 1942 bfa_ioc_pll_init_asic(ioc);
1915 1943
1916 ioc->pllinit = true; 1944 ioc->pllinit = true;
1945
1946 /* Initialize LMEM */
1947 bfa_ioc_lmem_init(ioc);
1948
1917 /* 1949 /*
1918 * release semaphore. 1950 * release semaphore.
1919 */ 1951 */
@@ -2513,13 +2545,6 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
2513 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2545 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2514} 2546}
2515 2547
2516static void
2517bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
2518{
2519 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2520 return;
2521}
2522
2523/** 2548/**
2524 * @dg hal_iocpf_pvt BFA IOC PF private functions 2549 * @dg hal_iocpf_pvt BFA IOC PF private functions
2525 * @{ 2550 * @{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 348479bbfa3a..b6b036a143ae 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -199,9 +199,9 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
199 * Host to LPU mailbox message addresses 199 * Host to LPU mailbox message addresses
200 */ 200 */
201static const struct { 201static const struct {
202 u32 hfn_mbox; 202 u32 hfn_mbox;
203 u32 lpu_mbox; 203 u32 lpu_mbox;
204 u32 hfn_pgn; 204 u32 hfn_pgn;
205} ct_fnreg[] = { 205} ct_fnreg[] = {
206 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, 206 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
207 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, 207 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
@@ -803,17 +803,72 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
803} 803}
804 804
805#define CT2_NFC_MAX_DELAY 1000 805#define CT2_NFC_MAX_DELAY 1000
806#define CT2_NFC_VER_VALID 0x143
807#define BFA_IOC_PLL_POLL 1000000
808
809static bool
810bfa_ioc_ct2_nfc_halted(void __iomem *rb)
811{
812 volatile u32 r32;
813
814 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
815 if (r32 & __NFC_CONTROLLER_HALTED)
816 return true;
817
818 return false;
819}
820
821static void
822bfa_ioc_ct2_nfc_resume(void __iomem *rb)
823{
824 volatile u32 r32;
825 int i;
826
827 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
828 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
829 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
830 if (!(r32 & __NFC_CONTROLLER_HALTED))
831 return;
832 udelay(1000);
833 }
834 BUG_ON(1);
835}
836
806static enum bfa_status 837static enum bfa_status
807bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) 838bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
808{ 839{
809 volatile u32 wgn, r32; 840 volatile u32 wgn, r32;
810 int i; 841 u32 nfc_ver, i;
811 842
812 /*
813 * Initialize PLL if not already done by NFC
814 */
815 wgn = readl(rb + CT2_WGN_STATUS); 843 wgn = readl(rb + CT2_WGN_STATUS);
816 if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { 844
845 nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
846
847 if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
848 (nfc_ver >= CT2_NFC_VER_VALID)) {
849 if (bfa_ioc_ct2_nfc_halted(rb))
850 bfa_ioc_ct2_nfc_resume(rb);
851 writel(__RESET_AND_START_SCLK_LCLK_PLLS,
852 rb + CT2_CSI_FW_CTL_SET_REG);
853
854 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
855 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
856 if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
857 break;
858 }
859 BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
860
861 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
862 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
863 if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
864 break;
865 }
866 BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
867 udelay(1000);
868
869 r32 = readl(rb + CT2_CSI_FW_CTL_REG);
870 BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
871 } else {
817 writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); 872 writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
818 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { 873 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
819 r32 = readl(rb + CT2_NFC_CSR_SET_REG); 874 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -821,53 +876,48 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
821 break; 876 break;
822 udelay(1000); 877 udelay(1000);
823 } 878 }
879
880 bfa_ioc_ct2_mac_reset(rb);
881 bfa_ioc_ct2_sclk_init(rb);
882 bfa_ioc_ct2_lclk_init(rb);
883
884 /* release soft reset on s_clk & l_clk */
885 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
886 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
887 rb + CT2_APP_PLL_SCLK_CTL_REG);
888 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
889 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
890 rb + CT2_APP_PLL_LCLK_CTL_REG);
891 }
892
893 /* Announce flash device presence, if flash was corrupted. */
894 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
895 r32 = readl((rb + PSS_GPIO_OUT_REG));
896 writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
897 r32 = readl((rb + PSS_GPIO_OE_REG));
898 writel(r32 | 1, rb + PSS_GPIO_OE_REG);
824 } 899 }
825 900
826 /* 901 /*
827 * Mask the interrupts and clear any 902 * Mask the interrupts and clear any
828 * pending interrupts left by BIOS/EFI 903 * pending interrupts left by BIOS/EFI
829 */ 904 */
830
831 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); 905 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
832 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); 906 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
833 907
834 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 908 /* For first time initialization, no need to clear interrupts */
835 if (r32 == 1) { 909 r32 = readl(rb + HOST_SEM5_REG);
836 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); 910 if (r32 & 0x1) {
837 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 911 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
838 } 912 if (r32 == 1) {
839 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); 913 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
840 if (r32 == 1) { 914 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
841 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); 915 }
842 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); 916 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
843 } 917 if (r32 == 1) {
844 918 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
845 bfa_ioc_ct2_mac_reset(rb); 919 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
846 bfa_ioc_ct2_sclk_init(rb); 920 }
847 bfa_ioc_ct2_lclk_init(rb);
848
849 /*
850 * release soft reset on s_clk & l_clk
851 */
852 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
853 writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
854 (rb + CT2_APP_PLL_SCLK_CTL_REG));
855
856 /*
857 * release soft reset on s_clk & l_clk
858 */
859 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
860 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
861 (rb + CT2_APP_PLL_LCLK_CTL_REG));
862
863 /*
864 * Announce flash device presence, if flash was corrupted.
865 */
866 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
867 r32 = readl((rb + PSS_GPIO_OUT_REG));
868 writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
869 r32 = readl((rb + PSS_GPIO_OE_REG));
870 writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
871 } 921 }
872 922
873 bfa_ioc_ct2_mem_init(rb); 923 bfa_ioc_ct2_mem_init(rb);
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index efacff3ab51d..0e094fe46dfd 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -339,10 +339,16 @@ enum {
339#define __A2T_AHB_LOAD 0x00000800 339#define __A2T_AHB_LOAD 0x00000800
340#define __WGN_READY 0x00000400 340#define __WGN_READY 0x00000400
341#define __GLBL_PF_VF_CFG_RDY 0x00000200 341#define __GLBL_PF_VF_CFG_RDY 0x00000200
342#define CT2_NFC_CSR_CLR_REG 0x00027420
342#define CT2_NFC_CSR_SET_REG 0x00027424 343#define CT2_NFC_CSR_SET_REG 0x00027424
343#define __HALT_NFC_CONTROLLER 0x00000002 344#define __HALT_NFC_CONTROLLER 0x00000002
344#define __NFC_CONTROLLER_HALTED 0x00001000 345#define __NFC_CONTROLLER_HALTED 0x00001000
345 346
347#define CT2_RSC_GPR15_REG 0x0002765c
348#define CT2_CSI_FW_CTL_REG 0x00027080
349#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
350#define CT2_CSI_FW_CTL_SET_REG 0x00027088
351
346#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 352#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
347#define __CSI_MAC_RESET 0x00000010 353#define __CSI_MAC_RESET 0x00000010
348#define __CSI_MAC_AHB_RESET 0x00000008 354#define __CSI_MAC_AHB_RESET 0x00000008
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff78f770dec9..25c4e7f2a099 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -80,8 +80,6 @@ do { \
80 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ 80 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
81} while (0) 81} while (0)
82 82
83#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
84
85static void 83static void
86bnad_add_to_list(struct bnad *bnad) 84bnad_add_to_list(struct bnad *bnad)
87{ 85{
@@ -103,7 +101,7 @@ bnad_remove_from_list(struct bnad *bnad)
103 * Reinitialize completions in CQ, once Rx is taken down 101 * Reinitialize completions in CQ, once Rx is taken down
104 */ 102 */
105static void 103static void
106bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb) 104bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
107{ 105{
108 struct bna_cq_entry *cmpl, *next_cmpl; 106 struct bna_cq_entry *cmpl, *next_cmpl;
109 unsigned int wi_range, wis = 0, ccb_prod = 0; 107 unsigned int wi_range, wis = 0, ccb_prod = 0;
@@ -141,7 +139,8 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
141 139
142 for (j = 0; j < frag; j++) { 140 for (j = 0; j < frag; j++) {
143 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), 141 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
144 skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE); 142 skb_frag_size(&skb_shinfo(skb)->frags[j]),
143 DMA_TO_DEVICE);
145 dma_unmap_addr_set(&array[index], dma_addr, 0); 144 dma_unmap_addr_set(&array[index], dma_addr, 0);
146 BNA_QE_INDX_ADD(index, 1, depth); 145 BNA_QE_INDX_ADD(index, 1, depth);
147 } 146 }
@@ -155,7 +154,7 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
155 * so DMA unmap & freeing is fine. 154 * so DMA unmap & freeing is fine.
156 */ 155 */
157static void 156static void
158bnad_free_all_txbufs(struct bnad *bnad, 157bnad_txq_cleanup(struct bnad *bnad,
159 struct bna_tcb *tcb) 158 struct bna_tcb *tcb)
160{ 159{
161 u32 unmap_cons; 160 u32 unmap_cons;
@@ -183,13 +182,12 @@ bnad_free_all_txbufs(struct bnad *bnad,
183/* Data Path Handlers */ 182/* Data Path Handlers */
184 183
185/* 184/*
186 * bnad_free_txbufs : Frees the Tx bufs on Tx completion 185 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
187 * Can be called in a) Interrupt context 186 * Can be called in a) Interrupt context
188 * b) Sending context 187 * b) Sending context
189 * c) Tasklet context
190 */ 188 */
191static u32 189static u32
192bnad_free_txbufs(struct bnad *bnad, 190bnad_txcmpl_process(struct bnad *bnad,
193 struct bna_tcb *tcb) 191 struct bna_tcb *tcb)
194{ 192{
195 u32 unmap_cons, sent_packets = 0, sent_bytes = 0; 193 u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
@@ -198,13 +196,7 @@ bnad_free_txbufs(struct bnad *bnad,
198 struct bnad_skb_unmap *unmap_array; 196 struct bnad_skb_unmap *unmap_array;
199 struct sk_buff *skb; 197 struct sk_buff *skb;
200 198
201 /* 199 /* Just return if TX is stopped */
202 * Just return if TX is stopped. This check is useful
203 * when bnad_free_txbufs() runs out of a tasklet scheduled
204 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
205 * but this routine runs actually after the cleanup has been
206 * executed.
207 */
208 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 200 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
209 return 0; 201 return 0;
210 202
@@ -243,57 +235,8 @@ bnad_free_txbufs(struct bnad *bnad,
243 return sent_packets; 235 return sent_packets;
244} 236}
245 237
246/* Tx Free Tasklet function */
247/* Frees for all the tcb's in all the Tx's */
248/*
249 * Scheduled from sending context, so that
250 * the fat Tx lock is not held for too long
251 * in the sending context.
252 */
253static void
254bnad_tx_free_tasklet(unsigned long bnad_ptr)
255{
256 struct bnad *bnad = (struct bnad *)bnad_ptr;
257 struct bna_tcb *tcb;
258 u32 acked = 0;
259 int i, j;
260
261 for (i = 0; i < bnad->num_tx; i++) {
262 for (j = 0; j < bnad->num_txq_per_tx; j++) {
263 tcb = bnad->tx_info[i].tcb[j];
264 if (!tcb)
265 continue;
266 if (((u16) (*tcb->hw_consumer_index) !=
267 tcb->consumer_index) &&
268 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
269 &tcb->flags))) {
270 acked = bnad_free_txbufs(bnad, tcb);
271 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
272 &tcb->flags)))
273 bna_ib_ack(tcb->i_dbell, acked);
274 smp_mb__before_clear_bit();
275 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
276 }
277 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
278 &tcb->flags)))
279 continue;
280 if (netif_queue_stopped(bnad->netdev)) {
281 if (acked && netif_carrier_ok(bnad->netdev) &&
282 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
283 BNAD_NETIF_WAKE_THRESHOLD) {
284 netif_wake_queue(bnad->netdev);
285 /* TODO */
286 /* Counters for individual TxQs? */
287 BNAD_UPDATE_CTR(bnad,
288 netif_queue_wakeup);
289 }
290 }
291 }
292 }
293}
294
295static u32 238static u32
296bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) 239bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
297{ 240{
298 struct net_device *netdev = bnad->netdev; 241 struct net_device *netdev = bnad->netdev;
299 u32 sent = 0; 242 u32 sent = 0;
@@ -301,7 +244,7 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
301 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) 244 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
302 return 0; 245 return 0;
303 246
304 sent = bnad_free_txbufs(bnad, tcb); 247 sent = bnad_txcmpl_process(bnad, tcb);
305 if (sent) { 248 if (sent) {
306 if (netif_queue_stopped(netdev) && 249 if (netif_queue_stopped(netdev) &&
307 netif_carrier_ok(netdev) && 250 netif_carrier_ok(netdev) &&
@@ -330,13 +273,13 @@ bnad_msix_tx(int irq, void *data)
330 struct bna_tcb *tcb = (struct bna_tcb *)data; 273 struct bna_tcb *tcb = (struct bna_tcb *)data;
331 struct bnad *bnad = tcb->bnad; 274 struct bnad *bnad = tcb->bnad;
332 275
333 bnad_tx(bnad, tcb); 276 bnad_tx_complete(bnad, tcb);
334 277
335 return IRQ_HANDLED; 278 return IRQ_HANDLED;
336} 279}
337 280
338static void 281static void
339bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb) 282bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
340{ 283{
341 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 284 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
342 285
@@ -348,7 +291,7 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
348} 291}
349 292
350static void 293static void
351bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 294bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
352{ 295{
353 struct bnad_unmap_q *unmap_q; 296 struct bnad_unmap_q *unmap_q;
354 struct bnad_skb_unmap *unmap_array; 297 struct bnad_skb_unmap *unmap_array;
@@ -369,11 +312,11 @@ bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
369 DMA_FROM_DEVICE); 312 DMA_FROM_DEVICE);
370 dev_kfree_skb(skb); 313 dev_kfree_skb(skb);
371 } 314 }
372 bnad_reset_rcb(bnad, rcb); 315 bnad_rcb_cleanup(bnad, rcb);
373} 316}
374 317
375static void 318static void
376bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 319bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
377{ 320{
378 u16 to_alloc, alloced, unmap_prod, wi_range; 321 u16 to_alloc, alloced, unmap_prod, wi_range;
379 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 322 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
@@ -434,14 +377,14 @@ bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
434 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { 377 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
435 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) 378 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
436 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) 379 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
437 bnad_alloc_n_post_rxbufs(bnad, rcb); 380 bnad_rxq_post(bnad, rcb);
438 smp_mb__before_clear_bit(); 381 smp_mb__before_clear_bit();
439 clear_bit(BNAD_RXQ_REFILL, &rcb->flags); 382 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
440 } 383 }
441} 384}
442 385
443static u32 386static u32
444bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) 387bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
445{ 388{
446 struct bna_cq_entry *cmpl, *next_cmpl; 389 struct bna_cq_entry *cmpl, *next_cmpl;
447 struct bna_rcb *rcb = NULL; 390 struct bna_rcb *rcb = NULL;
@@ -453,12 +396,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
453 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 396 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
454 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); 397 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
455 398
456 set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); 399 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
457
458 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
459 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
460 return 0; 400 return 0;
461 }
462 401
463 prefetch(bnad->netdev); 402 prefetch(bnad->netdev);
464 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, 403 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -533,9 +472,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
533 472
534 if (skb->ip_summed == CHECKSUM_UNNECESSARY) 473 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
535 napi_gro_receive(&rx_ctrl->napi, skb); 474 napi_gro_receive(&rx_ctrl->napi, skb);
536 else { 475 else
537 netif_receive_skb(skb); 476 netif_receive_skb(skb);
538 }
539 477
540next: 478next:
541 cmpl->valid = 0; 479 cmpl->valid = 0;
@@ -646,7 +584,7 @@ bnad_isr(int irq, void *data)
646 for (j = 0; j < bnad->num_txq_per_tx; j++) { 584 for (j = 0; j < bnad->num_txq_per_tx; j++) {
647 tcb = bnad->tx_info[i].tcb[j]; 585 tcb = bnad->tx_info[i].tcb[j];
648 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 586 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
649 bnad_tx(bnad, bnad->tx_info[i].tcb[j]); 587 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
650 } 588 }
651 } 589 }
652 /* Rx processing */ 590 /* Rx processing */
@@ -839,20 +777,9 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
839{ 777{
840 struct bnad_tx_info *tx_info = 778 struct bnad_tx_info *tx_info =
841 (struct bnad_tx_info *)tcb->txq->tx->priv; 779 (struct bnad_tx_info *)tcb->txq->tx->priv;
842 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
843
844 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
845 cpu_relax();
846
847 bnad_free_all_txbufs(bnad, tcb);
848
849 unmap_q->producer_index = 0;
850 unmap_q->consumer_index = 0;
851
852 smp_mb__before_clear_bit();
853 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
854 780
855 tx_info->tcb[tcb->id] = NULL; 781 tx_info->tcb[tcb->id] = NULL;
782 tcb->priv = NULL;
856} 783}
857 784
858static void 785static void
@@ -866,12 +793,6 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
866} 793}
867 794
868static void 795static void
869bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
870{
871 bnad_free_all_rxbufs(bnad, rcb);
872}
873
874static void
875bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) 796bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
876{ 797{
877 struct bnad_rx_info *rx_info = 798 struct bnad_rx_info *rx_info =
@@ -916,7 +837,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
916{ 837{
917 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; 838 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
918 struct bna_tcb *tcb; 839 struct bna_tcb *tcb;
919 struct bnad_unmap_q *unmap_q;
920 u32 txq_id; 840 u32 txq_id;
921 int i; 841 int i;
922 842
@@ -926,23 +846,9 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
926 continue; 846 continue;
927 txq_id = tcb->id; 847 txq_id = tcb->id;
928 848
929 unmap_q = tcb->unmap_q; 849 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
930
931 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
932 continue;
933
934 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
935 cpu_relax();
936
937 bnad_free_all_txbufs(bnad, tcb);
938
939 unmap_q->producer_index = 0;
940 unmap_q->consumer_index = 0;
941
942 smp_mb__before_clear_bit();
943 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
944
945 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); 850 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
851 BUG_ON(*(tcb->hw_consumer_index) != 0);
946 852
947 if (netif_carrier_ok(bnad->netdev)) { 853 if (netif_carrier_ok(bnad->netdev)) {
948 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n", 854 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
@@ -963,6 +869,54 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
963 } 869 }
964} 870}
965 871
872/*
873 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
874 */
875static void
876bnad_tx_cleanup(struct delayed_work *work)
877{
878 struct bnad_tx_info *tx_info =
879 container_of(work, struct bnad_tx_info, tx_cleanup_work);
880 struct bnad *bnad = NULL;
881 struct bnad_unmap_q *unmap_q;
882 struct bna_tcb *tcb;
883 unsigned long flags;
884 uint32_t i, pending = 0;
885
886 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
887 tcb = tx_info->tcb[i];
888 if (!tcb)
889 continue;
890
891 bnad = tcb->bnad;
892
893 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
894 pending++;
895 continue;
896 }
897
898 bnad_txq_cleanup(bnad, tcb);
899
900 unmap_q = tcb->unmap_q;
901 unmap_q->producer_index = 0;
902 unmap_q->consumer_index = 0;
903
904 smp_mb__before_clear_bit();
905 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
906 }
907
908 if (pending) {
909 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
910 msecs_to_jiffies(1));
911 return;
912 }
913
914 spin_lock_irqsave(&bnad->bna_lock, flags);
915 bna_tx_cleanup_complete(tx_info->tx);
916 spin_unlock_irqrestore(&bnad->bna_lock, flags);
917}
918
919
966static void 920static void
967bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) 921bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
968{ 922{
@@ -976,8 +930,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
976 continue; 930 continue;
977 } 931 }
978 932
979 mdelay(BNAD_TXRX_SYNC_MDELAY); 933 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
980 bna_tx_cleanup_complete(tx);
981} 934}
982 935
983static void 936static void
@@ -1001,6 +954,44 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1001 } 954 }
1002} 955}
1003 956
957/*
958 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
959 */
960static void
961bnad_rx_cleanup(void *work)
962{
963 struct bnad_rx_info *rx_info =
964 container_of(work, struct bnad_rx_info, rx_cleanup_work);
965 struct bnad_rx_ctrl *rx_ctrl;
966 struct bnad *bnad = NULL;
967 unsigned long flags;
968 uint32_t i;
969
970 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
971 rx_ctrl = &rx_info->rx_ctrl[i];
972
973 if (!rx_ctrl->ccb)
974 continue;
975
976 bnad = rx_ctrl->ccb->bnad;
977
978 /*
979 * Wait till the poll handler has exited
980 * and nothing can be scheduled anymore
981 */
982 napi_disable(&rx_ctrl->napi);
983
984 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
985 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
986 if (rx_ctrl->ccb->rcb[1])
987 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
988 }
989
990 spin_lock_irqsave(&bnad->bna_lock, flags);
991 bna_rx_cleanup_complete(rx_info->rx);
992 spin_unlock_irqrestore(&bnad->bna_lock, flags);
993}
994
1004static void 995static void
1005bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) 996bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1006{ 997{
@@ -1009,8 +1000,6 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1009 struct bnad_rx_ctrl *rx_ctrl; 1000 struct bnad_rx_ctrl *rx_ctrl;
1010 int i; 1001 int i;
1011 1002
1012 mdelay(BNAD_TXRX_SYNC_MDELAY);
1013
1014 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1003 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1015 rx_ctrl = &rx_info->rx_ctrl[i]; 1004 rx_ctrl = &rx_info->rx_ctrl[i];
1016 ccb = rx_ctrl->ccb; 1005 ccb = rx_ctrl->ccb;
@@ -1021,12 +1010,9 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1021 1010
1022 if (ccb->rcb[1]) 1011 if (ccb->rcb[1])
1023 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); 1012 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1024
1025 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
1026 cpu_relax();
1027 } 1013 }
1028 1014
1029 bna_rx_cleanup_complete(rx); 1015 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1030} 1016}
1031 1017
1032static void 1018static void
@@ -1046,13 +1032,12 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1046 if (!ccb) 1032 if (!ccb)
1047 continue; 1033 continue;
1048 1034
1049 bnad_cq_cmpl_init(bnad, ccb); 1035 napi_enable(&rx_ctrl->napi);
1050 1036
1051 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { 1037 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1052 rcb = ccb->rcb[j]; 1038 rcb = ccb->rcb[j];
1053 if (!rcb) 1039 if (!rcb)
1054 continue; 1040 continue;
1055 bnad_free_all_rxbufs(bnad, rcb);
1056 1041
1057 set_bit(BNAD_RXQ_STARTED, &rcb->flags); 1042 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1058 set_bit(BNAD_RXQ_POST_OK, &rcb->flags); 1043 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
@@ -1063,7 +1048,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1063 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { 1048 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1064 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) 1049 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1065 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) 1050 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1066 bnad_alloc_n_post_rxbufs(bnad, rcb); 1051 bnad_rxq_post(bnad, rcb);
1067 smp_mb__before_clear_bit(); 1052 smp_mb__before_clear_bit();
1068 clear_bit(BNAD_RXQ_REFILL, &rcb->flags); 1053 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1069 } 1054 }
@@ -1687,7 +1672,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1687 if (!netif_carrier_ok(bnad->netdev)) 1672 if (!netif_carrier_ok(bnad->netdev))
1688 goto poll_exit; 1673 goto poll_exit;
1689 1674
1690 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget); 1675 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1691 if (rcvd >= budget) 1676 if (rcvd >= budget)
1692 return rcvd; 1677 return rcvd;
1693 1678
@@ -1704,7 +1689,7 @@ poll_exit:
1704 1689
1705#define BNAD_NAPI_POLL_QUOTA 64 1690#define BNAD_NAPI_POLL_QUOTA 64
1706static void 1691static void
1707bnad_napi_init(struct bnad *bnad, u32 rx_id) 1692bnad_napi_add(struct bnad *bnad, u32 rx_id)
1708{ 1693{
1709 struct bnad_rx_ctrl *rx_ctrl; 1694 struct bnad_rx_ctrl *rx_ctrl;
1710 int i; 1695 int i;
@@ -1718,34 +1703,18 @@ bnad_napi_init(struct bnad *bnad, u32 rx_id)
1718} 1703}
1719 1704
1720static void 1705static void
1721bnad_napi_enable(struct bnad *bnad, u32 rx_id) 1706bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1722{
1723 struct bnad_rx_ctrl *rx_ctrl;
1724 int i;
1725
1726 /* Initialize & enable NAPI */
1727 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1728 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1729
1730 napi_enable(&rx_ctrl->napi);
1731 }
1732}
1733
1734static void
1735bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1736{ 1707{
1737 int i; 1708 int i;
1738 1709
1739 /* First disable and then clean up */ 1710 /* First disable and then clean up */
1740 for (i = 0; i < bnad->num_rxp_per_rx; i++) { 1711 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1741 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1742 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); 1712 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1743 }
1744} 1713}
1745 1714
1746/* Should be held with conf_lock held */ 1715/* Should be held with conf_lock held */
1747void 1716void
1748bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) 1717bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1749{ 1718{
1750 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; 1719 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1751 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; 1720 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1764,9 +1733,6 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1764 bnad_tx_msix_unregister(bnad, tx_info, 1733 bnad_tx_msix_unregister(bnad, tx_info,
1765 bnad->num_txq_per_tx); 1734 bnad->num_txq_per_tx);
1766 1735
1767 if (0 == tx_id)
1768 tasklet_kill(&bnad->tx_free_tasklet);
1769
1770 spin_lock_irqsave(&bnad->bna_lock, flags); 1736 spin_lock_irqsave(&bnad->bna_lock, flags);
1771 bna_tx_destroy(tx_info->tx); 1737 bna_tx_destroy(tx_info->tx);
1772 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1738 spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1832,6 +1798,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1832 goto err_return; 1798 goto err_return;
1833 tx_info->tx = tx; 1799 tx_info->tx = tx;
1834 1800
1801 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1802 (work_func_t)bnad_tx_cleanup);
1803
1835 /* Register ISR for the Tx object */ 1804 /* Register ISR for the Tx object */
1836 if (intr_info->intr_type == BNA_INTR_T_MSIX) { 1805 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1837 err = bnad_tx_msix_register(bnad, tx_info, 1806 err = bnad_tx_msix_register(bnad, tx_info,
@@ -1896,7 +1865,7 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1896 1865
1897/* Called with mutex_lock(&bnad->conf_mutex) held */ 1866/* Called with mutex_lock(&bnad->conf_mutex) held */
1898void 1867void
1899bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) 1868bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
1900{ 1869{
1901 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; 1870 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1902 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 1871 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
@@ -1928,7 +1897,7 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1928 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) 1897 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1929 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); 1898 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1930 1899
1931 bnad_napi_disable(bnad, rx_id); 1900 bnad_napi_delete(bnad, rx_id);
1932 1901
1933 spin_lock_irqsave(&bnad->bna_lock, flags); 1902 spin_lock_irqsave(&bnad->bna_lock, flags);
1934 bna_rx_destroy(rx_info->rx); 1903 bna_rx_destroy(rx_info->rx);
@@ -1952,7 +1921,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1952 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 1921 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1953 static const struct bna_rx_event_cbfn rx_cbfn = { 1922 static const struct bna_rx_event_cbfn rx_cbfn = {
1954 .rcb_setup_cbfn = bnad_cb_rcb_setup, 1923 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1955 .rcb_destroy_cbfn = bnad_cb_rcb_destroy, 1924 .rcb_destroy_cbfn = NULL,
1956 .ccb_setup_cbfn = bnad_cb_ccb_setup, 1925 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1957 .ccb_destroy_cbfn = bnad_cb_ccb_destroy, 1926 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1958 .rx_stall_cbfn = bnad_cb_rx_stall, 1927 .rx_stall_cbfn = bnad_cb_rx_stall,
@@ -1998,11 +1967,14 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1998 rx_info->rx = rx; 1967 rx_info->rx = rx;
1999 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1968 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2000 1969
1970 INIT_WORK(&rx_info->rx_cleanup_work,
1971 (work_func_t)(bnad_rx_cleanup));
1972
2001 /* 1973 /*
2002 * Init NAPI, so that state is set to NAPI_STATE_SCHED, 1974 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2003 * so that IRQ handler cannot schedule NAPI at this point. 1975 * so that IRQ handler cannot schedule NAPI at this point.
2004 */ 1976 */
2005 bnad_napi_init(bnad, rx_id); 1977 bnad_napi_add(bnad, rx_id);
2006 1978
2007 /* Register ISR for the Rx object */ 1979 /* Register ISR for the Rx object */
2008 if (intr_info->intr_type == BNA_INTR_T_MSIX) { 1980 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2028,13 +2000,10 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2028 bna_rx_enable(rx); 2000 bna_rx_enable(rx);
2029 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2001 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2030 2002
2031 /* Enable scheduling of NAPI */
2032 bnad_napi_enable(bnad, rx_id);
2033
2034 return 0; 2003 return 0;
2035 2004
2036err_return: 2005err_return:
2037 bnad_cleanup_rx(bnad, rx_id); 2006 bnad_destroy_rx(bnad, rx_id);
2038 return err; 2007 return err;
2039} 2008}
2040 2009
@@ -2519,7 +2488,7 @@ bnad_open(struct net_device *netdev)
2519 return 0; 2488 return 0;
2520 2489
2521cleanup_tx: 2490cleanup_tx:
2522 bnad_cleanup_tx(bnad, 0); 2491 bnad_destroy_tx(bnad, 0);
2523 2492
2524err_return: 2493err_return:
2525 mutex_unlock(&bnad->conf_mutex); 2494 mutex_unlock(&bnad->conf_mutex);
@@ -2546,8 +2515,8 @@ bnad_stop(struct net_device *netdev)
2546 2515
2547 wait_for_completion(&bnad->bnad_completions.enet_comp); 2516 wait_for_completion(&bnad->bnad_completions.enet_comp);
2548 2517
2549 bnad_cleanup_tx(bnad, 0); 2518 bnad_destroy_tx(bnad, 0);
2550 bnad_cleanup_rx(bnad, 0); 2519 bnad_destroy_rx(bnad, 0);
2551 2520
2552 /* Synchronize mailbox IRQ */ 2521 /* Synchronize mailbox IRQ */
2553 bnad_mbox_irq_sync(bnad); 2522 bnad_mbox_irq_sync(bnad);
@@ -2620,7 +2589,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2620 if ((u16) (*tcb->hw_consumer_index) != 2589 if ((u16) (*tcb->hw_consumer_index) !=
2621 tcb->consumer_index && 2590 tcb->consumer_index &&
2622 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { 2591 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2623 acked = bnad_free_txbufs(bnad, tcb); 2592 acked = bnad_txcmpl_process(bnad, tcb);
2624 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 2593 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2625 bna_ib_ack(tcb->i_dbell, acked); 2594 bna_ib_ack(tcb->i_dbell, acked);
2626 smp_mb__before_clear_bit(); 2595 smp_mb__before_clear_bit();
@@ -2843,9 +2812,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2843 bna_txq_prod_indx_doorbell(tcb); 2812 bna_txq_prod_indx_doorbell(tcb);
2844 smp_mb(); 2813 smp_mb();
2845 2814
2846 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2847 tasklet_schedule(&bnad->tx_free_tasklet);
2848
2849 return NETDEV_TX_OK; 2815 return NETDEV_TX_OK;
2850} 2816}
2851 2817
@@ -3127,8 +3093,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
3127/* 3093/*
3128 * 1. Initialize the bnad structure 3094 * 1. Initialize the bnad structure
3129 * 2. Setup netdev pointer in pci_dev 3095 * 2. Setup netdev pointer in pci_dev
3130 * 3. Initialze Tx free tasklet 3096 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3131 * 4. Initialize no. of TxQ & CQs & MSIX vectors 3097 * 4. Initialize work queue.
3132 */ 3098 */
3133static int 3099static int
3134bnad_init(struct bnad *bnad, 3100bnad_init(struct bnad *bnad,
@@ -3171,8 +3137,11 @@ bnad_init(struct bnad *bnad,
3171 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; 3137 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3172 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; 3138 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3173 3139
3174 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet, 3140 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3175 (unsigned long)bnad); 3141 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3142
3143 if (!bnad->work_q)
3144 return -ENOMEM;
3176 3145
3177 return 0; 3146 return 0;
3178} 3147}
@@ -3185,6 +3154,12 @@ bnad_init(struct bnad *bnad,
3185static void 3154static void
3186bnad_uninit(struct bnad *bnad) 3155bnad_uninit(struct bnad *bnad)
3187{ 3156{
3157 if (bnad->work_q) {
3158 flush_workqueue(bnad->work_q);
3159 destroy_workqueue(bnad->work_q);
3160 bnad->work_q = NULL;
3161 }
3162
3188 if (bnad->bar0) 3163 if (bnad->bar0)
3189 iounmap(bnad->bar0); 3164 iounmap(bnad->bar0);
3190 pci_set_drvdata(bnad->pcidev, NULL); 3165 pci_set_drvdata(bnad->pcidev, NULL);
@@ -3304,7 +3279,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3304 /* 3279 /*
3305 * Initialize bnad structure 3280 * Initialize bnad structure
3306 * Setup relation between pci_dev & netdev 3281 * Setup relation between pci_dev & netdev
3307 * Init Tx free tasklet
3308 */ 3282 */
3309 err = bnad_init(bnad, pdev, netdev); 3283 err = bnad_init(bnad, pdev, netdev);
3310 if (err) 3284 if (err)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 55824d92699f..72742be11277 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
71#define BNAD_NAME "bna" 71#define BNAD_NAME "bna"
72#define BNAD_NAME_LEN 64 72#define BNAD_NAME_LEN 64
73 73
74#define BNAD_VERSION "3.0.2.2" 74#define BNAD_VERSION "3.0.23.0"
75 75
76#define BNAD_MAILBOX_MSIX_INDEX 0 76#define BNAD_MAILBOX_MSIX_INDEX 0
77#define BNAD_MAILBOX_MSIX_VECTORS 1 77#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -210,6 +210,7 @@ struct bnad_tx_info {
210 struct bna_tx *tx; /* 1:1 between tx_info & tx */ 210 struct bna_tx *tx; /* 1:1 between tx_info & tx */
211 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX]; 211 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
212 u32 tx_id; 212 u32 tx_id;
213 struct delayed_work tx_cleanup_work;
213} ____cacheline_aligned; 214} ____cacheline_aligned;
214 215
215struct bnad_rx_info { 216struct bnad_rx_info {
@@ -217,6 +218,7 @@ struct bnad_rx_info {
217 218
218 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX]; 219 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
219 u32 rx_id; 220 u32 rx_id;
221 struct work_struct rx_cleanup_work;
220} ____cacheline_aligned; 222} ____cacheline_aligned;
221 223
222/* Unmap queues for Tx / Rx cleanup */ 224/* Unmap queues for Tx / Rx cleanup */
@@ -318,7 +320,7 @@ struct bnad {
318 /* Burnt in MAC address */ 320 /* Burnt in MAC address */
319 mac_t perm_addr; 321 mac_t perm_addr;
320 322
321 struct tasklet_struct tx_free_tasklet; 323 struct workqueue_struct *work_q;
322 324
323 /* Statistics */ 325 /* Statistics */
324 struct bnad_stats stats; 326 struct bnad_stats stats;
@@ -328,6 +330,7 @@ struct bnad {
328 char adapter_name[BNAD_NAME_LEN]; 330 char adapter_name[BNAD_NAME_LEN];
329 char port_name[BNAD_NAME_LEN]; 331 char port_name[BNAD_NAME_LEN];
330 char mbox_irq_name[BNAD_NAME_LEN]; 332 char mbox_irq_name[BNAD_NAME_LEN];
333 char wq_name[BNAD_NAME_LEN];
331 334
332 /* debugfs specific data */ 335 /* debugfs specific data */
333 char *regdata; 336 char *regdata;
@@ -370,8 +373,8 @@ extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
370 373
371extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id); 374extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
372extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id); 375extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
373extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id); 376extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
374extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id); 377extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
375 378
376/* Timer start/stop protos */ 379/* Timer start/stop protos */
377extern void bnad_dim_timer_start(struct bnad *bnad); 380extern void bnad_dim_timer_start(struct bnad *bnad);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index ab753d7334a6..40e1e84f4984 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -464,7 +464,7 @@ bnad_set_ringparam(struct net_device *netdev,
464 for (i = 0; i < bnad->num_rx; i++) { 464 for (i = 0; i < bnad->num_rx; i++) {
465 if (!bnad->rx_info[i].rx) 465 if (!bnad->rx_info[i].rx)
466 continue; 466 continue;
467 bnad_cleanup_rx(bnad, i); 467 bnad_destroy_rx(bnad, i);
468 current_err = bnad_setup_rx(bnad, i); 468 current_err = bnad_setup_rx(bnad, i);
469 if (current_err && !err) 469 if (current_err && !err)
470 err = current_err; 470 err = current_err;
@@ -492,7 +492,7 @@ bnad_set_ringparam(struct net_device *netdev,
492 for (i = 0; i < bnad->num_tx; i++) { 492 for (i = 0; i < bnad->num_tx; i++) {
493 if (!bnad->tx_info[i].tx) 493 if (!bnad->tx_info[i].tx)
494 continue; 494 continue;
495 bnad_cleanup_tx(bnad, i); 495 bnad_destroy_tx(bnad, i);
496 current_err = bnad_setup_tx(bnad, i); 496 current_err = bnad_setup_tx(bnad, i);
497 if (current_err && !err) 497 if (current_err && !err)
498 err = current_err; 498 err = current_err;
@@ -539,7 +539,7 @@ bnad_set_pauseparam(struct net_device *netdev,
539} 539}
540 540
541static void 541static void
542bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string) 542bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
543{ 543{
544 struct bnad *bnad = netdev_priv(netdev); 544 struct bnad *bnad = netdev_priv(netdev);
545 int i, j, q_num; 545 int i, j, q_num;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c4834c23be35..1466bc4e3dda 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1213,6 +1213,7 @@ static const struct ethtool_ops macb_ethtool_ops = {
1213 .set_settings = macb_set_settings, 1213 .set_settings = macb_set_settings,
1214 .get_drvinfo = macb_get_drvinfo, 1214 .get_drvinfo = macb_get_drvinfo,
1215 .get_link = ethtool_op_get_link, 1215 .get_link = ethtool_op_get_link,
1216 .get_ts_info = ethtool_op_get_ts_info,
1216}; 1217};
1217 1218
1218static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1219static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 77b4e873f91c..d7ac6c17547c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1193,18 +1193,16 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1193 if (err) 1193 if (err)
1194 return err; 1194 return err;
1195 1195
1196 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request); 1196 if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
1197 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); 1197 nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
1198 if (pp->set & ENIC_SET_NAME) 1198 ((pp->set & ENIC_SET_NAME) &&
1199 NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, 1199 nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
1200 pp->name); 1200 ((pp->set & ENIC_SET_INSTANCE) &&
1201 if (pp->set & ENIC_SET_INSTANCE) 1201 nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
1202 NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 1202 pp->instance_uuid)) ||
1203 pp->instance_uuid); 1203 ((pp->set & ENIC_SET_HOST) &&
1204 if (pp->set & ENIC_SET_HOST) 1204 nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
1205 NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, 1205 goto nla_put_failure;
1206 pp->host_uuid);
1207
1208 return 0; 1206 return 0;
1209 1207
1210nla_put_failure: 1208nla_put_failure:
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index dafea1ecb7b1..43464f0a4f99 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -184,7 +184,7 @@ static int (*enic_pp_handlers[])(struct enic *enic, int vf,
184}; 184};
185 185
186static const int enic_pp_handlers_count = 186static const int enic_pp_handlers_count =
187 sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers); 187 ARRAY_SIZE(enic_pp_handlers);
188 188
189static int enic_pp_preassociate(struct enic *enic, int vf, 189static int enic_pp_preassociate(struct enic *enic, int vf,
190 struct enic_port_profile *prev_pp, int *restore_pp) 190 struct enic_port_profile *prev_pp, int *restore_pp)
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 68f1c39184df..61cc09342865 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1380,6 +1380,7 @@ static void de_free_rings (struct de_private *de)
1380static int de_open (struct net_device *dev) 1380static int de_open (struct net_device *dev)
1381{ 1381{
1382 struct de_private *de = netdev_priv(dev); 1382 struct de_private *de = netdev_priv(dev);
1383 const int irq = de->pdev->irq;
1383 int rc; 1384 int rc;
1384 1385
1385 netif_dbg(de, ifup, dev, "enabling interface\n"); 1386 netif_dbg(de, ifup, dev, "enabling interface\n");
@@ -1394,10 +1395,9 @@ static int de_open (struct net_device *dev)
1394 1395
1395 dw32(IntrMask, 0); 1396 dw32(IntrMask, 0);
1396 1397
1397 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); 1398 rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1398 if (rc) { 1399 if (rc) {
1399 netdev_err(dev, "IRQ %d request failure, err=%d\n", 1400 netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
1400 dev->irq, rc);
1401 goto err_out_free; 1401 goto err_out_free;
1402 } 1402 }
1403 1403
@@ -1413,7 +1413,7 @@ static int de_open (struct net_device *dev)
1413 return 0; 1413 return 0;
1414 1414
1415err_out_free_irq: 1415err_out_free_irq:
1416 free_irq(dev->irq, dev); 1416 free_irq(irq, dev);
1417err_out_free: 1417err_out_free:
1418 de_free_rings(de); 1418 de_free_rings(de);
1419 return rc; 1419 return rc;
@@ -1434,7 +1434,7 @@ static int de_close (struct net_device *dev)
1434 netif_carrier_off(dev); 1434 netif_carrier_off(dev);
1435 spin_unlock_irqrestore(&de->lock, flags); 1435 spin_unlock_irqrestore(&de->lock, flags);
1436 1436
1437 free_irq(dev->irq, dev); 1437 free_irq(de->pdev->irq, dev);
1438 1438
1439 de_free_rings(de); 1439 de_free_rings(de);
1440 de_adapter_sleep(de); 1440 de_adapter_sleep(de);
@@ -1444,6 +1444,7 @@ static int de_close (struct net_device *dev)
1444static void de_tx_timeout (struct net_device *dev) 1444static void de_tx_timeout (struct net_device *dev)
1445{ 1445{
1446 struct de_private *de = netdev_priv(dev); 1446 struct de_private *de = netdev_priv(dev);
1447 const int irq = de->pdev->irq;
1447 1448
1448 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", 1449 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1449 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), 1450 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
@@ -1451,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev)
1451 1452
1452 del_timer_sync(&de->media_timer); 1453 del_timer_sync(&de->media_timer);
1453 1454
1454 disable_irq(dev->irq); 1455 disable_irq(irq);
1455 spin_lock_irq(&de->lock); 1456 spin_lock_irq(&de->lock);
1456 1457
1457 de_stop_hw(de); 1458 de_stop_hw(de);
@@ -1459,12 +1460,12 @@ static void de_tx_timeout (struct net_device *dev)
1459 netif_carrier_off(dev); 1460 netif_carrier_off(dev);
1460 1461
1461 spin_unlock_irq(&de->lock); 1462 spin_unlock_irq(&de->lock);
1462 enable_irq(dev->irq); 1463 enable_irq(irq);
1463 1464
1464 /* Update the error counts. */ 1465 /* Update the error counts. */
1465 __de_get_stats(de); 1466 __de_get_stats(de);
1466 1467
1467 synchronize_irq(dev->irq); 1468 synchronize_irq(irq);
1468 de_clean_rings(de); 1469 de_clean_rings(de);
1469 1470
1470 de_init_rings(de); 1471 de_init_rings(de);
@@ -2024,8 +2025,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2024 goto err_out_res; 2025 goto err_out_res;
2025 } 2026 }
2026 2027
2027 dev->irq = pdev->irq;
2028
2029 /* obtain and check validity of PCI I/O address */ 2028 /* obtain and check validity of PCI I/O address */
2030 pciaddr = pci_resource_start(pdev, 1); 2029 pciaddr = pci_resource_start(pdev, 1);
2031 if (!pciaddr) { 2030 if (!pciaddr) {
@@ -2050,7 +2049,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2050 pciaddr, pci_name(pdev)); 2049 pciaddr, pci_name(pdev));
2051 goto err_out_res; 2050 goto err_out_res;
2052 } 2051 }
2053 dev->base_addr = (unsigned long) regs;
2054 de->regs = regs; 2052 de->regs = regs;
2055 2053
2056 de_adapter_wake(de); 2054 de_adapter_wake(de);
@@ -2078,11 +2076,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2078 goto err_out_iomap; 2076 goto err_out_iomap;
2079 2077
2080 /* print info about board and interface just registered */ 2078 /* print info about board and interface just registered */
2081 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", 2079 netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
2082 de->de21040 ? "21040" : "21041", 2080 de->de21040 ? "21040" : "21041",
2083 dev->base_addr, 2081 regs, dev->dev_addr, pdev->irq);
2084 dev->dev_addr,
2085 dev->irq);
2086 2082
2087 pci_set_drvdata(pdev, dev); 2083 pci_set_drvdata(pdev, dev);
2088 2084
@@ -2130,9 +2126,11 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2130 2126
2131 rtnl_lock(); 2127 rtnl_lock();
2132 if (netif_running (dev)) { 2128 if (netif_running (dev)) {
2129 const int irq = pdev->irq;
2130
2133 del_timer_sync(&de->media_timer); 2131 del_timer_sync(&de->media_timer);
2134 2132
2135 disable_irq(dev->irq); 2133 disable_irq(irq);
2136 spin_lock_irq(&de->lock); 2134 spin_lock_irq(&de->lock);
2137 2135
2138 de_stop_hw(de); 2136 de_stop_hw(de);
@@ -2141,12 +2139,12 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2141 netif_carrier_off(dev); 2139 netif_carrier_off(dev);
2142 2140
2143 spin_unlock_irq(&de->lock); 2141 spin_unlock_irq(&de->lock);
2144 enable_irq(dev->irq); 2142 enable_irq(irq);
2145 2143
2146 /* Update the error counts. */ 2144 /* Update the error counts. */
2147 __de_get_stats(de); 2145 __de_get_stats(de);
2148 2146
2149 synchronize_irq(dev->irq); 2147 synchronize_irq(irq);
2150 de_clean_rings(de); 2148 de_clean_rings(de);
2151 2149
2152 de_adapter_sleep(de); 2150 de_adapter_sleep(de);
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 1eccf4945485..4d6fe604fa64 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -150,6 +150,12 @@
150#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ 150#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
151#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ 151#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
152 152
153#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
154#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
155#define dr32(reg) ioread32(ioaddr + (reg))
156#define dr16(reg) ioread16(ioaddr + (reg))
157#define dr8(reg) ioread8(ioaddr + (reg))
158
153#define DMFE_DBUG(dbug_now, msg, value) \ 159#define DMFE_DBUG(dbug_now, msg, value) \
154 do { \ 160 do { \
155 if (dmfe_debug || (dbug_now)) \ 161 if (dmfe_debug || (dbug_now)) \
@@ -178,14 +184,6 @@
178 184
179#define SROM_V41_CODE 0x14 185#define SROM_V41_CODE 0x14
180 186
181#define SROM_CLK_WRITE(data, ioaddr) \
182 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
183 udelay(5); \
184 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
185 udelay(5); \
186 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
187 udelay(5);
188
189#define __CHK_IO_SIZE(pci_id, dev_rev) \ 187#define __CHK_IO_SIZE(pci_id, dev_rev) \
190 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \ 188 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
191 DM9102A_IO_SIZE: DM9102_IO_SIZE) 189 DM9102A_IO_SIZE: DM9102_IO_SIZE)
@@ -213,11 +211,11 @@ struct rx_desc {
213struct dmfe_board_info { 211struct dmfe_board_info {
214 u32 chip_id; /* Chip vendor/Device ID */ 212 u32 chip_id; /* Chip vendor/Device ID */
215 u8 chip_revision; /* Chip revision */ 213 u8 chip_revision; /* Chip revision */
216 struct DEVICE *next_dev; /* next device */ 214 struct net_device *next_dev; /* next device */
217 struct pci_dev *pdev; /* PCI device */ 215 struct pci_dev *pdev; /* PCI device */
218 spinlock_t lock; 216 spinlock_t lock;
219 217
220 long ioaddr; /* I/O base address */ 218 void __iomem *ioaddr; /* I/O base address */
221 u32 cr0_data; 219 u32 cr0_data;
222 u32 cr5_data; 220 u32 cr5_data;
223 u32 cr6_data; 221 u32 cr6_data;
@@ -320,20 +318,20 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
320static int dmfe_stop(struct DEVICE *); 318static int dmfe_stop(struct DEVICE *);
321static void dmfe_set_filter_mode(struct DEVICE *); 319static void dmfe_set_filter_mode(struct DEVICE *);
322static const struct ethtool_ops netdev_ethtool_ops; 320static const struct ethtool_ops netdev_ethtool_ops;
323static u16 read_srom_word(long ,int); 321static u16 read_srom_word(void __iomem *, int);
324static irqreturn_t dmfe_interrupt(int , void *); 322static irqreturn_t dmfe_interrupt(int , void *);
325#ifdef CONFIG_NET_POLL_CONTROLLER 323#ifdef CONFIG_NET_POLL_CONTROLLER
326static void poll_dmfe (struct net_device *dev); 324static void poll_dmfe (struct net_device *dev);
327#endif 325#endif
328static void dmfe_descriptor_init(struct net_device *, unsigned long); 326static void dmfe_descriptor_init(struct net_device *);
329static void allocate_rx_buffer(struct net_device *); 327static void allocate_rx_buffer(struct net_device *);
330static void update_cr6(u32, unsigned long); 328static void update_cr6(u32, void __iomem *);
331static void send_filter_frame(struct DEVICE *); 329static void send_filter_frame(struct DEVICE *);
332static void dm9132_id_table(struct DEVICE *); 330static void dm9132_id_table(struct DEVICE *);
333static u16 phy_read(unsigned long, u8, u8, u32); 331static u16 phy_read(void __iomem *, u8, u8, u32);
334static void phy_write(unsigned long, u8, u8, u16, u32); 332static void phy_write(void __iomem *, u8, u8, u16, u32);
335static void phy_write_1bit(unsigned long, u32); 333static void phy_write_1bit(void __iomem *, u32);
336static u16 phy_read_1bit(unsigned long); 334static u16 phy_read_1bit(void __iomem *);
337static u8 dmfe_sense_speed(struct dmfe_board_info *); 335static u8 dmfe_sense_speed(struct dmfe_board_info *);
338static void dmfe_process_mode(struct dmfe_board_info *); 336static void dmfe_process_mode(struct dmfe_board_info *);
339static void dmfe_timer(unsigned long); 337static void dmfe_timer(unsigned long);
@@ -462,14 +460,16 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
462 db->buf_pool_dma_start = db->buf_pool_dma_ptr; 460 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
463 461
464 db->chip_id = ent->driver_data; 462 db->chip_id = ent->driver_data;
465 db->ioaddr = pci_resource_start(pdev, 0); 463 /* IO type range. */
464 db->ioaddr = pci_iomap(pdev, 0, 0);
465 if (!db->ioaddr)
466 goto err_out_free_buf;
467
466 db->chip_revision = pdev->revision; 468 db->chip_revision = pdev->revision;
467 db->wol_mode = 0; 469 db->wol_mode = 0;
468 470
469 db->pdev = pdev; 471 db->pdev = pdev;
470 472
471 dev->base_addr = db->ioaddr;
472 dev->irq = pdev->irq;
473 pci_set_drvdata(pdev, dev); 473 pci_set_drvdata(pdev, dev);
474 dev->netdev_ops = &netdev_ops; 474 dev->netdev_ops = &netdev_ops;
475 dev->ethtool_ops = &netdev_ethtool_ops; 475 dev->ethtool_ops = &netdev_ethtool_ops;
@@ -484,9 +484,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
484 db->chip_type = 0; 484 db->chip_type = 0;
485 485
486 /* read 64 word srom data */ 486 /* read 64 word srom data */
487 for (i = 0; i < 64; i++) 487 for (i = 0; i < 64; i++) {
488 ((__le16 *) db->srom)[i] = 488 ((__le16 *) db->srom)[i] =
489 cpu_to_le16(read_srom_word(db->ioaddr, i)); 489 cpu_to_le16(read_srom_word(db->ioaddr, i));
490 }
490 491
491 /* Set Node address */ 492 /* Set Node address */
492 for (i = 0; i < 6; i++) 493 for (i = 0; i < 6; i++)
@@ -494,16 +495,18 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
494 495
495 err = register_netdev (dev); 496 err = register_netdev (dev);
496 if (err) 497 if (err)
497 goto err_out_free_buf; 498 goto err_out_unmap;
498 499
499 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n", 500 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
500 ent->driver_data >> 16, 501 ent->driver_data >> 16,
501 pci_name(pdev), dev->dev_addr, dev->irq); 502 pci_name(pdev), dev->dev_addr, pdev->irq);
502 503
503 pci_set_master(pdev); 504 pci_set_master(pdev);
504 505
505 return 0; 506 return 0;
506 507
508err_out_unmap:
509 pci_iounmap(pdev, db->ioaddr);
507err_out_free_buf: 510err_out_free_buf:
508 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, 511 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
509 db->buf_pool_ptr, db->buf_pool_dma_ptr); 512 db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -532,7 +535,7 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
532 if (dev) { 535 if (dev) {
533 536
534 unregister_netdev(dev); 537 unregister_netdev(dev);
535 538 pci_iounmap(db->pdev, db->ioaddr);
536 pci_free_consistent(db->pdev, sizeof(struct tx_desc) * 539 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
537 DESC_ALL_CNT + 0x20, db->desc_pool_ptr, 540 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
538 db->desc_pool_dma_ptr); 541 db->desc_pool_dma_ptr);
@@ -555,13 +558,13 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
555 558
556static int dmfe_open(struct DEVICE *dev) 559static int dmfe_open(struct DEVICE *dev)
557{ 560{
558 int ret;
559 struct dmfe_board_info *db = netdev_priv(dev); 561 struct dmfe_board_info *db = netdev_priv(dev);
562 const int irq = db->pdev->irq;
563 int ret;
560 564
561 DMFE_DBUG(0, "dmfe_open", 0); 565 DMFE_DBUG(0, "dmfe_open", 0);
562 566
563 ret = request_irq(dev->irq, dmfe_interrupt, 567 ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
564 IRQF_SHARED, dev->name, dev);
565 if (ret) 568 if (ret)
566 return ret; 569 return ret;
567 570
@@ -615,14 +618,14 @@ static int dmfe_open(struct DEVICE *dev)
615static void dmfe_init_dm910x(struct DEVICE *dev) 618static void dmfe_init_dm910x(struct DEVICE *dev)
616{ 619{
617 struct dmfe_board_info *db = netdev_priv(dev); 620 struct dmfe_board_info *db = netdev_priv(dev);
618 unsigned long ioaddr = db->ioaddr; 621 void __iomem *ioaddr = db->ioaddr;
619 622
620 DMFE_DBUG(0, "dmfe_init_dm910x()", 0); 623 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
621 624
622 /* Reset DM910x MAC controller */ 625 /* Reset DM910x MAC controller */
623 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */ 626 dw32(DCR0, DM910X_RESET); /* RESET MAC */
624 udelay(100); 627 udelay(100);
625 outl(db->cr0_data, ioaddr + DCR0); 628 dw32(DCR0, db->cr0_data);
626 udelay(5); 629 udelay(5);
627 630
628 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */ 631 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
@@ -633,12 +636,12 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
633 db->media_mode = dmfe_media_mode; 636 db->media_mode = dmfe_media_mode;
634 637
635 /* RESET Phyxcer Chip by GPR port bit 7 */ 638 /* RESET Phyxcer Chip by GPR port bit 7 */
636 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */ 639 dw32(DCR12, 0x180); /* Let bit 7 output port */
637 if (db->chip_id == PCI_DM9009_ID) { 640 if (db->chip_id == PCI_DM9009_ID) {
638 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */ 641 dw32(DCR12, 0x80); /* Issue RESET signal */
639 mdelay(300); /* Delay 300 ms */ 642 mdelay(300); /* Delay 300 ms */
640 } 643 }
641 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */ 644 dw32(DCR12, 0x0); /* Clear RESET signal */
642 645
643 /* Process Phyxcer Media Mode */ 646 /* Process Phyxcer Media Mode */
644 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */ 647 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
@@ -649,7 +652,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
649 db->op_mode = db->media_mode; /* Force Mode */ 652 db->op_mode = db->media_mode; /* Force Mode */
650 653
651 /* Initialize Transmit/Receive decriptor and CR3/4 */ 654 /* Initialize Transmit/Receive decriptor and CR3/4 */
652 dmfe_descriptor_init(dev, ioaddr); 655 dmfe_descriptor_init(dev);
653 656
654 /* Init CR6 to program DM910x operation */ 657 /* Init CR6 to program DM910x operation */
655 update_cr6(db->cr6_data, ioaddr); 658 update_cr6(db->cr6_data, ioaddr);
@@ -662,10 +665,10 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
662 665
663 /* Init CR7, interrupt active bit */ 666 /* Init CR7, interrupt active bit */
664 db->cr7_data = CR7_DEFAULT; 667 db->cr7_data = CR7_DEFAULT;
665 outl(db->cr7_data, ioaddr + DCR7); 668 dw32(DCR7, db->cr7_data);
666 669
667 /* Init CR15, Tx jabber and Rx watchdog timer */ 670 /* Init CR15, Tx jabber and Rx watchdog timer */
668 outl(db->cr15_data, ioaddr + DCR15); 671 dw32(DCR15, db->cr15_data);
669 672
670 /* Enable DM910X Tx/Rx function */ 673 /* Enable DM910X Tx/Rx function */
671 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000; 674 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
@@ -682,6 +685,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
682 struct DEVICE *dev) 685 struct DEVICE *dev)
683{ 686{
684 struct dmfe_board_info *db = netdev_priv(dev); 687 struct dmfe_board_info *db = netdev_priv(dev);
688 void __iomem *ioaddr = db->ioaddr;
685 struct tx_desc *txptr; 689 struct tx_desc *txptr;
686 unsigned long flags; 690 unsigned long flags;
687 691
@@ -707,7 +711,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
707 } 711 }
708 712
709 /* Disable NIC interrupt */ 713 /* Disable NIC interrupt */
710 outl(0, dev->base_addr + DCR7); 714 dw32(DCR7, 0);
711 715
712 /* transmit this packet */ 716 /* transmit this packet */
713 txptr = db->tx_insert_ptr; 717 txptr = db->tx_insert_ptr;
@@ -721,11 +725,11 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
721 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) { 725 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
722 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 726 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
723 db->tx_packet_cnt++; /* Ready to send */ 727 db->tx_packet_cnt++; /* Ready to send */
724 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 728 dw32(DCR1, 0x1); /* Issue Tx polling */
725 dev->trans_start = jiffies; /* saved time stamp */ 729 dev->trans_start = jiffies; /* saved time stamp */
726 } else { 730 } else {
727 db->tx_queue_cnt++; /* queue TX packet */ 731 db->tx_queue_cnt++; /* queue TX packet */
728 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 732 dw32(DCR1, 0x1); /* Issue Tx polling */
729 } 733 }
730 734
731 /* Tx resource check */ 735 /* Tx resource check */
@@ -734,7 +738,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
734 738
735 /* Restore CR7 to enable interrupt */ 739 /* Restore CR7 to enable interrupt */
736 spin_unlock_irqrestore(&db->lock, flags); 740 spin_unlock_irqrestore(&db->lock, flags);
737 outl(db->cr7_data, dev->base_addr + DCR7); 741 dw32(DCR7, db->cr7_data);
738 742
739 /* free this SKB */ 743 /* free this SKB */
740 dev_kfree_skb(skb); 744 dev_kfree_skb(skb);
@@ -751,7 +755,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
751static int dmfe_stop(struct DEVICE *dev) 755static int dmfe_stop(struct DEVICE *dev)
752{ 756{
753 struct dmfe_board_info *db = netdev_priv(dev); 757 struct dmfe_board_info *db = netdev_priv(dev);
754 unsigned long ioaddr = dev->base_addr; 758 void __iomem *ioaddr = db->ioaddr;
755 759
756 DMFE_DBUG(0, "dmfe_stop", 0); 760 DMFE_DBUG(0, "dmfe_stop", 0);
757 761
@@ -762,12 +766,12 @@ static int dmfe_stop(struct DEVICE *dev)
762 del_timer_sync(&db->timer); 766 del_timer_sync(&db->timer);
763 767
764 /* Reset & stop DM910X board */ 768 /* Reset & stop DM910X board */
765 outl(DM910X_RESET, ioaddr + DCR0); 769 dw32(DCR0, DM910X_RESET);
766 udelay(5); 770 udelay(100);
767 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); 771 phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
768 772
769 /* free interrupt */ 773 /* free interrupt */
770 free_irq(dev->irq, dev); 774 free_irq(db->pdev->irq, dev);
771 775
772 /* free allocated rx buffer */ 776 /* free allocated rx buffer */
773 dmfe_free_rxbuffer(db); 777 dmfe_free_rxbuffer(db);
@@ -794,7 +798,7 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
794{ 798{
795 struct DEVICE *dev = dev_id; 799 struct DEVICE *dev = dev_id;
796 struct dmfe_board_info *db = netdev_priv(dev); 800 struct dmfe_board_info *db = netdev_priv(dev);
797 unsigned long ioaddr = dev->base_addr; 801 void __iomem *ioaddr = db->ioaddr;
798 unsigned long flags; 802 unsigned long flags;
799 803
800 DMFE_DBUG(0, "dmfe_interrupt()", 0); 804 DMFE_DBUG(0, "dmfe_interrupt()", 0);
@@ -802,15 +806,15 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
802 spin_lock_irqsave(&db->lock, flags); 806 spin_lock_irqsave(&db->lock, flags);
803 807
804 /* Got DM910X status */ 808 /* Got DM910X status */
805 db->cr5_data = inl(ioaddr + DCR5); 809 db->cr5_data = dr32(DCR5);
806 outl(db->cr5_data, ioaddr + DCR5); 810 dw32(DCR5, db->cr5_data);
807 if ( !(db->cr5_data & 0xc1) ) { 811 if ( !(db->cr5_data & 0xc1) ) {
808 spin_unlock_irqrestore(&db->lock, flags); 812 spin_unlock_irqrestore(&db->lock, flags);
809 return IRQ_HANDLED; 813 return IRQ_HANDLED;
810 } 814 }
811 815
812 /* Disable all interrupt in CR7 to solve the interrupt edge problem */ 816 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
813 outl(0, ioaddr + DCR7); 817 dw32(DCR7, 0);
814 818
815 /* Check system status */ 819 /* Check system status */
816 if (db->cr5_data & 0x2000) { 820 if (db->cr5_data & 0x2000) {
@@ -838,11 +842,11 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
838 if (db->dm910x_chk_mode & 0x2) { 842 if (db->dm910x_chk_mode & 0x2) {
839 db->dm910x_chk_mode = 0x4; 843 db->dm910x_chk_mode = 0x4;
840 db->cr6_data |= 0x100; 844 db->cr6_data |= 0x100;
841 update_cr6(db->cr6_data, db->ioaddr); 845 update_cr6(db->cr6_data, ioaddr);
842 } 846 }
843 847
844 /* Restore CR7 to enable interrupt mask */ 848 /* Restore CR7 to enable interrupt mask */
845 outl(db->cr7_data, ioaddr + DCR7); 849 dw32(DCR7, db->cr7_data);
846 850
847 spin_unlock_irqrestore(&db->lock, flags); 851 spin_unlock_irqrestore(&db->lock, flags);
848 return IRQ_HANDLED; 852 return IRQ_HANDLED;
@@ -858,11 +862,14 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
858 862
859static void poll_dmfe (struct net_device *dev) 863static void poll_dmfe (struct net_device *dev)
860{ 864{
865 struct dmfe_board_info *db = netdev_priv(dev);
866 const int irq = db->pdev->irq;
867
861 /* disable_irq here is not very nice, but with the lockless 868 /* disable_irq here is not very nice, but with the lockless
862 interrupt handler we have no other choice. */ 869 interrupt handler we have no other choice. */
863 disable_irq(dev->irq); 870 disable_irq(irq);
864 dmfe_interrupt (dev->irq, dev); 871 dmfe_interrupt (irq, dev);
865 enable_irq(dev->irq); 872 enable_irq(irq);
866} 873}
867#endif 874#endif
868 875
@@ -873,7 +880,7 @@ static void poll_dmfe (struct net_device *dev)
873static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) 880static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
874{ 881{
875 struct tx_desc *txptr; 882 struct tx_desc *txptr;
876 unsigned long ioaddr = dev->base_addr; 883 void __iomem *ioaddr = db->ioaddr;
877 u32 tdes0; 884 u32 tdes0;
878 885
879 txptr = db->tx_remove_ptr; 886 txptr = db->tx_remove_ptr;
@@ -897,7 +904,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
897 db->tx_fifo_underrun++; 904 db->tx_fifo_underrun++;
898 if ( !(db->cr6_data & CR6_SFT) ) { 905 if ( !(db->cr6_data & CR6_SFT) ) {
899 db->cr6_data = db->cr6_data | CR6_SFT; 906 db->cr6_data = db->cr6_data | CR6_SFT;
900 update_cr6(db->cr6_data, db->ioaddr); 907 update_cr6(db->cr6_data, ioaddr);
901 } 908 }
902 } 909 }
903 if (tdes0 & 0x0100) 910 if (tdes0 & 0x0100)
@@ -924,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
924 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 931 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
925 db->tx_packet_cnt++; /* Ready to send */ 932 db->tx_packet_cnt++; /* Ready to send */
926 db->tx_queue_cnt--; 933 db->tx_queue_cnt--;
927 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */ 934 dw32(DCR1, 0x1); /* Issue Tx polling */
928 dev->trans_start = jiffies; /* saved time stamp */ 935 dev->trans_start = jiffies; /* saved time stamp */
929 } 936 }
930 937
@@ -1087,12 +1094,7 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1087 1094
1088 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1095 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1089 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1096 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1090 if (np->pdev) 1097 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1091 strlcpy(info->bus_info, pci_name(np->pdev),
1092 sizeof(info->bus_info));
1093 else
1094 sprintf(info->bus_info, "EISA 0x%lx %d",
1095 dev->base_addr, dev->irq);
1096} 1098}
1097 1099
1098static int dmfe_ethtool_set_wol(struct net_device *dev, 1100static int dmfe_ethtool_set_wol(struct net_device *dev,
@@ -1132,10 +1134,11 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1132 1134
1133static void dmfe_timer(unsigned long data) 1135static void dmfe_timer(unsigned long data)
1134{ 1136{
1137 struct net_device *dev = (struct net_device *)data;
1138 struct dmfe_board_info *db = netdev_priv(dev);
1139 void __iomem *ioaddr = db->ioaddr;
1135 u32 tmp_cr8; 1140 u32 tmp_cr8;
1136 unsigned char tmp_cr12; 1141 unsigned char tmp_cr12;
1137 struct DEVICE *dev = (struct DEVICE *) data;
1138 struct dmfe_board_info *db = netdev_priv(dev);
1139 unsigned long flags; 1142 unsigned long flags;
1140 1143
1141 int link_ok, link_ok_phy; 1144 int link_ok, link_ok_phy;
@@ -1148,11 +1151,10 @@ static void dmfe_timer(unsigned long data)
1148 db->first_in_callback = 1; 1151 db->first_in_callback = 1;
1149 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { 1152 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1150 db->cr6_data &= ~0x40000; 1153 db->cr6_data &= ~0x40000;
1151 update_cr6(db->cr6_data, db->ioaddr); 1154 update_cr6(db->cr6_data, ioaddr);
1152 phy_write(db->ioaddr, 1155 phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1153 db->phy_addr, 0, 0x1000, db->chip_id);
1154 db->cr6_data |= 0x40000; 1156 db->cr6_data |= 0x40000;
1155 update_cr6(db->cr6_data, db->ioaddr); 1157 update_cr6(db->cr6_data, ioaddr);
1156 db->timer.expires = DMFE_TIMER_WUT + HZ * 2; 1158 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1157 add_timer(&db->timer); 1159 add_timer(&db->timer);
1158 spin_unlock_irqrestore(&db->lock, flags); 1160 spin_unlock_irqrestore(&db->lock, flags);
@@ -1167,7 +1169,7 @@ static void dmfe_timer(unsigned long data)
1167 db->dm910x_chk_mode = 0x4; 1169 db->dm910x_chk_mode = 0x4;
1168 1170
1169 /* Dynamic reset DM910X : system error or transmit time-out */ 1171 /* Dynamic reset DM910X : system error or transmit time-out */
1170 tmp_cr8 = inl(db->ioaddr + DCR8); 1172 tmp_cr8 = dr32(DCR8);
1171 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { 1173 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1172 db->reset_cr8++; 1174 db->reset_cr8++;
1173 db->wait_reset = 1; 1175 db->wait_reset = 1;
@@ -1177,7 +1179,7 @@ static void dmfe_timer(unsigned long data)
1177 /* TX polling kick monitor */ 1179 /* TX polling kick monitor */
1178 if ( db->tx_packet_cnt && 1180 if ( db->tx_packet_cnt &&
1179 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) { 1181 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1180 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */ 1182 dw32(DCR1, 0x1); /* Tx polling again */
1181 1183
1182 /* TX Timeout */ 1184 /* TX Timeout */
1183 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) { 1185 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
@@ -1200,9 +1202,9 @@ static void dmfe_timer(unsigned long data)
1200 1202
1201 /* Link status check, Dynamic media type change */ 1203 /* Link status check, Dynamic media type change */
1202 if (db->chip_id == PCI_DM9132_ID) 1204 if (db->chip_id == PCI_DM9132_ID)
1203 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */ 1205 tmp_cr12 = dr8(DCR9 + 3); /* DM9132 */
1204 else 1206 else
1205 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */ 1207 tmp_cr12 = dr8(DCR12); /* DM9102/DM9102A */
1206 1208
1207 if ( ((db->chip_id == PCI_DM9102_ID) && 1209 if ( ((db->chip_id == PCI_DM9102_ID) &&
1208 (db->chip_revision == 0x30)) || 1210 (db->chip_revision == 0x30)) ||
@@ -1251,7 +1253,7 @@ static void dmfe_timer(unsigned long data)
1251 /* 10/100M link failed, used 1M Home-Net */ 1253 /* 10/100M link failed, used 1M Home-Net */
1252 db->cr6_data|=0x00040000; /* bit18=1, MII */ 1254 db->cr6_data|=0x00040000; /* bit18=1, MII */
1253 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ 1255 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1254 update_cr6(db->cr6_data, db->ioaddr); 1256 update_cr6(db->cr6_data, ioaddr);
1255 } 1257 }
1256 } else if (!netif_carrier_ok(dev)) { 1258 } else if (!netif_carrier_ok(dev)) {
1257 1259
@@ -1288,17 +1290,18 @@ static void dmfe_timer(unsigned long data)
1288 * Re-initialize DM910X board 1290 * Re-initialize DM910X board
1289 */ 1291 */
1290 1292
1291static void dmfe_dynamic_reset(struct DEVICE *dev) 1293static void dmfe_dynamic_reset(struct net_device *dev)
1292{ 1294{
1293 struct dmfe_board_info *db = netdev_priv(dev); 1295 struct dmfe_board_info *db = netdev_priv(dev);
1296 void __iomem *ioaddr = db->ioaddr;
1294 1297
1295 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0); 1298 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1296 1299
1297 /* Sopt MAC controller */ 1300 /* Sopt MAC controller */
1298 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ 1301 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1299 update_cr6(db->cr6_data, dev->base_addr); 1302 update_cr6(db->cr6_data, ioaddr);
1300 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ 1303 dw32(DCR7, 0); /* Disable Interrupt */
1301 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); 1304 dw32(DCR5, dr32(DCR5));
1302 1305
1303 /* Disable upper layer interface */ 1306 /* Disable upper layer interface */
1304 netif_stop_queue(dev); 1307 netif_stop_queue(dev);
@@ -1364,9 +1367,10 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1364 * Using Chain structure, and allocate Tx/Rx buffer 1367 * Using Chain structure, and allocate Tx/Rx buffer
1365 */ 1368 */
1366 1369
1367static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr) 1370static void dmfe_descriptor_init(struct net_device *dev)
1368{ 1371{
1369 struct dmfe_board_info *db = netdev_priv(dev); 1372 struct dmfe_board_info *db = netdev_priv(dev);
1373 void __iomem *ioaddr = db->ioaddr;
1370 struct tx_desc *tmp_tx; 1374 struct tx_desc *tmp_tx;
1371 struct rx_desc *tmp_rx; 1375 struct rx_desc *tmp_rx;
1372 unsigned char *tmp_buf; 1376 unsigned char *tmp_buf;
@@ -1379,7 +1383,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1379 /* tx descriptor start pointer */ 1383 /* tx descriptor start pointer */
1380 db->tx_insert_ptr = db->first_tx_desc; 1384 db->tx_insert_ptr = db->first_tx_desc;
1381 db->tx_remove_ptr = db->first_tx_desc; 1385 db->tx_remove_ptr = db->first_tx_desc;
1382 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ 1386 dw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */
1383 1387
1384 /* rx descriptor start pointer */ 1388 /* rx descriptor start pointer */
1385 db->first_rx_desc = (void *)db->first_tx_desc + 1389 db->first_rx_desc = (void *)db->first_tx_desc +
@@ -1389,7 +1393,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1389 sizeof(struct tx_desc) * TX_DESC_CNT; 1393 sizeof(struct tx_desc) * TX_DESC_CNT;
1390 db->rx_insert_ptr = db->first_rx_desc; 1394 db->rx_insert_ptr = db->first_rx_desc;
1391 db->rx_ready_ptr = db->first_rx_desc; 1395 db->rx_ready_ptr = db->first_rx_desc;
1392 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ 1396 dw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */
1393 1397
1394 /* Init Transmit chain */ 1398 /* Init Transmit chain */
1395 tmp_buf = db->buf_pool_start; 1399 tmp_buf = db->buf_pool_start;
@@ -1431,14 +1435,14 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1431 * Firstly stop DM910X , then written value and start 1435 * Firstly stop DM910X , then written value and start
1432 */ 1436 */
1433 1437
1434static void update_cr6(u32 cr6_data, unsigned long ioaddr) 1438static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1435{ 1439{
1436 u32 cr6_tmp; 1440 u32 cr6_tmp;
1437 1441
1438 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */ 1442 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1439 outl(cr6_tmp, ioaddr + DCR6); 1443 dw32(DCR6, cr6_tmp);
1440 udelay(5); 1444 udelay(5);
1441 outl(cr6_data, ioaddr + DCR6); 1445 dw32(DCR6, cr6_data);
1442 udelay(5); 1446 udelay(5);
1443} 1447}
1444 1448
@@ -1448,24 +1452,19 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1448 * This setup frame initialize DM910X address filter mode 1452 * This setup frame initialize DM910X address filter mode
1449*/ 1453*/
1450 1454
1451static void dm9132_id_table(struct DEVICE *dev) 1455static void dm9132_id_table(struct net_device *dev)
1452{ 1456{
1457 struct dmfe_board_info *db = netdev_priv(dev);
1458 void __iomem *ioaddr = db->ioaddr + 0xc0;
1459 u16 *addrptr = (u16 *)dev->dev_addr;
1453 struct netdev_hw_addr *ha; 1460 struct netdev_hw_addr *ha;
1454 u16 * addrptr;
1455 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1456 u32 hash_val;
1457 u16 i, hash_table[4]; 1461 u16 i, hash_table[4];
1458 1462
1459 DMFE_DBUG(0, "dm9132_id_table()", 0);
1460
1461 /* Node address */ 1463 /* Node address */
1462 addrptr = (u16 *) dev->dev_addr; 1464 for (i = 0; i < 3; i++) {
1463 outw(addrptr[0], ioaddr); 1465 dw16(0, addrptr[i]);
1464 ioaddr += 4; 1466 ioaddr += 4;
1465 outw(addrptr[1], ioaddr); 1467 }
1466 ioaddr += 4;
1467 outw(addrptr[2], ioaddr);
1468 ioaddr += 4;
1469 1468
1470 /* Clear Hash Table */ 1469 /* Clear Hash Table */
1471 memset(hash_table, 0, sizeof(hash_table)); 1470 memset(hash_table, 0, sizeof(hash_table));
@@ -1475,13 +1474,14 @@ static void dm9132_id_table(struct DEVICE *dev)
1475 1474
1476 /* the multicast address in Hash Table : 64 bits */ 1475 /* the multicast address in Hash Table : 64 bits */
1477 netdev_for_each_mc_addr(ha, dev) { 1476 netdev_for_each_mc_addr(ha, dev) {
1478 hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f; 1477 u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1478
1479 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1479 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1480 } 1480 }
1481 1481
1482 /* Write the hash table to MAC MD table */ 1482 /* Write the hash table to MAC MD table */
1483 for (i = 0; i < 4; i++, ioaddr += 4) 1483 for (i = 0; i < 4; i++, ioaddr += 4)
1484 outw(hash_table[i], ioaddr); 1484 dw16(0, hash_table[i]);
1485} 1485}
1486 1486
1487 1487
@@ -1490,7 +1490,7 @@ static void dm9132_id_table(struct DEVICE *dev)
1490 * This setup frame initialize DM910X address filter mode 1490 * This setup frame initialize DM910X address filter mode
1491 */ 1491 */
1492 1492
1493static void send_filter_frame(struct DEVICE *dev) 1493static void send_filter_frame(struct net_device *dev)
1494{ 1494{
1495 struct dmfe_board_info *db = netdev_priv(dev); 1495 struct dmfe_board_info *db = netdev_priv(dev);
1496 struct netdev_hw_addr *ha; 1496 struct netdev_hw_addr *ha;
@@ -1535,12 +1535,14 @@ static void send_filter_frame(struct DEVICE *dev)
1535 1535
1536 /* Resource Check and Send the setup packet */ 1536 /* Resource Check and Send the setup packet */
1537 if (!db->tx_packet_cnt) { 1537 if (!db->tx_packet_cnt) {
1538 void __iomem *ioaddr = db->ioaddr;
1539
1538 /* Resource Empty */ 1540 /* Resource Empty */
1539 db->tx_packet_cnt++; 1541 db->tx_packet_cnt++;
1540 txptr->tdes0 = cpu_to_le32(0x80000000); 1542 txptr->tdes0 = cpu_to_le32(0x80000000);
1541 update_cr6(db->cr6_data | 0x2000, dev->base_addr); 1543 update_cr6(db->cr6_data | 0x2000, ioaddr);
1542 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 1544 dw32(DCR1, 0x1); /* Issue Tx polling */
1543 update_cr6(db->cr6_data, dev->base_addr); 1545 update_cr6(db->cr6_data, ioaddr);
1544 dev->trans_start = jiffies; 1546 dev->trans_start = jiffies;
1545 } else 1547 } else
1546 db->tx_queue_cnt++; /* Put in TX queue */ 1548 db->tx_queue_cnt++; /* Put in TX queue */
@@ -1575,43 +1577,59 @@ static void allocate_rx_buffer(struct net_device *dev)
1575 db->rx_insert_ptr = rxptr; 1577 db->rx_insert_ptr = rxptr;
1576} 1578}
1577 1579
1580static void srom_clk_write(void __iomem *ioaddr, u32 data)
1581{
1582 static const u32 cmd[] = {
1583 CR9_SROM_READ | CR9_SRCS,
1584 CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1585 CR9_SROM_READ | CR9_SRCS
1586 };
1587 int i;
1588
1589 for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1590 dw32(DCR9, data | cmd[i]);
1591 udelay(5);
1592 }
1593}
1578 1594
1579/* 1595/*
1580 * Read one word data from the serial ROM 1596 * Read one word data from the serial ROM
1581 */ 1597 */
1582 1598static u16 read_srom_word(void __iomem *ioaddr, int offset)
1583static u16 read_srom_word(long ioaddr, int offset)
1584{ 1599{
1600 u16 srom_data;
1585 int i; 1601 int i;
1586 u16 srom_data = 0;
1587 long cr9_ioaddr = ioaddr + DCR9;
1588 1602
1589 outl(CR9_SROM_READ, cr9_ioaddr); 1603 dw32(DCR9, CR9_SROM_READ);
1590 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1604 udelay(5);
1605 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1606 udelay(5);
1591 1607
1592 /* Send the Read Command 110b */ 1608 /* Send the Read Command 110b */
1593 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1609 srom_clk_write(ioaddr, SROM_DATA_1);
1594 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1610 srom_clk_write(ioaddr, SROM_DATA_1);
1595 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); 1611 srom_clk_write(ioaddr, SROM_DATA_0);
1596 1612
1597 /* Send the offset */ 1613 /* Send the offset */
1598 for (i = 5; i >= 0; i--) { 1614 for (i = 5; i >= 0; i--) {
1599 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; 1615 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1600 SROM_CLK_WRITE(srom_data, cr9_ioaddr); 1616 srom_clk_write(ioaddr, srom_data);
1601 } 1617 }
1602 1618
1603 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1619 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1620 udelay(5);
1604 1621
1605 for (i = 16; i > 0; i--) { 1622 for (i = 16; i > 0; i--) {
1606 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); 1623 dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1607 udelay(5); 1624 udelay(5);
1608 srom_data = (srom_data << 1) | 1625 srom_data = (srom_data << 1) |
1609 ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); 1626 ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1610 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1627 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1611 udelay(5); 1628 udelay(5);
1612 } 1629 }
1613 1630
1614 outl(CR9_SROM_READ, cr9_ioaddr); 1631 dw32(DCR9, CR9_SROM_READ);
1632 udelay(5);
1615 return srom_data; 1633 return srom_data;
1616} 1634}
1617 1635
@@ -1620,13 +1638,14 @@ static u16 read_srom_word(long ioaddr, int offset)
1620 * Auto sense the media mode 1638 * Auto sense the media mode
1621 */ 1639 */
1622 1640
1623static u8 dmfe_sense_speed(struct dmfe_board_info * db) 1641static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1624{ 1642{
1643 void __iomem *ioaddr = db->ioaddr;
1625 u8 ErrFlag = 0; 1644 u8 ErrFlag = 0;
1626 u16 phy_mode; 1645 u16 phy_mode;
1627 1646
1628 /* CR6 bit18=0, select 10/100M */ 1647 /* CR6 bit18=0, select 10/100M */
1629 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr); 1648 update_cr6(db->cr6_data & ~0x40000, ioaddr);
1630 1649
1631 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1650 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1632 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1651 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
@@ -1665,11 +1684,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1665 1684
1666static void dmfe_set_phyxcer(struct dmfe_board_info *db) 1685static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1667{ 1686{
1687 void __iomem *ioaddr = db->ioaddr;
1668 u16 phy_reg; 1688 u16 phy_reg;
1669 1689
1670 /* Select 10/100M phyxcer */ 1690 /* Select 10/100M phyxcer */
1671 db->cr6_data &= ~0x40000; 1691 db->cr6_data &= ~0x40000;
1672 update_cr6(db->cr6_data, db->ioaddr); 1692 update_cr6(db->cr6_data, ioaddr);
1673 1693
1674 /* DM9009 Chip: Phyxcer reg18 bit12=0 */ 1694 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1675 if (db->chip_id == PCI_DM9009_ID) { 1695 if (db->chip_id == PCI_DM9009_ID) {
@@ -1765,18 +1785,15 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
1765 * Write a word to Phy register 1785 * Write a word to Phy register
1766 */ 1786 */
1767 1787
1768static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, 1788static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1769 u16 phy_data, u32 chip_id) 1789 u16 phy_data, u32 chip_id)
1770{ 1790{
1771 u16 i; 1791 u16 i;
1772 unsigned long ioaddr;
1773 1792
1774 if (chip_id == PCI_DM9132_ID) { 1793 if (chip_id == PCI_DM9132_ID) {
1775 ioaddr = iobase + 0x80 + offset * 4; 1794 dw16(0x80 + offset * 4, phy_data);
1776 outw(phy_data, ioaddr);
1777 } else { 1795 } else {
1778 /* DM9102/DM9102A Chip */ 1796 /* DM9102/DM9102A Chip */
1779 ioaddr = iobase + DCR9;
1780 1797
1781 /* Send 33 synchronization clock to Phy controller */ 1798 /* Send 33 synchronization clock to Phy controller */
1782 for (i = 0; i < 35; i++) 1799 for (i = 0; i < 35; i++)
@@ -1816,19 +1833,16 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1816 * Read a word data from phy register 1833 * Read a word data from phy register
1817 */ 1834 */
1818 1835
1819static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) 1836static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1820{ 1837{
1821 int i; 1838 int i;
1822 u16 phy_data; 1839 u16 phy_data;
1823 unsigned long ioaddr;
1824 1840
1825 if (chip_id == PCI_DM9132_ID) { 1841 if (chip_id == PCI_DM9132_ID) {
1826 /* DM9132 Chip */ 1842 /* DM9132 Chip */
1827 ioaddr = iobase + 0x80 + offset * 4; 1843 phy_data = dr16(0x80 + offset * 4);
1828 phy_data = inw(ioaddr);
1829 } else { 1844 } else {
1830 /* DM9102/DM9102A Chip */ 1845 /* DM9102/DM9102A Chip */
1831 ioaddr = iobase + DCR9;
1832 1846
1833 /* Send 33 synchronization clock to Phy controller */ 1847 /* Send 33 synchronization clock to Phy controller */
1834 for (i = 0; i < 35; i++) 1848 for (i = 0; i < 35; i++)
@@ -1870,13 +1884,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1870 * Write one bit data to Phy Controller 1884 * Write one bit data to Phy Controller
1871 */ 1885 */
1872 1886
1873static void phy_write_1bit(unsigned long ioaddr, u32 phy_data) 1887static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1874{ 1888{
1875 outl(phy_data, ioaddr); /* MII Clock Low */ 1889 dw32(DCR9, phy_data); /* MII Clock Low */
1876 udelay(1); 1890 udelay(1);
1877 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ 1891 dw32(DCR9, phy_data | MDCLKH); /* MII Clock High */
1878 udelay(1); 1892 udelay(1);
1879 outl(phy_data, ioaddr); /* MII Clock Low */ 1893 dw32(DCR9, phy_data); /* MII Clock Low */
1880 udelay(1); 1894 udelay(1);
1881} 1895}
1882 1896
@@ -1885,14 +1899,14 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1885 * Read one bit phy data from PHY controller 1899 * Read one bit phy data from PHY controller
1886 */ 1900 */
1887 1901
1888static u16 phy_read_1bit(unsigned long ioaddr) 1902static u16 phy_read_1bit(void __iomem *ioaddr)
1889{ 1903{
1890 u16 phy_data; 1904 u16 phy_data;
1891 1905
1892 outl(0x50000, ioaddr); 1906 dw32(DCR9, 0x50000);
1893 udelay(1); 1907 udelay(1);
1894 phy_data = ( inl(ioaddr) >> 19 ) & 0x1; 1908 phy_data = (dr32(DCR9) >> 19) & 0x1;
1895 outl(0x40000, ioaddr); 1909 dw32(DCR9, 0x40000);
1896 udelay(1); 1910 udelay(1);
1897 1911
1898 return phy_data; 1912 return phy_data;
@@ -1978,7 +1992,7 @@ static void dmfe_parse_srom(struct dmfe_board_info * db)
1978 1992
1979 /* Check DM9801 or DM9802 present or not */ 1993 /* Check DM9801 or DM9802 present or not */
1980 db->HPNA_present = 0; 1994 db->HPNA_present = 0;
1981 update_cr6(db->cr6_data|0x40000, db->ioaddr); 1995 update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1982 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); 1996 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1983 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { 1997 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1984 /* DM9801 or DM9802 present */ 1998 /* DM9801 or DM9802 present */
@@ -2095,6 +2109,7 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2095{ 2109{
2096 struct net_device *dev = pci_get_drvdata(pci_dev); 2110 struct net_device *dev = pci_get_drvdata(pci_dev);
2097 struct dmfe_board_info *db = netdev_priv(dev); 2111 struct dmfe_board_info *db = netdev_priv(dev);
2112 void __iomem *ioaddr = db->ioaddr;
2098 u32 tmp; 2113 u32 tmp;
2099 2114
2100 /* Disable upper layer interface */ 2115 /* Disable upper layer interface */
@@ -2102,11 +2117,11 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2102 2117
2103 /* Disable Tx/Rx */ 2118 /* Disable Tx/Rx */
2104 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); 2119 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2105 update_cr6(db->cr6_data, dev->base_addr); 2120 update_cr6(db->cr6_data, ioaddr);
2106 2121
2107 /* Disable Interrupt */ 2122 /* Disable Interrupt */
2108 outl(0, dev->base_addr + DCR7); 2123 dw32(DCR7, 0);
2109 outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5); 2124 dw32(DCR5, dr32(DCR5));
2110 2125
2111 /* Fre RX buffers */ 2126 /* Fre RX buffers */
2112 dmfe_free_rxbuffer(db); 2127 dmfe_free_rxbuffer(db);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fea3641d9398..c4f37aca2269 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -328,7 +328,7 @@ static void tulip_up(struct net_device *dev)
328 udelay(100); 328 udelay(100);
329 329
330 if (tulip_debug > 1) 330 if (tulip_debug > 1)
331 netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq); 331 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
332 332
333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3); 333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4); 334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -515,11 +515,13 @@ media_picked:
515static int 515static int
516tulip_open(struct net_device *dev) 516tulip_open(struct net_device *dev)
517{ 517{
518 struct tulip_private *tp = netdev_priv(dev);
518 int retval; 519 int retval;
519 520
520 tulip_init_ring (dev); 521 tulip_init_ring (dev);
521 522
522 retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev); 523 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
524 dev->name, dev);
523 if (retval) 525 if (retval)
524 goto free_ring; 526 goto free_ring;
525 527
@@ -841,7 +843,7 @@ static int tulip_close (struct net_device *dev)
841 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", 843 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
842 ioread32 (ioaddr + CSR5)); 844 ioread32 (ioaddr + CSR5));
843 845
844 free_irq (dev->irq, dev); 846 free_irq (tp->pdev->irq, dev);
845 847
846 tulip_free_ring (dev); 848 tulip_free_ring (dev);
847 849
@@ -1489,8 +1491,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1489 1491
1490 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); 1492 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1491 1493
1492 dev->base_addr = (unsigned long)ioaddr;
1493
1494#ifdef CONFIG_TULIP_MWI 1494#ifdef CONFIG_TULIP_MWI
1495 if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) 1495 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1496 tulip_mwi_config (pdev, dev); 1496 tulip_mwi_config (pdev, dev);
@@ -1650,7 +1650,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1650 for (i = 0; i < 6; i++) 1650 for (i = 0; i < 6; i++)
1651 last_phys_addr[i] = dev->dev_addr[i]; 1651 last_phys_addr[i] = dev->dev_addr[i];
1652 last_irq = irq; 1652 last_irq = irq;
1653 dev->irq = irq;
1654 1653
1655 /* The lower four bits are the media type. */ 1654 /* The lower four bits are the media type. */
1656 if (board_idx >= 0 && board_idx < MAX_UNITS) { 1655 if (board_idx >= 0 && board_idx < MAX_UNITS) {
@@ -1858,7 +1857,8 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1858 tulip_down(dev); 1857 tulip_down(dev);
1859 1858
1860 netif_device_detach(dev); 1859 netif_device_detach(dev);
1861 free_irq(dev->irq, dev); 1860 /* FIXME: it needlessly adds an error path. */
1861 free_irq(tp->pdev->irq, dev);
1862 1862
1863save_state: 1863save_state:
1864 pci_save_state(pdev); 1864 pci_save_state(pdev);
@@ -1900,7 +1900,9 @@ static int tulip_resume(struct pci_dev *pdev)
1900 return retval; 1900 return retval;
1901 } 1901 }
1902 1902
1903 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { 1903 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1904 dev->name, dev);
1905 if (retval) {
1904 pr_err("request_irq failed in resume\n"); 1906 pr_err("request_irq failed in resume\n");
1905 return retval; 1907 return retval;
1906 } 1908 }
@@ -1960,11 +1962,14 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
1960 1962
1961static void poll_tulip (struct net_device *dev) 1963static void poll_tulip (struct net_device *dev)
1962{ 1964{
1965 struct tulip_private *tp = netdev_priv(dev);
1966 const int irq = tp->pdev->irq;
1967
1963 /* disable_irq here is not very nice, but with the lockless 1968 /* disable_irq here is not very nice, but with the lockless
1964 interrupt handler we have no other choice. */ 1969 interrupt handler we have no other choice. */
1965 disable_irq(dev->irq); 1970 disable_irq(irq);
1966 tulip_interrupt (dev->irq, dev); 1971 tulip_interrupt (irq, dev);
1967 enable_irq(dev->irq); 1972 enable_irq(irq);
1968} 1973}
1969#endif 1974#endif
1970 1975
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index fc4001f6a5e4..75d45f8a37dc 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -42,6 +42,8 @@
42#include <asm/dma.h> 42#include <asm/dma.h>
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44 44
45#define uw32(reg, val) iowrite32(val, ioaddr + (reg))
46#define ur32(reg) ioread32(ioaddr + (reg))
45 47
46/* Board/System/Debug information/definition ---------------- */ 48/* Board/System/Debug information/definition ---------------- */
47#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/ 49#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/
@@ -110,14 +112,6 @@ do { \
110 112
111#define SROM_V41_CODE 0x14 113#define SROM_V41_CODE 0x14
112 114
113#define SROM_CLK_WRITE(data, ioaddr) \
114 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
115 udelay(5); \
116 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
117 udelay(5); \
118 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
119 udelay(5);
120
121/* Structure/enum declaration ------------------------------- */ 115/* Structure/enum declaration ------------------------------- */
122struct tx_desc { 116struct tx_desc {
123 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ 117 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -132,12 +126,15 @@ struct rx_desc {
132} __attribute__(( aligned(32) )); 126} __attribute__(( aligned(32) ));
133 127
134struct uli526x_board_info { 128struct uli526x_board_info {
135 u32 chip_id; /* Chip vendor/Device ID */ 129 struct uli_phy_ops {
130 void (*write)(struct uli526x_board_info *, u8, u8, u16);
131 u16 (*read)(struct uli526x_board_info *, u8, u8);
132 } phy;
136 struct net_device *next_dev; /* next device */ 133 struct net_device *next_dev; /* next device */
137 struct pci_dev *pdev; /* PCI device */ 134 struct pci_dev *pdev; /* PCI device */
138 spinlock_t lock; 135 spinlock_t lock;
139 136
140 long ioaddr; /* I/O base address */ 137 void __iomem *ioaddr; /* I/O base address */
141 u32 cr0_data; 138 u32 cr0_data;
142 u32 cr5_data; 139 u32 cr5_data;
143 u32 cr6_data; 140 u32 cr6_data;
@@ -227,21 +224,21 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *,
227static int uli526x_stop(struct net_device *); 224static int uli526x_stop(struct net_device *);
228static void uli526x_set_filter_mode(struct net_device *); 225static void uli526x_set_filter_mode(struct net_device *);
229static const struct ethtool_ops netdev_ethtool_ops; 226static const struct ethtool_ops netdev_ethtool_ops;
230static u16 read_srom_word(long, int); 227static u16 read_srom_word(struct uli526x_board_info *, int);
231static irqreturn_t uli526x_interrupt(int, void *); 228static irqreturn_t uli526x_interrupt(int, void *);
232#ifdef CONFIG_NET_POLL_CONTROLLER 229#ifdef CONFIG_NET_POLL_CONTROLLER
233static void uli526x_poll(struct net_device *dev); 230static void uli526x_poll(struct net_device *dev);
234#endif 231#endif
235static void uli526x_descriptor_init(struct net_device *, unsigned long); 232static void uli526x_descriptor_init(struct net_device *, void __iomem *);
236static void allocate_rx_buffer(struct net_device *); 233static void allocate_rx_buffer(struct net_device *);
237static void update_cr6(u32, unsigned long); 234static void update_cr6(u32, void __iomem *);
238static void send_filter_frame(struct net_device *, int); 235static void send_filter_frame(struct net_device *, int);
239static u16 phy_read(unsigned long, u8, u8, u32); 236static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8);
240static u16 phy_readby_cr10(unsigned long, u8, u8); 237static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8);
241static void phy_write(unsigned long, u8, u8, u16, u32); 238static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16);
242static void phy_writeby_cr10(unsigned long, u8, u8, u16); 239static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16);
243static void phy_write_1bit(unsigned long, u32, u32); 240static void phy_write_1bit(struct uli526x_board_info *db, u32);
244static u16 phy_read_1bit(unsigned long, u32); 241static u16 phy_read_1bit(struct uli526x_board_info *db);
245static u8 uli526x_sense_speed(struct uli526x_board_info *); 242static u8 uli526x_sense_speed(struct uli526x_board_info *);
246static void uli526x_process_mode(struct uli526x_board_info *); 243static void uli526x_process_mode(struct uli526x_board_info *);
247static void uli526x_timer(unsigned long); 244static void uli526x_timer(unsigned long);
@@ -253,6 +250,18 @@ static void uli526x_free_rxbuffer(struct uli526x_board_info *);
253static void uli526x_init(struct net_device *); 250static void uli526x_init(struct net_device *);
254static void uli526x_set_phyxcer(struct uli526x_board_info *); 251static void uli526x_set_phyxcer(struct uli526x_board_info *);
255 252
253static void srom_clk_write(struct uli526x_board_info *db, u32 data)
254{
255 void __iomem *ioaddr = db->ioaddr;
256
257 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
258 udelay(5);
259 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
260 udelay(5);
261 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
262 udelay(5);
263}
264
256/* ULI526X network board routine ---------------------------- */ 265/* ULI526X network board routine ---------------------------- */
257 266
258static const struct net_device_ops netdev_ops = { 267static const struct net_device_ops netdev_ops = {
@@ -277,6 +286,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
277{ 286{
278 struct uli526x_board_info *db; /* board information structure */ 287 struct uli526x_board_info *db; /* board information structure */
279 struct net_device *dev; 288 struct net_device *dev;
289 void __iomem *ioaddr;
280 int i, err; 290 int i, err;
281 291
282 ULI526X_DBUG(0, "uli526x_init_one()", 0); 292 ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -313,9 +323,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
313 goto err_out_disable; 323 goto err_out_disable;
314 } 324 }
315 325
316 if (pci_request_regions(pdev, DRV_NAME)) { 326 err = pci_request_regions(pdev, DRV_NAME);
327 if (err < 0) {
317 pr_err("Failed to request PCI regions\n"); 328 pr_err("Failed to request PCI regions\n");
318 err = -ENODEV;
319 goto err_out_disable; 329 goto err_out_disable;
320 } 330 }
321 331
@@ -323,32 +333,41 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
323 db = netdev_priv(dev); 333 db = netdev_priv(dev);
324 334
325 /* Allocate Tx/Rx descriptor memory */ 335 /* Allocate Tx/Rx descriptor memory */
336 err = -ENOMEM;
337
326 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); 338 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
327 if(db->desc_pool_ptr == NULL) 339 if (!db->desc_pool_ptr)
328 { 340 goto err_out_release;
329 err = -ENOMEM; 341
330 goto err_out_nomem;
331 }
332 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); 342 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
333 if(db->buf_pool_ptr == NULL) 343 if (!db->buf_pool_ptr)
334 { 344 goto err_out_free_tx_desc;
335 err = -ENOMEM;
336 goto err_out_nomem;
337 }
338 345
339 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 346 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
340 db->first_tx_desc_dma = db->desc_pool_dma_ptr; 347 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
341 db->buf_pool_start = db->buf_pool_ptr; 348 db->buf_pool_start = db->buf_pool_ptr;
342 db->buf_pool_dma_start = db->buf_pool_dma_ptr; 349 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
343 350
344 db->chip_id = ent->driver_data; 351 switch (ent->driver_data) {
345 db->ioaddr = pci_resource_start(pdev, 0); 352 case PCI_ULI5263_ID:
353 db->phy.write = phy_writeby_cr10;
354 db->phy.read = phy_readby_cr10;
355 break;
356 default:
357 db->phy.write = phy_writeby_cr9;
358 db->phy.read = phy_readby_cr9;
359 break;
360 }
361
362 /* IO region. */
363 ioaddr = pci_iomap(pdev, 0, 0);
364 if (!ioaddr)
365 goto err_out_free_tx_buf;
346 366
367 db->ioaddr = ioaddr;
347 db->pdev = pdev; 368 db->pdev = pdev;
348 db->init = 1; 369 db->init = 1;
349 370
350 dev->base_addr = db->ioaddr;
351 dev->irq = pdev->irq;
352 pci_set_drvdata(pdev, dev); 371 pci_set_drvdata(pdev, dev);
353 372
354 /* Register some necessary functions */ 373 /* Register some necessary functions */
@@ -360,24 +379,24 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
360 379
361 /* read 64 word srom data */ 380 /* read 64 word srom data */
362 for (i = 0; i < 64; i++) 381 for (i = 0; i < 64; i++)
363 ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); 382 ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i));
364 383
365 /* Set Node address */ 384 /* Set Node address */
366 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ 385 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */
367 { 386 {
368 outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode 387 uw32(DCR0, 0x10000); //Diagnosis mode
369 outl(0x1c0, db->ioaddr + DCR13); //Reset dianostic pointer port 388 uw32(DCR13, 0x1c0); //Reset dianostic pointer port
370 outl(0, db->ioaddr + DCR14); //Clear reset port 389 uw32(DCR14, 0); //Clear reset port
371 outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer 390 uw32(DCR14, 0x10); //Reset ID Table pointer
372 outl(0, db->ioaddr + DCR14); //Clear reset port 391 uw32(DCR14, 0); //Clear reset port
373 outl(0, db->ioaddr + DCR13); //Clear CR13 392 uw32(DCR13, 0); //Clear CR13
374 outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port 393 uw32(DCR13, 0x1b0); //Select ID Table access port
375 //Read MAC address from CR14 394 //Read MAC address from CR14
376 for (i = 0; i < 6; i++) 395 for (i = 0; i < 6; i++)
377 dev->dev_addr[i] = inl(db->ioaddr + DCR14); 396 dev->dev_addr[i] = ur32(DCR14);
378 //Read end 397 //Read end
379 outl(0, db->ioaddr + DCR13); //Clear CR13 398 uw32(DCR13, 0); //Clear CR13
380 outl(0, db->ioaddr + DCR0); //Clear CR0 399 uw32(DCR0, 0); //Clear CR0
381 udelay(10); 400 udelay(10);
382 } 401 }
383 else /*Exist SROM*/ 402 else /*Exist SROM*/
@@ -387,26 +406,26 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
387 } 406 }
388 err = register_netdev (dev); 407 err = register_netdev (dev);
389 if (err) 408 if (err)
390 goto err_out_res; 409 goto err_out_unmap;
391 410
392 netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n", 411 netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
393 ent->driver_data >> 16, pci_name(pdev), 412 ent->driver_data >> 16, pci_name(pdev),
394 dev->dev_addr, dev->irq); 413 dev->dev_addr, pdev->irq);
395 414
396 pci_set_master(pdev); 415 pci_set_master(pdev);
397 416
398 return 0; 417 return 0;
399 418
400err_out_res: 419err_out_unmap:
420 pci_iounmap(pdev, db->ioaddr);
421err_out_free_tx_buf:
422 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
423 db->buf_pool_ptr, db->buf_pool_dma_ptr);
424err_out_free_tx_desc:
425 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
426 db->desc_pool_ptr, db->desc_pool_dma_ptr);
427err_out_release:
401 pci_release_regions(pdev); 428 pci_release_regions(pdev);
402err_out_nomem:
403 if(db->desc_pool_ptr)
404 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
405 db->desc_pool_ptr, db->desc_pool_dma_ptr);
406
407 if(db->buf_pool_ptr != NULL)
408 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
409 db->buf_pool_ptr, db->buf_pool_dma_ptr);
410err_out_disable: 429err_out_disable:
411 pci_disable_device(pdev); 430 pci_disable_device(pdev);
412err_out_free: 431err_out_free:
@@ -422,19 +441,17 @@ static void __devexit uli526x_remove_one (struct pci_dev *pdev)
422 struct net_device *dev = pci_get_drvdata(pdev); 441 struct net_device *dev = pci_get_drvdata(pdev);
423 struct uli526x_board_info *db = netdev_priv(dev); 442 struct uli526x_board_info *db = netdev_priv(dev);
424 443
425 ULI526X_DBUG(0, "uli526x_remove_one()", 0); 444 unregister_netdev(dev);
426 445 pci_iounmap(pdev, db->ioaddr);
427 pci_free_consistent(db->pdev, sizeof(struct tx_desc) * 446 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
428 DESC_ALL_CNT + 0x20, db->desc_pool_ptr, 447 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
429 db->desc_pool_dma_ptr); 448 db->desc_pool_dma_ptr);
430 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, 449 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
431 db->buf_pool_ptr, db->buf_pool_dma_ptr); 450 db->buf_pool_ptr, db->buf_pool_dma_ptr);
432 unregister_netdev(dev);
433 pci_release_regions(pdev); 451 pci_release_regions(pdev);
434 free_netdev(dev); /* free board information */
435 pci_set_drvdata(pdev, NULL);
436 pci_disable_device(pdev); 452 pci_disable_device(pdev);
437 ULI526X_DBUG(0, "uli526x_remove_one() exit", 0); 453 pci_set_drvdata(pdev, NULL);
454 free_netdev(dev);
438} 455}
439 456
440 457
@@ -468,7 +485,8 @@ static int uli526x_open(struct net_device *dev)
468 /* Initialize ULI526X board */ 485 /* Initialize ULI526X board */
469 uli526x_init(dev); 486 uli526x_init(dev);
470 487
471 ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev); 488 ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED,
489 dev->name, dev);
472 if (ret) 490 if (ret)
473 return ret; 491 return ret;
474 492
@@ -496,57 +514,57 @@ static int uli526x_open(struct net_device *dev)
496static void uli526x_init(struct net_device *dev) 514static void uli526x_init(struct net_device *dev)
497{ 515{
498 struct uli526x_board_info *db = netdev_priv(dev); 516 struct uli526x_board_info *db = netdev_priv(dev);
499 unsigned long ioaddr = db->ioaddr; 517 struct uli_phy_ops *phy = &db->phy;
518 void __iomem *ioaddr = db->ioaddr;
500 u8 phy_tmp; 519 u8 phy_tmp;
501 u8 timeout; 520 u8 timeout;
502 u16 phy_value;
503 u16 phy_reg_reset; 521 u16 phy_reg_reset;
504 522
505 523
506 ULI526X_DBUG(0, "uli526x_init()", 0); 524 ULI526X_DBUG(0, "uli526x_init()", 0);
507 525
508 /* Reset M526x MAC controller */ 526 /* Reset M526x MAC controller */
509 outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */ 527 uw32(DCR0, ULI526X_RESET); /* RESET MAC */
510 udelay(100); 528 udelay(100);
511 outl(db->cr0_data, ioaddr + DCR0); 529 uw32(DCR0, db->cr0_data);
512 udelay(5); 530 udelay(5);
513 531
514 /* Phy addr : In some boards,M5261/M5263 phy address != 1 */ 532 /* Phy addr : In some boards,M5261/M5263 phy address != 1 */
515 db->phy_addr = 1; 533 db->phy_addr = 1;
516 for(phy_tmp=0;phy_tmp<32;phy_tmp++) 534 for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
517 { 535 u16 phy_value;
518 phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add 536
519 if(phy_value != 0xffff&&phy_value!=0) 537 phy_value = phy->read(db, phy_tmp, 3); //peer add
520 { 538 if (phy_value != 0xffff && phy_value != 0) {
521 db->phy_addr = phy_tmp; 539 db->phy_addr = phy_tmp;
522 break; 540 break;
523 } 541 }
524 } 542 }
525 if(phy_tmp == 32) 543
544 if (phy_tmp == 32)
526 pr_warn("Can not find the phy address!!!\n"); 545 pr_warn("Can not find the phy address!!!\n");
527 /* Parser SROM and media mode */ 546 /* Parser SROM and media mode */
528 db->media_mode = uli526x_media_mode; 547 db->media_mode = uli526x_media_mode;
529 548
530 /* phyxcer capability setting */ 549 /* phyxcer capability setting */
531 phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id); 550 phy_reg_reset = phy->read(db, db->phy_addr, 0);
532 phy_reg_reset = (phy_reg_reset | 0x8000); 551 phy_reg_reset = (phy_reg_reset | 0x8000);
533 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id); 552 phy->write(db, db->phy_addr, 0, phy_reg_reset);
534 553
535 /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management 554 /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
536 * functions") or phy data sheet for details on phy reset 555 * functions") or phy data sheet for details on phy reset
537 */ 556 */
538 udelay(500); 557 udelay(500);
539 timeout = 10; 558 timeout = 10;
540 while (timeout-- && 559 while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000)
541 phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000) 560 udelay(100);
542 udelay(100);
543 561
544 /* Process Phyxcer Media Mode */ 562 /* Process Phyxcer Media Mode */
545 uli526x_set_phyxcer(db); 563 uli526x_set_phyxcer(db);
546 564
547 /* Media Mode Process */ 565 /* Media Mode Process */
548 if ( !(db->media_mode & ULI526X_AUTO) ) 566 if ( !(db->media_mode & ULI526X_AUTO) )
549 db->op_mode = db->media_mode; /* Force Mode */ 567 db->op_mode = db->media_mode; /* Force Mode */
550 568
551 /* Initialize Transmit/Receive decriptor and CR3/4 */ 569 /* Initialize Transmit/Receive decriptor and CR3/4 */
552 uli526x_descriptor_init(dev, ioaddr); 570 uli526x_descriptor_init(dev, ioaddr);
@@ -559,10 +577,10 @@ static void uli526x_init(struct net_device *dev)
559 577
560 /* Init CR7, interrupt active bit */ 578 /* Init CR7, interrupt active bit */
561 db->cr7_data = CR7_DEFAULT; 579 db->cr7_data = CR7_DEFAULT;
562 outl(db->cr7_data, ioaddr + DCR7); 580 uw32(DCR7, db->cr7_data);
563 581
564 /* Init CR15, Tx jabber and Rx watchdog timer */ 582 /* Init CR15, Tx jabber and Rx watchdog timer */
565 outl(db->cr15_data, ioaddr + DCR15); 583 uw32(DCR15, db->cr15_data);
566 584
567 /* Enable ULI526X Tx/Rx function */ 585 /* Enable ULI526X Tx/Rx function */
568 db->cr6_data |= CR6_RXSC | CR6_TXSC; 586 db->cr6_data |= CR6_RXSC | CR6_TXSC;
@@ -579,6 +597,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
579 struct net_device *dev) 597 struct net_device *dev)
580{ 598{
581 struct uli526x_board_info *db = netdev_priv(dev); 599 struct uli526x_board_info *db = netdev_priv(dev);
600 void __iomem *ioaddr = db->ioaddr;
582 struct tx_desc *txptr; 601 struct tx_desc *txptr;
583 unsigned long flags; 602 unsigned long flags;
584 603
@@ -604,7 +623,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
604 } 623 }
605 624
606 /* Disable NIC interrupt */ 625 /* Disable NIC interrupt */
607 outl(0, dev->base_addr + DCR7); 626 uw32(DCR7, 0);
608 627
609 /* transmit this packet */ 628 /* transmit this packet */
610 txptr = db->tx_insert_ptr; 629 txptr = db->tx_insert_ptr;
@@ -615,10 +634,10 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
615 db->tx_insert_ptr = txptr->next_tx_desc; 634 db->tx_insert_ptr = txptr->next_tx_desc;
616 635
617 /* Transmit Packet Process */ 636 /* Transmit Packet Process */
618 if ( (db->tx_packet_cnt < TX_DESC_CNT) ) { 637 if (db->tx_packet_cnt < TX_DESC_CNT) {
619 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 638 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
620 db->tx_packet_cnt++; /* Ready to send */ 639 db->tx_packet_cnt++; /* Ready to send */
621 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 640 uw32(DCR1, 0x1); /* Issue Tx polling */
622 dev->trans_start = jiffies; /* saved time stamp */ 641 dev->trans_start = jiffies; /* saved time stamp */
623 } 642 }
624 643
@@ -628,7 +647,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
628 647
629 /* Restore CR7 to enable interrupt */ 648 /* Restore CR7 to enable interrupt */
630 spin_unlock_irqrestore(&db->lock, flags); 649 spin_unlock_irqrestore(&db->lock, flags);
631 outl(db->cr7_data, dev->base_addr + DCR7); 650 uw32(DCR7, db->cr7_data);
632 651
633 /* free this SKB */ 652 /* free this SKB */
634 dev_kfree_skb(skb); 653 dev_kfree_skb(skb);
@@ -645,9 +664,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
645static int uli526x_stop(struct net_device *dev) 664static int uli526x_stop(struct net_device *dev)
646{ 665{
647 struct uli526x_board_info *db = netdev_priv(dev); 666 struct uli526x_board_info *db = netdev_priv(dev);
648 unsigned long ioaddr = dev->base_addr; 667 void __iomem *ioaddr = db->ioaddr;
649
650 ULI526X_DBUG(0, "uli526x_stop", 0);
651 668
652 /* disable system */ 669 /* disable system */
653 netif_stop_queue(dev); 670 netif_stop_queue(dev);
@@ -656,12 +673,12 @@ static int uli526x_stop(struct net_device *dev)
656 del_timer_sync(&db->timer); 673 del_timer_sync(&db->timer);
657 674
658 /* Reset & stop ULI526X board */ 675 /* Reset & stop ULI526X board */
659 outl(ULI526X_RESET, ioaddr + DCR0); 676 uw32(DCR0, ULI526X_RESET);
660 udelay(5); 677 udelay(5);
661 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); 678 db->phy.write(db, db->phy_addr, 0, 0x8000);
662 679
663 /* free interrupt */ 680 /* free interrupt */
664 free_irq(dev->irq, dev); 681 free_irq(db->pdev->irq, dev);
665 682
666 /* free allocated rx buffer */ 683 /* free allocated rx buffer */
667 uli526x_free_rxbuffer(db); 684 uli526x_free_rxbuffer(db);
@@ -679,18 +696,18 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
679{ 696{
680 struct net_device *dev = dev_id; 697 struct net_device *dev = dev_id;
681 struct uli526x_board_info *db = netdev_priv(dev); 698 struct uli526x_board_info *db = netdev_priv(dev);
682 unsigned long ioaddr = dev->base_addr; 699 void __iomem *ioaddr = db->ioaddr;
683 unsigned long flags; 700 unsigned long flags;
684 701
685 spin_lock_irqsave(&db->lock, flags); 702 spin_lock_irqsave(&db->lock, flags);
686 outl(0, ioaddr + DCR7); 703 uw32(DCR7, 0);
687 704
688 /* Got ULI526X status */ 705 /* Got ULI526X status */
689 db->cr5_data = inl(ioaddr + DCR5); 706 db->cr5_data = ur32(DCR5);
690 outl(db->cr5_data, ioaddr + DCR5); 707 uw32(DCR5, db->cr5_data);
691 if ( !(db->cr5_data & 0x180c1) ) { 708 if ( !(db->cr5_data & 0x180c1) ) {
692 /* Restore CR7 to enable interrupt mask */ 709 /* Restore CR7 to enable interrupt mask */
693 outl(db->cr7_data, ioaddr + DCR7); 710 uw32(DCR7, db->cr7_data);
694 spin_unlock_irqrestore(&db->lock, flags); 711 spin_unlock_irqrestore(&db->lock, flags);
695 return IRQ_HANDLED; 712 return IRQ_HANDLED;
696 } 713 }
@@ -718,7 +735,7 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
718 uli526x_free_tx_pkt(dev, db); 735 uli526x_free_tx_pkt(dev, db);
719 736
720 /* Restore CR7 to enable interrupt mask */ 737 /* Restore CR7 to enable interrupt mask */
721 outl(db->cr7_data, ioaddr + DCR7); 738 uw32(DCR7, db->cr7_data);
722 739
723 spin_unlock_irqrestore(&db->lock, flags); 740 spin_unlock_irqrestore(&db->lock, flags);
724 return IRQ_HANDLED; 741 return IRQ_HANDLED;
@@ -727,8 +744,10 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
727#ifdef CONFIG_NET_POLL_CONTROLLER 744#ifdef CONFIG_NET_POLL_CONTROLLER
728static void uli526x_poll(struct net_device *dev) 745static void uli526x_poll(struct net_device *dev)
729{ 746{
747 struct uli526x_board_info *db = netdev_priv(dev);
748
730 /* ISR grabs the irqsave lock, so this should be safe */ 749 /* ISR grabs the irqsave lock, so this should be safe */
731 uli526x_interrupt(dev->irq, dev); 750 uli526x_interrupt(db->pdev->irq, dev);
732} 751}
733#endif 752#endif
734 753
@@ -962,12 +981,7 @@ static void netdev_get_drvinfo(struct net_device *dev,
962 981
963 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 982 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
964 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 983 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
965 if (np->pdev) 984 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
966 strlcpy(info->bus_info, pci_name(np->pdev),
967 sizeof(info->bus_info));
968 else
969 sprintf(info->bus_info, "EISA 0x%lx %d",
970 dev->base_addr, dev->irq);
971} 985}
972 986
973static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { 987static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
@@ -1007,18 +1021,20 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1007 1021
1008static void uli526x_timer(unsigned long data) 1022static void uli526x_timer(unsigned long data)
1009{ 1023{
1010 u32 tmp_cr8;
1011 unsigned char tmp_cr12=0;
1012 struct net_device *dev = (struct net_device *) data; 1024 struct net_device *dev = (struct net_device *) data;
1013 struct uli526x_board_info *db = netdev_priv(dev); 1025 struct uli526x_board_info *db = netdev_priv(dev);
1026 struct uli_phy_ops *phy = &db->phy;
1027 void __iomem *ioaddr = db->ioaddr;
1014 unsigned long flags; 1028 unsigned long flags;
1029 u8 tmp_cr12 = 0;
1030 u32 tmp_cr8;
1015 1031
1016 //ULI526X_DBUG(0, "uli526x_timer()", 0); 1032 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1017 spin_lock_irqsave(&db->lock, flags); 1033 spin_lock_irqsave(&db->lock, flags);
1018 1034
1019 1035
1020 /* Dynamic reset ULI526X : system error or transmit time-out */ 1036 /* Dynamic reset ULI526X : system error or transmit time-out */
1021 tmp_cr8 = inl(db->ioaddr + DCR8); 1037 tmp_cr8 = ur32(DCR8);
1022 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { 1038 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1023 db->reset_cr8++; 1039 db->reset_cr8++;
1024 db->wait_reset = 1; 1040 db->wait_reset = 1;
@@ -1028,7 +1044,7 @@ static void uli526x_timer(unsigned long data)
1028 /* TX polling kick monitor */ 1044 /* TX polling kick monitor */
1029 if ( db->tx_packet_cnt && 1045 if ( db->tx_packet_cnt &&
1030 time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) { 1046 time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
1031 outl(0x1, dev->base_addr + DCR1); // Tx polling again 1047 uw32(DCR1, 0x1); // Tx polling again
1032 1048
1033 // TX Timeout 1049 // TX Timeout
1034 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) { 1050 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
@@ -1049,7 +1065,7 @@ static void uli526x_timer(unsigned long data)
1049 } 1065 }
1050 1066
1051 /* Link status check, Dynamic media type change */ 1067 /* Link status check, Dynamic media type change */
1052 if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0) 1068 if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0)
1053 tmp_cr12 = 3; 1069 tmp_cr12 = 3;
1054 1070
1055 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { 1071 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
@@ -1062,7 +1078,7 @@ static void uli526x_timer(unsigned long data)
1062 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1078 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1063 /* AUTO don't need */ 1079 /* AUTO don't need */
1064 if ( !(db->media_mode & 0x8) ) 1080 if ( !(db->media_mode & 0x8) )
1065 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); 1081 phy->write(db, db->phy_addr, 0, 0x1000);
1066 1082
1067 /* AUTO mode, if INT phyxcer link failed, select EXT device */ 1083 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1068 if (db->media_mode & ULI526X_AUTO) { 1084 if (db->media_mode & ULI526X_AUTO) {
@@ -1119,12 +1135,13 @@ static void uli526x_timer(unsigned long data)
1119static void uli526x_reset_prepare(struct net_device *dev) 1135static void uli526x_reset_prepare(struct net_device *dev)
1120{ 1136{
1121 struct uli526x_board_info *db = netdev_priv(dev); 1137 struct uli526x_board_info *db = netdev_priv(dev);
1138 void __iomem *ioaddr = db->ioaddr;
1122 1139
1123 /* Sopt MAC controller */ 1140 /* Sopt MAC controller */
1124 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ 1141 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1125 update_cr6(db->cr6_data, dev->base_addr); 1142 update_cr6(db->cr6_data, ioaddr);
1126 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ 1143 uw32(DCR7, 0); /* Disable Interrupt */
1127 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); 1144 uw32(DCR5, ur32(DCR5));
1128 1145
1129 /* Disable upper layer interface */ 1146 /* Disable upper layer interface */
1130 netif_stop_queue(dev); 1147 netif_stop_queue(dev);
@@ -1289,7 +1306,7 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
1289 * Using Chain structure, and allocate Tx/Rx buffer 1306 * Using Chain structure, and allocate Tx/Rx buffer
1290 */ 1307 */
1291 1308
1292static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr) 1309static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr)
1293{ 1310{
1294 struct uli526x_board_info *db = netdev_priv(dev); 1311 struct uli526x_board_info *db = netdev_priv(dev);
1295 struct tx_desc *tmp_tx; 1312 struct tx_desc *tmp_tx;
@@ -1304,14 +1321,14 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr
1304 /* tx descriptor start pointer */ 1321 /* tx descriptor start pointer */
1305 db->tx_insert_ptr = db->first_tx_desc; 1322 db->tx_insert_ptr = db->first_tx_desc;
1306 db->tx_remove_ptr = db->first_tx_desc; 1323 db->tx_remove_ptr = db->first_tx_desc;
1307 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ 1324 uw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */
1308 1325
1309 /* rx descriptor start pointer */ 1326 /* rx descriptor start pointer */
1310 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; 1327 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1311 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; 1328 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
1312 db->rx_insert_ptr = db->first_rx_desc; 1329 db->rx_insert_ptr = db->first_rx_desc;
1313 db->rx_ready_ptr = db->first_rx_desc; 1330 db->rx_ready_ptr = db->first_rx_desc;
1314 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ 1331 uw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */
1315 1332
1316 /* Init Transmit chain */ 1333 /* Init Transmit chain */
1317 tmp_buf = db->buf_pool_start; 1334 tmp_buf = db->buf_pool_start;
@@ -1352,11 +1369,9 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr
1352 * Update CR6 value 1369 * Update CR6 value
1353 * Firstly stop ULI526X, then written value and start 1370 * Firstly stop ULI526X, then written value and start
1354 */ 1371 */
1355 1372static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1356static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1357{ 1373{
1358 1374 uw32(DCR6, cr6_data);
1359 outl(cr6_data, ioaddr + DCR6);
1360 udelay(5); 1375 udelay(5);
1361} 1376}
1362 1377
@@ -1375,6 +1390,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1375static void send_filter_frame(struct net_device *dev, int mc_cnt) 1390static void send_filter_frame(struct net_device *dev, int mc_cnt)
1376{ 1391{
1377 struct uli526x_board_info *db = netdev_priv(dev); 1392 struct uli526x_board_info *db = netdev_priv(dev);
1393 void __iomem *ioaddr = db->ioaddr;
1378 struct netdev_hw_addr *ha; 1394 struct netdev_hw_addr *ha;
1379 struct tx_desc *txptr; 1395 struct tx_desc *txptr;
1380 u16 * addrptr; 1396 u16 * addrptr;
@@ -1420,9 +1436,9 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1420 /* Resource Empty */ 1436 /* Resource Empty */
1421 db->tx_packet_cnt++; 1437 db->tx_packet_cnt++;
1422 txptr->tdes0 = cpu_to_le32(0x80000000); 1438 txptr->tdes0 = cpu_to_le32(0x80000000);
1423 update_cr6(db->cr6_data | 0x2000, dev->base_addr); 1439 update_cr6(db->cr6_data | 0x2000, ioaddr);
1424 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 1440 uw32(DCR1, 0x1); /* Issue Tx polling */
1425 update_cr6(db->cr6_data, dev->base_addr); 1441 update_cr6(db->cr6_data, ioaddr);
1426 dev->trans_start = jiffies; 1442 dev->trans_start = jiffies;
1427 } else 1443 } else
1428 netdev_err(dev, "No Tx resource - Send_filter_frame!\n"); 1444 netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
@@ -1465,37 +1481,38 @@ static void allocate_rx_buffer(struct net_device *dev)
1465 * Read one word data from the serial ROM 1481 * Read one word data from the serial ROM
1466 */ 1482 */
1467 1483
1468static u16 read_srom_word(long ioaddr, int offset) 1484static u16 read_srom_word(struct uli526x_board_info *db, int offset)
1469{ 1485{
1470 int i; 1486 void __iomem *ioaddr = db->ioaddr;
1471 u16 srom_data = 0; 1487 u16 srom_data = 0;
1472 long cr9_ioaddr = ioaddr + DCR9; 1488 int i;
1473 1489
1474 outl(CR9_SROM_READ, cr9_ioaddr); 1490 uw32(DCR9, CR9_SROM_READ);
1475 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1491 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1476 1492
1477 /* Send the Read Command 110b */ 1493 /* Send the Read Command 110b */
1478 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1494 srom_clk_write(db, SROM_DATA_1);
1479 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1495 srom_clk_write(db, SROM_DATA_1);
1480 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); 1496 srom_clk_write(db, SROM_DATA_0);
1481 1497
1482 /* Send the offset */ 1498 /* Send the offset */
1483 for (i = 5; i >= 0; i--) { 1499 for (i = 5; i >= 0; i--) {
1484 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; 1500 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1485 SROM_CLK_WRITE(srom_data, cr9_ioaddr); 1501 srom_clk_write(db, srom_data);
1486 } 1502 }
1487 1503
1488 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1504 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1489 1505
1490 for (i = 16; i > 0; i--) { 1506 for (i = 16; i > 0; i--) {
1491 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); 1507 uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1492 udelay(5); 1508 udelay(5);
1493 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); 1509 srom_data = (srom_data << 1) |
1494 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1510 ((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1511 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1495 udelay(5); 1512 udelay(5);
1496 } 1513 }
1497 1514
1498 outl(CR9_SROM_READ, cr9_ioaddr); 1515 uw32(DCR9, CR9_SROM_READ);
1499 return srom_data; 1516 return srom_data;
1500} 1517}
1501 1518
@@ -1506,15 +1523,16 @@ static u16 read_srom_word(long ioaddr, int offset)
1506 1523
1507static u8 uli526x_sense_speed(struct uli526x_board_info * db) 1524static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1508{ 1525{
1526 struct uli_phy_ops *phy = &db->phy;
1509 u8 ErrFlag = 0; 1527 u8 ErrFlag = 0;
1510 u16 phy_mode; 1528 u16 phy_mode;
1511 1529
1512 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1530 phy_mode = phy->read(db, db->phy_addr, 1);
1513 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1531 phy_mode = phy->read(db, db->phy_addr, 1);
1514 1532
1515 if ( (phy_mode & 0x24) == 0x24 ) { 1533 if ( (phy_mode & 0x24) == 0x24 ) {
1516 1534
1517 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); 1535 phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7);
1518 if(phy_mode&0x8000) 1536 if(phy_mode&0x8000)
1519 phy_mode = 0x8000; 1537 phy_mode = 0x8000;
1520 else if(phy_mode&0x4000) 1538 else if(phy_mode&0x4000)
@@ -1549,10 +1567,11 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1549 1567
1550static void uli526x_set_phyxcer(struct uli526x_board_info *db) 1568static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1551{ 1569{
1570 struct uli_phy_ops *phy = &db->phy;
1552 u16 phy_reg; 1571 u16 phy_reg;
1553 1572
1554 /* Phyxcer capability setting */ 1573 /* Phyxcer capability setting */
1555 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; 1574 phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0;
1556 1575
1557 if (db->media_mode & ULI526X_AUTO) { 1576 if (db->media_mode & ULI526X_AUTO) {
1558 /* AUTO Mode */ 1577 /* AUTO Mode */
@@ -1573,10 +1592,10 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1573 phy_reg|=db->PHY_reg4; 1592 phy_reg|=db->PHY_reg4;
1574 db->media_mode|=ULI526X_AUTO; 1593 db->media_mode|=ULI526X_AUTO;
1575 } 1594 }
1576 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); 1595 phy->write(db, db->phy_addr, 4, phy_reg);
1577 1596
1578 /* Restart Auto-Negotiation */ 1597 /* Restart Auto-Negotiation */
1579 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); 1598 phy->write(db, db->phy_addr, 0, 0x1200);
1580 udelay(50); 1599 udelay(50);
1581} 1600}
1582 1601
@@ -1590,6 +1609,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1590 1609
1591static void uli526x_process_mode(struct uli526x_board_info *db) 1610static void uli526x_process_mode(struct uli526x_board_info *db)
1592{ 1611{
1612 struct uli_phy_ops *phy = &db->phy;
1593 u16 phy_reg; 1613 u16 phy_reg;
1594 1614
1595 /* Full Duplex Mode Check */ 1615 /* Full Duplex Mode Check */
@@ -1601,10 +1621,10 @@ static void uli526x_process_mode(struct uli526x_board_info *db)
1601 update_cr6(db->cr6_data, db->ioaddr); 1621 update_cr6(db->cr6_data, db->ioaddr);
1602 1622
1603 /* 10/100M phyxcer force mode need */ 1623 /* 10/100M phyxcer force mode need */
1604 if ( !(db->media_mode & 0x8)) { 1624 if (!(db->media_mode & 0x8)) {
1605 /* Forece Mode */ 1625 /* Forece Mode */
1606 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); 1626 phy_reg = phy->read(db, db->phy_addr, 6);
1607 if ( !(phy_reg & 0x1) ) { 1627 if (!(phy_reg & 0x1)) {
1608 /* parter without N-Way capability */ 1628 /* parter without N-Way capability */
1609 phy_reg = 0x0; 1629 phy_reg = 0x0;
1610 switch(db->op_mode) { 1630 switch(db->op_mode) {
@@ -1613,148 +1633,126 @@ static void uli526x_process_mode(struct uli526x_board_info *db)
1613 case ULI526X_100MHF: phy_reg = 0x2000; break; 1633 case ULI526X_100MHF: phy_reg = 0x2000; break;
1614 case ULI526X_100MFD: phy_reg = 0x2100; break; 1634 case ULI526X_100MFD: phy_reg = 0x2100; break;
1615 } 1635 }
1616 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); 1636 phy->write(db, db->phy_addr, 0, phy_reg);
1617 } 1637 }
1618 } 1638 }
1619} 1639}
1620 1640
1621 1641
1622/* 1642/* M5261/M5263 Chip */
1623 * Write a word to Phy register 1643static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr,
1624 */ 1644 u8 offset, u16 phy_data)
1625
1626static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1627{ 1645{
1628 u16 i; 1646 u16 i;
1629 unsigned long ioaddr;
1630
1631 if(chip_id == PCI_ULI5263_ID)
1632 {
1633 phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
1634 return;
1635 }
1636 /* M5261/M5263 Chip */
1637 ioaddr = iobase + DCR9;
1638 1647
1639 /* Send 33 synchronization clock to Phy controller */ 1648 /* Send 33 synchronization clock to Phy controller */
1640 for (i = 0; i < 35; i++) 1649 for (i = 0; i < 35; i++)
1641 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1650 phy_write_1bit(db, PHY_DATA_1);
1642 1651
1643 /* Send start command(01) to Phy */ 1652 /* Send start command(01) to Phy */
1644 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1653 phy_write_1bit(db, PHY_DATA_0);
1645 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1654 phy_write_1bit(db, PHY_DATA_1);
1646 1655
1647 /* Send write command(01) to Phy */ 1656 /* Send write command(01) to Phy */
1648 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1657 phy_write_1bit(db, PHY_DATA_0);
1649 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1658 phy_write_1bit(db, PHY_DATA_1);
1650 1659
1651 /* Send Phy address */ 1660 /* Send Phy address */
1652 for (i = 0x10; i > 0; i = i >> 1) 1661 for (i = 0x10; i > 0; i = i >> 1)
1653 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1662 phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1654 1663
1655 /* Send register address */ 1664 /* Send register address */
1656 for (i = 0x10; i > 0; i = i >> 1) 1665 for (i = 0x10; i > 0; i = i >> 1)
1657 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1666 phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1658 1667
1659 /* written trasnition */ 1668 /* written trasnition */
1660 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1669 phy_write_1bit(db, PHY_DATA_1);
1661 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1670 phy_write_1bit(db, PHY_DATA_0);
1662 1671
1663 /* Write a word data to PHY controller */ 1672 /* Write a word data to PHY controller */
1664 for ( i = 0x8000; i > 0; i >>= 1) 1673 for (i = 0x8000; i > 0; i >>= 1)
1665 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1674 phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1666
1667} 1675}
1668 1676
1669 1677static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset)
1670/*
1671 * Read a word data from phy register
1672 */
1673
1674static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1675{ 1678{
1676 int i;
1677 u16 phy_data; 1679 u16 phy_data;
1678 unsigned long ioaddr; 1680 int i;
1679
1680 if(chip_id == PCI_ULI5263_ID)
1681 return phy_readby_cr10(iobase, phy_addr, offset);
1682 /* M5261/M5263 Chip */
1683 ioaddr = iobase + DCR9;
1684 1681
1685 /* Send 33 synchronization clock to Phy controller */ 1682 /* Send 33 synchronization clock to Phy controller */
1686 for (i = 0; i < 35; i++) 1683 for (i = 0; i < 35; i++)
1687 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1684 phy_write_1bit(db, PHY_DATA_1);
1688 1685
1689 /* Send start command(01) to Phy */ 1686 /* Send start command(01) to Phy */
1690 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1687 phy_write_1bit(db, PHY_DATA_0);
1691 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1688 phy_write_1bit(db, PHY_DATA_1);
1692 1689
1693 /* Send read command(10) to Phy */ 1690 /* Send read command(10) to Phy */
1694 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1691 phy_write_1bit(db, PHY_DATA_1);
1695 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1692 phy_write_1bit(db, PHY_DATA_0);
1696 1693
1697 /* Send Phy address */ 1694 /* Send Phy address */
1698 for (i = 0x10; i > 0; i = i >> 1) 1695 for (i = 0x10; i > 0; i = i >> 1)
1699 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1696 phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1700 1697
1701 /* Send register address */ 1698 /* Send register address */
1702 for (i = 0x10; i > 0; i = i >> 1) 1699 for (i = 0x10; i > 0; i = i >> 1)
1703 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1700 phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1704 1701
1705 /* Skip transition state */ 1702 /* Skip transition state */
1706 phy_read_1bit(ioaddr, chip_id); 1703 phy_read_1bit(db);
1707 1704
1708 /* read 16bit data */ 1705 /* read 16bit data */
1709 for (phy_data = 0, i = 0; i < 16; i++) { 1706 for (phy_data = 0, i = 0; i < 16; i++) {
1710 phy_data <<= 1; 1707 phy_data <<= 1;
1711 phy_data |= phy_read_1bit(ioaddr, chip_id); 1708 phy_data |= phy_read_1bit(db);
1712 } 1709 }
1713 1710
1714 return phy_data; 1711 return phy_data;
1715} 1712}
1716 1713
1717static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) 1714static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr,
1715 u8 offset)
1718{ 1716{
1719 unsigned long ioaddr,cr10_value; 1717 void __iomem *ioaddr = db->ioaddr;
1718 u32 cr10_value = phy_addr;
1720 1719
1721 ioaddr = iobase + DCR10; 1720 cr10_value = (cr10_value << 5) + offset;
1722 cr10_value = phy_addr; 1721 cr10_value = (cr10_value << 16) + 0x08000000;
1723 cr10_value = (cr10_value<<5) + offset; 1722 uw32(DCR10, cr10_value);
1724 cr10_value = (cr10_value<<16) + 0x08000000;
1725 outl(cr10_value,ioaddr);
1726 udelay(1); 1723 udelay(1);
1727 while(1) 1724 while (1) {
1728 { 1725 cr10_value = ur32(DCR10);
1729 cr10_value = inl(ioaddr); 1726 if (cr10_value & 0x10000000)
1730 if(cr10_value&0x10000000)
1731 break; 1727 break;
1732 } 1728 }
1733 return cr10_value & 0x0ffff; 1729 return cr10_value & 0x0ffff;
1734} 1730}
1735 1731
1736static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) 1732static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr,
1733 u8 offset, u16 phy_data)
1737{ 1734{
1738 unsigned long ioaddr,cr10_value; 1735 void __iomem *ioaddr = db->ioaddr;
1736 u32 cr10_value = phy_addr;
1739 1737
1740 ioaddr = iobase + DCR10; 1738 cr10_value = (cr10_value << 5) + offset;
1741 cr10_value = phy_addr; 1739 cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
1742 cr10_value = (cr10_value<<5) + offset; 1740 uw32(DCR10, cr10_value);
1743 cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
1744 outl(cr10_value,ioaddr);
1745 udelay(1); 1741 udelay(1);
1746} 1742}
1747/* 1743/*
1748 * Write one bit data to Phy Controller 1744 * Write one bit data to Phy Controller
1749 */ 1745 */
1750 1746
1751static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id) 1747static void phy_write_1bit(struct uli526x_board_info *db, u32 data)
1752{ 1748{
1753 outl(phy_data , ioaddr); /* MII Clock Low */ 1749 void __iomem *ioaddr = db->ioaddr;
1750
1751 uw32(DCR9, data); /* MII Clock Low */
1754 udelay(1); 1752 udelay(1);
1755 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ 1753 uw32(DCR9, data | MDCLKH); /* MII Clock High */
1756 udelay(1); 1754 udelay(1);
1757 outl(phy_data , ioaddr); /* MII Clock Low */ 1755 uw32(DCR9, data); /* MII Clock Low */
1758 udelay(1); 1756 udelay(1);
1759} 1757}
1760 1758
@@ -1763,14 +1761,15 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
1763 * Read one bit phy data from PHY controller 1761 * Read one bit phy data from PHY controller
1764 */ 1762 */
1765 1763
1766static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) 1764static u16 phy_read_1bit(struct uli526x_board_info *db)
1767{ 1765{
1766 void __iomem *ioaddr = db->ioaddr;
1768 u16 phy_data; 1767 u16 phy_data;
1769 1768
1770 outl(0x50000 , ioaddr); 1769 uw32(DCR9, 0x50000);
1771 udelay(1); 1770 udelay(1);
1772 phy_data = ( inl(ioaddr) >> 19 ) & 0x1; 1771 phy_data = (ur32(DCR9) >> 19) & 0x1;
1773 outl(0x40000 , ioaddr); 1772 uw32(DCR9, 0x40000);
1774 udelay(1); 1773 udelay(1);
1775 1774
1776 return phy_data; 1775 return phy_data;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 2ac6fff0363a..4d1ffca83c82 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -400,9 +400,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
400 No hold time required! */ 400 No hold time required! */
401 iowrite32(0x00000001, ioaddr + PCIBusCfg); 401 iowrite32(0x00000001, ioaddr + PCIBusCfg);
402 402
403 dev->base_addr = (unsigned long)ioaddr;
404 dev->irq = irq;
405
406 np = netdev_priv(dev); 403 np = netdev_priv(dev);
407 np->pci_dev = pdev; 404 np->pci_dev = pdev;
408 np->chip_id = chip_idx; 405 np->chip_id = chip_idx;
@@ -635,17 +632,18 @@ static int netdev_open(struct net_device *dev)
635{ 632{
636 struct netdev_private *np = netdev_priv(dev); 633 struct netdev_private *np = netdev_priv(dev);
637 void __iomem *ioaddr = np->base_addr; 634 void __iomem *ioaddr = np->base_addr;
635 const int irq = np->pci_dev->irq;
638 int i; 636 int i;
639 637
640 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ 638 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
641 639
642 netif_device_detach(dev); 640 netif_device_detach(dev);
643 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 641 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
644 if (i) 642 if (i)
645 goto out_err; 643 goto out_err;
646 644
647 if (debug > 1) 645 if (debug > 1)
648 netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq); 646 netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
649 647
650 if((i=alloc_ringdesc(dev))) 648 if((i=alloc_ringdesc(dev)))
651 goto out_err; 649 goto out_err;
@@ -932,6 +930,7 @@ static void tx_timeout(struct net_device *dev)
932{ 930{
933 struct netdev_private *np = netdev_priv(dev); 931 struct netdev_private *np = netdev_priv(dev);
934 void __iomem *ioaddr = np->base_addr; 932 void __iomem *ioaddr = np->base_addr;
933 const int irq = np->pci_dev->irq;
935 934
936 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n", 935 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
937 ioread32(ioaddr + IntrStatus)); 936 ioread32(ioaddr + IntrStatus));
@@ -951,7 +950,7 @@ static void tx_timeout(struct net_device *dev)
951 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); 950 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
952 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C)); 951 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
953 952
954 disable_irq(dev->irq); 953 disable_irq(irq);
955 spin_lock_irq(&np->lock); 954 spin_lock_irq(&np->lock);
956 /* 955 /*
957 * Under high load dirty_tx and the internal tx descriptor pointer 956 * Under high load dirty_tx and the internal tx descriptor pointer
@@ -966,7 +965,7 @@ static void tx_timeout(struct net_device *dev)
966 init_rxtx_rings(dev); 965 init_rxtx_rings(dev);
967 init_registers(dev); 966 init_registers(dev);
968 spin_unlock_irq(&np->lock); 967 spin_unlock_irq(&np->lock);
969 enable_irq(dev->irq); 968 enable_irq(irq);
970 969
971 netif_wake_queue(dev); 970 netif_wake_queue(dev);
972 dev->trans_start = jiffies; /* prevent tx timeout */ 971 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1500,7 +1499,7 @@ static int netdev_close(struct net_device *dev)
1500 iowrite32(0x0000, ioaddr + IntrEnable); 1499 iowrite32(0x0000, ioaddr + IntrEnable);
1501 spin_unlock_irq(&np->lock); 1500 spin_unlock_irq(&np->lock);
1502 1501
1503 free_irq(dev->irq, dev); 1502 free_irq(np->pci_dev->irq, dev);
1504 wmb(); 1503 wmb();
1505 netif_device_attach(dev); 1504 netif_device_attach(dev);
1506 1505
@@ -1589,7 +1588,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1589 iowrite32(0, ioaddr + IntrEnable); 1588 iowrite32(0, ioaddr + IntrEnable);
1590 spin_unlock_irq(&np->lock); 1589 spin_unlock_irq(&np->lock);
1591 1590
1592 synchronize_irq(dev->irq); 1591 synchronize_irq(np->pci_dev->irq);
1593 netif_tx_disable(dev); 1592 netif_tx_disable(dev);
1594 1593
1595 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; 1594 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index fdb329fe6e8e..138bf83bc98e 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -41,7 +41,9 @@ MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
41MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>"); 41MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43 43
44 44#define xw32(reg, val) iowrite32(val, ioaddr + (reg))
45#define xr32(reg) ioread32(ioaddr + (reg))
46#define xr8(reg) ioread8(ioaddr + (reg))
45 47
46/* IO registers on the card, offsets */ 48/* IO registers on the card, offsets */
47#define CSR0 0x00 49#define CSR0 0x00
@@ -83,7 +85,7 @@ struct xircom_private {
83 85
84 struct sk_buff *tx_skb[4]; 86 struct sk_buff *tx_skb[4];
85 87
86 unsigned long io_port; 88 void __iomem *ioaddr;
87 int open; 89 int open;
88 90
89 /* transmit_used is the rotating counter that indicates which transmit 91 /* transmit_used is the rotating counter that indicates which transmit
@@ -137,7 +139,7 @@ static int link_status(struct xircom_private *card);
137 139
138 140
139static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = { 141static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
140 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, 142 { PCI_VDEVICE(XIRCOM, 0x0003), },
141 {0,}, 143 {0,},
142}; 144};
143MODULE_DEVICE_TABLE(pci, xircom_pci_table); 145MODULE_DEVICE_TABLE(pci, xircom_pci_table);
@@ -146,9 +148,7 @@ static struct pci_driver xircom_ops = {
146 .name = "xircom_cb", 148 .name = "xircom_cb",
147 .id_table = xircom_pci_table, 149 .id_table = xircom_pci_table,
148 .probe = xircom_probe, 150 .probe = xircom_probe,
149 .remove = xircom_remove, 151 .remove = __devexit_p(xircom_remove),
150 .suspend =NULL,
151 .resume =NULL
152}; 152};
153 153
154 154
@@ -192,15 +192,18 @@ static const struct net_device_ops netdev_ops = {
192 */ 192 */
193static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) 193static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
194{ 194{
195 struct device *d = &pdev->dev;
195 struct net_device *dev = NULL; 196 struct net_device *dev = NULL;
196 struct xircom_private *private; 197 struct xircom_private *private;
197 unsigned long flags; 198 unsigned long flags;
198 unsigned short tmp16; 199 unsigned short tmp16;
200 int rc;
199 201
200 /* First do the PCI initialisation */ 202 /* First do the PCI initialisation */
201 203
202 if (pci_enable_device(pdev)) 204 rc = pci_enable_device(pdev);
203 return -ENODEV; 205 if (rc < 0)
206 goto out;
204 207
205 /* disable all powermanagement */ 208 /* disable all powermanagement */
206 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); 209 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
@@ -211,11 +214,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
211 pci_read_config_word (pdev,PCI_STATUS, &tmp16); 214 pci_read_config_word (pdev,PCI_STATUS, &tmp16);
212 pci_write_config_word (pdev, PCI_STATUS,tmp16); 215 pci_write_config_word (pdev, PCI_STATUS,tmp16);
213 216
214 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { 217 rc = pci_request_regions(pdev, "xircom_cb");
218 if (rc < 0) {
215 pr_err("%s: failed to allocate io-region\n", __func__); 219 pr_err("%s: failed to allocate io-region\n", __func__);
216 return -ENODEV; 220 goto err_disable;
217 } 221 }
218 222
223 rc = -ENOMEM;
219 /* 224 /*
220 Before changing the hardware, allocate the memory. 225 Before changing the hardware, allocate the memory.
221 This way, we can fail gracefully if not enough memory 226 This way, we can fail gracefully if not enough memory
@@ -223,17 +228,21 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
223 */ 228 */
224 dev = alloc_etherdev(sizeof(struct xircom_private)); 229 dev = alloc_etherdev(sizeof(struct xircom_private));
225 if (!dev) 230 if (!dev)
226 goto device_fail; 231 goto err_release;
227 232
228 private = netdev_priv(dev); 233 private = netdev_priv(dev);
229 234
230 /* Allocate the send/receive buffers */ 235 /* Allocate the send/receive buffers */
231 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); 236 private->rx_buffer = dma_alloc_coherent(d, 8192,
237 &private->rx_dma_handle,
238 GFP_KERNEL);
232 if (private->rx_buffer == NULL) { 239 if (private->rx_buffer == NULL) {
233 pr_err("%s: no memory for rx buffer\n", __func__); 240 pr_err("%s: no memory for rx buffer\n", __func__);
234 goto rx_buf_fail; 241 goto rx_buf_fail;
235 } 242 }
236 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); 243 private->tx_buffer = dma_alloc_coherent(d, 8192,
244 &private->tx_dma_handle,
245 GFP_KERNEL);
237 if (private->tx_buffer == NULL) { 246 if (private->tx_buffer == NULL) {
238 pr_err("%s: no memory for tx buffer\n", __func__); 247 pr_err("%s: no memory for tx buffer\n", __func__);
239 goto tx_buf_fail; 248 goto tx_buf_fail;
@@ -244,10 +253,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
244 253
245 private->dev = dev; 254 private->dev = dev;
246 private->pdev = pdev; 255 private->pdev = pdev;
247 private->io_port = pci_resource_start(pdev, 0); 256
257 /* IO range. */
258 private->ioaddr = pci_iomap(pdev, 0, 0);
259 if (!private->ioaddr)
260 goto reg_fail;
261
248 spin_lock_init(&private->lock); 262 spin_lock_init(&private->lock);
249 dev->irq = pdev->irq;
250 dev->base_addr = private->io_port;
251 263
252 initialize_card(private); 264 initialize_card(private);
253 read_mac_address(private); 265 read_mac_address(private);
@@ -256,9 +268,10 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
256 dev->netdev_ops = &netdev_ops; 268 dev->netdev_ops = &netdev_ops;
257 pci_set_drvdata(pdev, dev); 269 pci_set_drvdata(pdev, dev);
258 270
259 if (register_netdev(dev)) { 271 rc = register_netdev(dev);
272 if (rc < 0) {
260 pr_err("%s: netdevice registration failed\n", __func__); 273 pr_err("%s: netdevice registration failed\n", __func__);
261 goto reg_fail; 274 goto err_unmap;
262 } 275 }
263 276
264 netdev_info(dev, "Xircom cardbus revision %i at irq %i\n", 277 netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
@@ -273,17 +286,23 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
273 spin_unlock_irqrestore(&private->lock,flags); 286 spin_unlock_irqrestore(&private->lock,flags);
274 287
275 trigger_receive(private); 288 trigger_receive(private);
289out:
290 return rc;
276 291
277 return 0; 292err_unmap:
278 293 pci_iounmap(pdev, private->ioaddr);
279reg_fail: 294reg_fail:
280 kfree(private->tx_buffer); 295 pci_set_drvdata(pdev, NULL);
296 dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
281tx_buf_fail: 297tx_buf_fail:
282 kfree(private->rx_buffer); 298 dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
283rx_buf_fail: 299rx_buf_fail:
284 free_netdev(dev); 300 free_netdev(dev);
285device_fail: 301err_release:
286 return -ENODEV; 302 pci_release_regions(pdev);
303err_disable:
304 pci_disable_device(pdev);
305 goto out;
287} 306}
288 307
289 308
@@ -297,25 +316,28 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
297{ 316{
298 struct net_device *dev = pci_get_drvdata(pdev); 317 struct net_device *dev = pci_get_drvdata(pdev);
299 struct xircom_private *card = netdev_priv(dev); 318 struct xircom_private *card = netdev_priv(dev);
319 struct device *d = &pdev->dev;
300 320
301 pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
302 pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
303
304 release_region(dev->base_addr, 128);
305 unregister_netdev(dev); 321 unregister_netdev(dev);
306 free_netdev(dev); 322 pci_iounmap(pdev, card->ioaddr);
307 pci_set_drvdata(pdev, NULL); 323 pci_set_drvdata(pdev, NULL);
324 dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
325 dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
326 free_netdev(dev);
327 pci_release_regions(pdev);
328 pci_disable_device(pdev);
308} 329}
309 330
310static irqreturn_t xircom_interrupt(int irq, void *dev_instance) 331static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
311{ 332{
312 struct net_device *dev = (struct net_device *) dev_instance; 333 struct net_device *dev = (struct net_device *) dev_instance;
313 struct xircom_private *card = netdev_priv(dev); 334 struct xircom_private *card = netdev_priv(dev);
335 void __iomem *ioaddr = card->ioaddr;
314 unsigned int status; 336 unsigned int status;
315 int i; 337 int i;
316 338
317 spin_lock(&card->lock); 339 spin_lock(&card->lock);
318 status = inl(card->io_port+CSR5); 340 status = xr32(CSR5);
319 341
320#if defined DEBUG && DEBUG > 1 342#if defined DEBUG && DEBUG > 1
321 print_binary(status); 343 print_binary(status);
@@ -345,7 +367,7 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
345 /* Clear all remaining interrupts */ 367 /* Clear all remaining interrupts */
346 status |= 0xffffffff; /* FIXME: make this clear only the 368 status |= 0xffffffff; /* FIXME: make this clear only the
347 real existing bits */ 369 real existing bits */
348 outl(status,card->io_port+CSR5); 370 xw32(CSR5, status);
349 371
350 372
351 for (i=0;i<NUMDESCRIPTORS;i++) 373 for (i=0;i<NUMDESCRIPTORS;i++)
@@ -423,11 +445,11 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
423static int xircom_open(struct net_device *dev) 445static int xircom_open(struct net_device *dev)
424{ 446{
425 struct xircom_private *xp = netdev_priv(dev); 447 struct xircom_private *xp = netdev_priv(dev);
448 const int irq = xp->pdev->irq;
426 int retval; 449 int retval;
427 450
428 netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", 451 netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", irq);
429 dev->irq); 452 retval = request_irq(irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
430 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
431 if (retval) 453 if (retval)
432 return retval; 454 return retval;
433 455
@@ -459,7 +481,7 @@ static int xircom_close(struct net_device *dev)
459 spin_unlock_irqrestore(&card->lock,flags); 481 spin_unlock_irqrestore(&card->lock,flags);
460 482
461 card->open = 0; 483 card->open = 0;
462 free_irq(dev->irq,dev); 484 free_irq(card->pdev->irq, dev);
463 485
464 return 0; 486 return 0;
465 487
@@ -469,35 +491,39 @@ static int xircom_close(struct net_device *dev)
469#ifdef CONFIG_NET_POLL_CONTROLLER 491#ifdef CONFIG_NET_POLL_CONTROLLER
470static void xircom_poll_controller(struct net_device *dev) 492static void xircom_poll_controller(struct net_device *dev)
471{ 493{
472 disable_irq(dev->irq); 494 struct xircom_private *xp = netdev_priv(dev);
473 xircom_interrupt(dev->irq, dev); 495 const int irq = xp->pdev->irq;
474 enable_irq(dev->irq); 496
497 disable_irq(irq);
498 xircom_interrupt(irq, dev);
499 enable_irq(irq);
475} 500}
476#endif 501#endif
477 502
478 503
479static void initialize_card(struct xircom_private *card) 504static void initialize_card(struct xircom_private *card)
480{ 505{
481 unsigned int val; 506 void __iomem *ioaddr = card->ioaddr;
482 unsigned long flags; 507 unsigned long flags;
508 u32 val;
483 509
484 spin_lock_irqsave(&card->lock, flags); 510 spin_lock_irqsave(&card->lock, flags);
485 511
486 /* First: reset the card */ 512 /* First: reset the card */
487 val = inl(card->io_port + CSR0); 513 val = xr32(CSR0);
488 val |= 0x01; /* Software reset */ 514 val |= 0x01; /* Software reset */
489 outl(val, card->io_port + CSR0); 515 xw32(CSR0, val);
490 516
491 udelay(100); /* give the card some time to reset */ 517 udelay(100); /* give the card some time to reset */
492 518
493 val = inl(card->io_port + CSR0); 519 val = xr32(CSR0);
494 val &= ~0x01; /* disable Software reset */ 520 val &= ~0x01; /* disable Software reset */
495 outl(val, card->io_port + CSR0); 521 xw32(CSR0, val);
496 522
497 523
498 val = 0; /* Value 0x00 is a safe and conservative value 524 val = 0; /* Value 0x00 is a safe and conservative value
499 for the PCI configuration settings */ 525 for the PCI configuration settings */
500 outl(val, card->io_port + CSR0); 526 xw32(CSR0, val);
501 527
502 528
503 disable_all_interrupts(card); 529 disable_all_interrupts(card);
@@ -515,10 +541,9 @@ ignored; I chose zero.
515*/ 541*/
516static void trigger_transmit(struct xircom_private *card) 542static void trigger_transmit(struct xircom_private *card)
517{ 543{
518 unsigned int val; 544 void __iomem *ioaddr = card->ioaddr;
519 545
520 val = 0; 546 xw32(CSR1, 0);
521 outl(val, card->io_port + CSR1);
522} 547}
523 548
524/* 549/*
@@ -530,10 +555,9 @@ ignored; I chose zero.
530*/ 555*/
531static void trigger_receive(struct xircom_private *card) 556static void trigger_receive(struct xircom_private *card)
532{ 557{
533 unsigned int val; 558 void __iomem *ioaddr = card->ioaddr;
534 559
535 val = 0; 560 xw32(CSR2, 0);
536 outl(val, card->io_port + CSR2);
537} 561}
538 562
539/* 563/*
@@ -542,6 +566,7 @@ descriptors and programs the addresses into the card.
542*/ 566*/
543static void setup_descriptors(struct xircom_private *card) 567static void setup_descriptors(struct xircom_private *card)
544{ 568{
569 void __iomem *ioaddr = card->ioaddr;
545 u32 address; 570 u32 address;
546 int i; 571 int i;
547 572
@@ -571,7 +596,7 @@ static void setup_descriptors(struct xircom_private *card)
571 wmb(); 596 wmb();
572 /* Write the receive descriptor ring address to the card */ 597 /* Write the receive descriptor ring address to the card */
573 address = card->rx_dma_handle; 598 address = card->rx_dma_handle;
574 outl(address, card->io_port + CSR3); /* Receive descr list address */ 599 xw32(CSR3, address); /* Receive descr list address */
575 600
576 601
577 /* transmit descriptors */ 602 /* transmit descriptors */
@@ -596,7 +621,7 @@ static void setup_descriptors(struct xircom_private *card)
596 wmb(); 621 wmb();
597 /* wite the transmit descriptor ring to the card */ 622 /* wite the transmit descriptor ring to the card */
598 address = card->tx_dma_handle; 623 address = card->tx_dma_handle;
599 outl(address, card->io_port + CSR4); /* xmit descr list address */ 624 xw32(CSR4, address); /* xmit descr list address */
600} 625}
601 626
602/* 627/*
@@ -605,11 +630,12 @@ valid by setting the address in the card to 0x00.
605*/ 630*/
606static void remove_descriptors(struct xircom_private *card) 631static void remove_descriptors(struct xircom_private *card)
607{ 632{
633 void __iomem *ioaddr = card->ioaddr;
608 unsigned int val; 634 unsigned int val;
609 635
610 val = 0; 636 val = 0;
611 outl(val, card->io_port + CSR3); /* Receive descriptor address */ 637 xw32(CSR3, val); /* Receive descriptor address */
612 outl(val, card->io_port + CSR4); /* Send descriptor address */ 638 xw32(CSR4, val); /* Send descriptor address */
613} 639}
614 640
615/* 641/*
@@ -620,17 +646,17 @@ This function also clears the status-bit.
620*/ 646*/
621static int link_status_changed(struct xircom_private *card) 647static int link_status_changed(struct xircom_private *card)
622{ 648{
649 void __iomem *ioaddr = card->ioaddr;
623 unsigned int val; 650 unsigned int val;
624 651
625 val = inl(card->io_port + CSR5); /* Status register */ 652 val = xr32(CSR5); /* Status register */
626 653 if (!(val & (1 << 27))) /* no change */
627 if ((val & (1 << 27)) == 0) /* no change */
628 return 0; 654 return 0;
629 655
630 /* clear the event by writing a 1 to the bit in the 656 /* clear the event by writing a 1 to the bit in the
631 status register. */ 657 status register. */
632 val = (1 << 27); 658 val = (1 << 27);
633 outl(val, card->io_port + CSR5); 659 xw32(CSR5, val);
634 660
635 return 1; 661 return 1;
636} 662}
@@ -642,11 +668,9 @@ in a non-stopped state.
642*/ 668*/
643static int transmit_active(struct xircom_private *card) 669static int transmit_active(struct xircom_private *card)
644{ 670{
645 unsigned int val; 671 void __iomem *ioaddr = card->ioaddr;
646
647 val = inl(card->io_port + CSR5); /* Status register */
648 672
649 if ((val & (7 << 20)) == 0) /* transmitter disabled */ 673 if (!(xr32(CSR5) & (7 << 20))) /* transmitter disabled */
650 return 0; 674 return 0;
651 675
652 return 1; 676 return 1;
@@ -658,11 +682,9 @@ in a non-stopped state.
658*/ 682*/
659static int receive_active(struct xircom_private *card) 683static int receive_active(struct xircom_private *card)
660{ 684{
661 unsigned int val; 685 void __iomem *ioaddr = card->ioaddr;
662
663 val = inl(card->io_port + CSR5); /* Status register */
664 686
665 if ((val & (7 << 17)) == 0) /* receiver disabled */ 687 if (!(xr32(CSR5) & (7 << 17))) /* receiver disabled */
666 return 0; 688 return 0;
667 689
668 return 1; 690 return 1;
@@ -680,10 +702,11 @@ must be called with the lock held and interrupts disabled.
680*/ 702*/
681static void activate_receiver(struct xircom_private *card) 703static void activate_receiver(struct xircom_private *card)
682{ 704{
705 void __iomem *ioaddr = card->ioaddr;
683 unsigned int val; 706 unsigned int val;
684 int counter; 707 int counter;
685 708
686 val = inl(card->io_port + CSR6); /* Operation mode */ 709 val = xr32(CSR6); /* Operation mode */
687 710
688 /* If the "active" bit is set and the receiver is already 711 /* If the "active" bit is set and the receiver is already
689 active, no need to do the expensive thing */ 712 active, no need to do the expensive thing */
@@ -692,7 +715,7 @@ static void activate_receiver(struct xircom_private *card)
692 715
693 716
694 val = val & ~2; /* disable the receiver */ 717 val = val & ~2; /* disable the receiver */
695 outl(val, card->io_port + CSR6); 718 xw32(CSR6, val);
696 719
697 counter = 10; 720 counter = 10;
698 while (counter > 0) { 721 while (counter > 0) {
@@ -706,9 +729,9 @@ static void activate_receiver(struct xircom_private *card)
706 } 729 }
707 730
708 /* enable the receiver */ 731 /* enable the receiver */
709 val = inl(card->io_port + CSR6); /* Operation mode */ 732 val = xr32(CSR6); /* Operation mode */
710 val = val | 2; /* enable the receiver */ 733 val = val | 2; /* enable the receiver */
711 outl(val, card->io_port + CSR6); 734 xw32(CSR6, val);
712 735
713 /* now wait for the card to activate again */ 736 /* now wait for the card to activate again */
714 counter = 10; 737 counter = 10;
@@ -733,12 +756,13 @@ must be called with the lock held and interrupts disabled.
733*/ 756*/
734static void deactivate_receiver(struct xircom_private *card) 757static void deactivate_receiver(struct xircom_private *card)
735{ 758{
759 void __iomem *ioaddr = card->ioaddr;
736 unsigned int val; 760 unsigned int val;
737 int counter; 761 int counter;
738 762
739 val = inl(card->io_port + CSR6); /* Operation mode */ 763 val = xr32(CSR6); /* Operation mode */
740 val = val & ~2; /* disable the receiver */ 764 val = val & ~2; /* disable the receiver */
741 outl(val, card->io_port + CSR6); 765 xw32(CSR6, val);
742 766
743 counter = 10; 767 counter = 10;
744 while (counter > 0) { 768 while (counter > 0) {
@@ -765,10 +789,11 @@ must be called with the lock held and interrupts disabled.
765*/ 789*/
766static void activate_transmitter(struct xircom_private *card) 790static void activate_transmitter(struct xircom_private *card)
767{ 791{
792 void __iomem *ioaddr = card->ioaddr;
768 unsigned int val; 793 unsigned int val;
769 int counter; 794 int counter;
770 795
771 val = inl(card->io_port + CSR6); /* Operation mode */ 796 val = xr32(CSR6); /* Operation mode */
772 797
773 /* If the "active" bit is set and the receiver is already 798 /* If the "active" bit is set and the receiver is already
774 active, no need to do the expensive thing */ 799 active, no need to do the expensive thing */
@@ -776,7 +801,7 @@ static void activate_transmitter(struct xircom_private *card)
776 return; 801 return;
777 802
778 val = val & ~(1 << 13); /* disable the transmitter */ 803 val = val & ~(1 << 13); /* disable the transmitter */
779 outl(val, card->io_port + CSR6); 804 xw32(CSR6, val);
780 805
781 counter = 10; 806 counter = 10;
782 while (counter > 0) { 807 while (counter > 0) {
@@ -791,9 +816,9 @@ static void activate_transmitter(struct xircom_private *card)
791 } 816 }
792 817
793 /* enable the transmitter */ 818 /* enable the transmitter */
794 val = inl(card->io_port + CSR6); /* Operation mode */ 819 val = xr32(CSR6); /* Operation mode */
795 val = val | (1 << 13); /* enable the transmitter */ 820 val = val | (1 << 13); /* enable the transmitter */
796 outl(val, card->io_port + CSR6); 821 xw32(CSR6, val);
797 822
798 /* now wait for the card to activate again */ 823 /* now wait for the card to activate again */
799 counter = 10; 824 counter = 10;
@@ -818,12 +843,13 @@ must be called with the lock held and interrupts disabled.
818*/ 843*/
819static void deactivate_transmitter(struct xircom_private *card) 844static void deactivate_transmitter(struct xircom_private *card)
820{ 845{
846 void __iomem *ioaddr = card->ioaddr;
821 unsigned int val; 847 unsigned int val;
822 int counter; 848 int counter;
823 849
824 val = inl(card->io_port + CSR6); /* Operation mode */ 850 val = xr32(CSR6); /* Operation mode */
825 val = val & ~2; /* disable the transmitter */ 851 val = val & ~2; /* disable the transmitter */
826 outl(val, card->io_port + CSR6); 852 xw32(CSR6, val);
827 853
828 counter = 20; 854 counter = 20;
829 while (counter > 0) { 855 while (counter > 0) {
@@ -846,11 +872,12 @@ must be called with the lock held and interrupts disabled.
846*/ 872*/
847static void enable_transmit_interrupt(struct xircom_private *card) 873static void enable_transmit_interrupt(struct xircom_private *card)
848{ 874{
875 void __iomem *ioaddr = card->ioaddr;
849 unsigned int val; 876 unsigned int val;
850 877
851 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 878 val = xr32(CSR7); /* Interrupt enable register */
852 val |= 1; /* enable the transmit interrupt */ 879 val |= 1; /* enable the transmit interrupt */
853 outl(val, card->io_port + CSR7); 880 xw32(CSR7, val);
854} 881}
855 882
856 883
@@ -861,11 +888,12 @@ must be called with the lock held and interrupts disabled.
861*/ 888*/
862static void enable_receive_interrupt(struct xircom_private *card) 889static void enable_receive_interrupt(struct xircom_private *card)
863{ 890{
891 void __iomem *ioaddr = card->ioaddr;
864 unsigned int val; 892 unsigned int val;
865 893
866 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 894 val = xr32(CSR7); /* Interrupt enable register */
867 val = val | (1 << 6); /* enable the receive interrupt */ 895 val = val | (1 << 6); /* enable the receive interrupt */
868 outl(val, card->io_port + CSR7); 896 xw32(CSR7, val);
869} 897}
870 898
871/* 899/*
@@ -875,11 +903,12 @@ must be called with the lock held and interrupts disabled.
875*/ 903*/
876static void enable_link_interrupt(struct xircom_private *card) 904static void enable_link_interrupt(struct xircom_private *card)
877{ 905{
906 void __iomem *ioaddr = card->ioaddr;
878 unsigned int val; 907 unsigned int val;
879 908
880 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 909 val = xr32(CSR7); /* Interrupt enable register */
881 val = val | (1 << 27); /* enable the link status chage interrupt */ 910 val = val | (1 << 27); /* enable the link status chage interrupt */
882 outl(val, card->io_port + CSR7); 911 xw32(CSR7, val);
883} 912}
884 913
885 914
@@ -891,10 +920,9 @@ must be called with the lock held and interrupts disabled.
891*/ 920*/
892static void disable_all_interrupts(struct xircom_private *card) 921static void disable_all_interrupts(struct xircom_private *card)
893{ 922{
894 unsigned int val; 923 void __iomem *ioaddr = card->ioaddr;
895 924
896 val = 0; /* disable all interrupts */ 925 xw32(CSR7, 0);
897 outl(val, card->io_port + CSR7);
898} 926}
899 927
900/* 928/*
@@ -904,9 +932,10 @@ must be called with the lock held and interrupts disabled.
904*/ 932*/
905static void enable_common_interrupts(struct xircom_private *card) 933static void enable_common_interrupts(struct xircom_private *card)
906{ 934{
935 void __iomem *ioaddr = card->ioaddr;
907 unsigned int val; 936 unsigned int val;
908 937
909 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 938 val = xr32(CSR7); /* Interrupt enable register */
910 val |= (1<<16); /* Normal Interrupt Summary */ 939 val |= (1<<16); /* Normal Interrupt Summary */
911 val |= (1<<15); /* Abnormal Interrupt Summary */ 940 val |= (1<<15); /* Abnormal Interrupt Summary */
912 val |= (1<<13); /* Fatal bus error */ 941 val |= (1<<13); /* Fatal bus error */
@@ -915,7 +944,7 @@ static void enable_common_interrupts(struct xircom_private *card)
915 val |= (1<<5); /* Transmit Underflow */ 944 val |= (1<<5); /* Transmit Underflow */
916 val |= (1<<2); /* Transmit Buffer Unavailable */ 945 val |= (1<<2); /* Transmit Buffer Unavailable */
917 val |= (1<<1); /* Transmit Process Stopped */ 946 val |= (1<<1); /* Transmit Process Stopped */
918 outl(val, card->io_port + CSR7); 947 xw32(CSR7, val);
919} 948}
920 949
921/* 950/*
@@ -925,11 +954,12 @@ must be called with the lock held and interrupts disabled.
925*/ 954*/
926static int enable_promisc(struct xircom_private *card) 955static int enable_promisc(struct xircom_private *card)
927{ 956{
957 void __iomem *ioaddr = card->ioaddr;
928 unsigned int val; 958 unsigned int val;
929 959
930 val = inl(card->io_port + CSR6); 960 val = xr32(CSR6);
931 val = val | (1 << 6); 961 val = val | (1 << 6);
932 outl(val, card->io_port + CSR6); 962 xw32(CSR6, val);
933 963
934 return 1; 964 return 1;
935} 965}
@@ -944,13 +974,16 @@ Must be called in locked state with interrupts disabled
944*/ 974*/
945static int link_status(struct xircom_private *card) 975static int link_status(struct xircom_private *card)
946{ 976{
947 unsigned int val; 977 void __iomem *ioaddr = card->ioaddr;
978 u8 val;
948 979
949 val = inb(card->io_port + CSR12); 980 val = xr8(CSR12);
950 981
951 if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ 982 /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
983 if (!(val & (1 << 2)))
952 return 10; 984 return 10;
953 if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ 985 /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
986 if (!(val & (1 << 1)))
954 return 100; 987 return 100;
955 988
956 /* If we get here -> no link at all */ 989 /* If we get here -> no link at all */
@@ -969,29 +1002,31 @@ static int link_status(struct xircom_private *card)
969 */ 1002 */
970static void read_mac_address(struct xircom_private *card) 1003static void read_mac_address(struct xircom_private *card)
971{ 1004{
972 unsigned char j, tuple, link, data_id, data_count; 1005 void __iomem *ioaddr = card->ioaddr;
973 unsigned long flags; 1006 unsigned long flags;
1007 u8 link;
974 int i; 1008 int i;
975 1009
976 spin_lock_irqsave(&card->lock, flags); 1010 spin_lock_irqsave(&card->lock, flags);
977 1011
978 outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ 1012 xw32(CSR9, 1 << 12); /* enable boot rom access */
979 for (i = 0x100; i < 0x1f7; i += link + 2) { 1013 for (i = 0x100; i < 0x1f7; i += link + 2) {
980 outl(i, card->io_port + CSR10); 1014 u8 tuple, data_id, data_count;
981 tuple = inl(card->io_port + CSR9) & 0xff; 1015
982 outl(i + 1, card->io_port + CSR10); 1016 xw32(CSR10, i);
983 link = inl(card->io_port + CSR9) & 0xff; 1017 tuple = xr32(CSR9);
984 outl(i + 2, card->io_port + CSR10); 1018 xw32(CSR10, i + 1);
985 data_id = inl(card->io_port + CSR9) & 0xff; 1019 link = xr32(CSR9);
986 outl(i + 3, card->io_port + CSR10); 1020 xw32(CSR10, i + 2);
987 data_count = inl(card->io_port + CSR9) & 0xff; 1021 data_id = xr32(CSR9);
1022 xw32(CSR10, i + 3);
1023 data_count = xr32(CSR9);
988 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { 1024 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
989 /* 1025 int j;
990 * This is it. We have the data we want. 1026
991 */
992 for (j = 0; j < 6; j++) { 1027 for (j = 0; j < 6; j++) {
993 outl(i + j + 4, card->io_port + CSR10); 1028 xw32(CSR10, i + j + 4);
994 card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff; 1029 card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
995 } 1030 }
996 break; 1031 break;
997 } else if (link == 0) { 1032 } else if (link == 0) {
@@ -1010,6 +1045,7 @@ static void read_mac_address(struct xircom_private *card)
1010 */ 1045 */
1011static void transceiver_voodoo(struct xircom_private *card) 1046static void transceiver_voodoo(struct xircom_private *card)
1012{ 1047{
1048 void __iomem *ioaddr = card->ioaddr;
1013 unsigned long flags; 1049 unsigned long flags;
1014 1050
1015 /* disable all powermanagement */ 1051 /* disable all powermanagement */
@@ -1019,14 +1055,14 @@ static void transceiver_voodoo(struct xircom_private *card)
1019 1055
1020 spin_lock_irqsave(&card->lock, flags); 1056 spin_lock_irqsave(&card->lock, flags);
1021 1057
1022 outl(0x0008, card->io_port + CSR15); 1058 xw32(CSR15, 0x0008);
1023 udelay(25); 1059 udelay(25);
1024 outl(0xa8050000, card->io_port + CSR15); 1060 xw32(CSR15, 0xa8050000);
1025 udelay(25); 1061 udelay(25);
1026 outl(0xa00f0000, card->io_port + CSR15); 1062 xw32(CSR15, 0xa00f0000);
1027 udelay(25); 1063 udelay(25);
1028 1064
1029 spin_unlock_irqrestore(&card->lock, flags); 1065 spin_unlock_irqrestore(&card->lock, flags);
1030 1066
1031 netif_start_queue(card->dev); 1067 netif_start_queue(card->dev);
1032} 1068}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index b2dc2c81a147..ef4499d2ee4b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -16,6 +16,13 @@
16#include "dl2k.h" 16#include "dl2k.h"
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18 18
19#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
20#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
21#define dw8(reg, val) iowrite8(val, ioaddr + (reg))
22#define dr32(reg) ioread32(ioaddr + (reg))
23#define dr16(reg) ioread16(ioaddr + (reg))
24#define dr8(reg) ioread8(ioaddr + (reg))
25
19static char version[] __devinitdata = 26static char version[] __devinitdata =
20 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; 27 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
21#define MAX_UNITS 8 28#define MAX_UNITS 8
@@ -49,8 +56,13 @@ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
49/* Enable the default interrupts */ 56/* Enable the default interrupts */
50#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \ 57#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
51 UpdateStats | LinkEvent) 58 UpdateStats | LinkEvent)
52#define EnableInt() \ 59
53writew(DEFAULT_INTR, ioaddr + IntEnable) 60static void dl2k_enable_int(struct netdev_private *np)
61{
62 void __iomem *ioaddr = np->ioaddr;
63
64 dw16(IntEnable, DEFAULT_INTR);
65}
54 66
55static const int max_intrloop = 50; 67static const int max_intrloop = 50;
56static const int multicast_filter_limit = 0x40; 68static const int multicast_filter_limit = 0x40;
@@ -73,7 +85,7 @@ static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
73static int rio_close (struct net_device *dev); 85static int rio_close (struct net_device *dev);
74static int find_miiphy (struct net_device *dev); 86static int find_miiphy (struct net_device *dev);
75static int parse_eeprom (struct net_device *dev); 87static int parse_eeprom (struct net_device *dev);
76static int read_eeprom (long ioaddr, int eep_addr); 88static int read_eeprom (struct netdev_private *, int eep_addr);
77static int mii_wait_link (struct net_device *dev, int wait); 89static int mii_wait_link (struct net_device *dev, int wait);
78static int mii_set_media (struct net_device *dev); 90static int mii_set_media (struct net_device *dev);
79static int mii_get_media (struct net_device *dev); 91static int mii_get_media (struct net_device *dev);
@@ -106,7 +118,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
106 static int card_idx; 118 static int card_idx;
107 int chip_idx = ent->driver_data; 119 int chip_idx = ent->driver_data;
108 int err, irq; 120 int err, irq;
109 long ioaddr; 121 void __iomem *ioaddr;
110 static int version_printed; 122 static int version_printed;
111 void *ring_space; 123 void *ring_space;
112 dma_addr_t ring_dma; 124 dma_addr_t ring_dma;
@@ -124,26 +136,29 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
124 goto err_out_disable; 136 goto err_out_disable;
125 137
126 pci_set_master (pdev); 138 pci_set_master (pdev);
139
140 err = -ENOMEM;
141
127 dev = alloc_etherdev (sizeof (*np)); 142 dev = alloc_etherdev (sizeof (*np));
128 if (!dev) { 143 if (!dev)
129 err = -ENOMEM;
130 goto err_out_res; 144 goto err_out_res;
131 }
132 SET_NETDEV_DEV(dev, &pdev->dev); 145 SET_NETDEV_DEV(dev, &pdev->dev);
133 146
134#ifdef MEM_MAPPING 147 np = netdev_priv(dev);
135 ioaddr = pci_resource_start (pdev, 1); 148
136 ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE); 149 /* IO registers range. */
137 if (!ioaddr) { 150 ioaddr = pci_iomap(pdev, 0, 0);
138 err = -ENOMEM; 151 if (!ioaddr)
139 goto err_out_dev; 152 goto err_out_dev;
140 } 153 np->eeprom_addr = ioaddr;
141#else 154
142 ioaddr = pci_resource_start (pdev, 0); 155#ifdef MEM_MAPPING
156 /* MM registers range. */
157 ioaddr = pci_iomap(pdev, 1, 0);
158 if (!ioaddr)
159 goto err_out_iounmap;
143#endif 160#endif
144 dev->base_addr = ioaddr; 161 np->ioaddr = ioaddr;
145 dev->irq = irq;
146 np = netdev_priv(dev);
147 np->chip_id = chip_idx; 162 np->chip_id = chip_idx;
148 np->pdev = pdev; 163 np->pdev = pdev;
149 spin_lock_init (&np->tx_lock); 164 spin_lock_init (&np->tx_lock);
@@ -239,7 +254,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
239 goto err_out_unmap_rx; 254 goto err_out_unmap_rx;
240 255
241 /* Fiber device? */ 256 /* Fiber device? */
242 np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0; 257 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
243 np->link_status = 0; 258 np->link_status = 0;
244 /* Set media and reset PHY */ 259 /* Set media and reset PHY */
245 if (np->phy_media) { 260 if (np->phy_media) {
@@ -276,22 +291,20 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
276 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); 291 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
277 return 0; 292 return 0;
278 293
279 err_out_unmap_rx: 294err_out_unmap_rx:
280 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); 295 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
281 err_out_unmap_tx: 296err_out_unmap_tx:
282 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); 297 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
283 err_out_iounmap: 298err_out_iounmap:
284#ifdef MEM_MAPPING 299#ifdef MEM_MAPPING
285 iounmap ((void *) ioaddr); 300 pci_iounmap(pdev, np->ioaddr);
286
287 err_out_dev:
288#endif 301#endif
302 pci_iounmap(pdev, np->eeprom_addr);
303err_out_dev:
289 free_netdev (dev); 304 free_netdev (dev);
290 305err_out_res:
291 err_out_res:
292 pci_release_regions (pdev); 306 pci_release_regions (pdev);
293 307err_out_disable:
294 err_out_disable:
295 pci_disable_device (pdev); 308 pci_disable_device (pdev);
296 return err; 309 return err;
297} 310}
@@ -299,11 +312,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
299static int 312static int
300find_miiphy (struct net_device *dev) 313find_miiphy (struct net_device *dev)
301{ 314{
315 struct netdev_private *np = netdev_priv(dev);
302 int i, phy_found = 0; 316 int i, phy_found = 0;
303 struct netdev_private *np;
304 long ioaddr;
305 np = netdev_priv(dev); 317 np = netdev_priv(dev);
306 ioaddr = dev->base_addr;
307 np->phy_addr = 1; 318 np->phy_addr = 1;
308 319
309 for (i = 31; i >= 0; i--) { 320 for (i = 31; i >= 0; i--) {
@@ -323,26 +334,19 @@ find_miiphy (struct net_device *dev)
323static int 334static int
324parse_eeprom (struct net_device *dev) 335parse_eeprom (struct net_device *dev)
325{ 336{
337 struct netdev_private *np = netdev_priv(dev);
338 void __iomem *ioaddr = np->ioaddr;
326 int i, j; 339 int i, j;
327 long ioaddr = dev->base_addr;
328 u8 sromdata[256]; 340 u8 sromdata[256];
329 u8 *psib; 341 u8 *psib;
330 u32 crc; 342 u32 crc;
331 PSROM_t psrom = (PSROM_t) sromdata; 343 PSROM_t psrom = (PSROM_t) sromdata;
332 struct netdev_private *np = netdev_priv(dev);
333 344
334 int cid, next; 345 int cid, next;
335 346
336#ifdef MEM_MAPPING 347 for (i = 0; i < 128; i++)
337 ioaddr = pci_resource_start (np->pdev, 0); 348 ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
338#endif 349
339 /* Read eeprom */
340 for (i = 0; i < 128; i++) {
341 ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
342 }
343#ifdef MEM_MAPPING
344 ioaddr = dev->base_addr;
345#endif
346 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ 350 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
347 /* Check CRC */ 351 /* Check CRC */
348 crc = ~ether_crc_le (256 - 4, sromdata); 352 crc = ~ether_crc_le (256 - 4, sromdata);
@@ -378,8 +382,7 @@ parse_eeprom (struct net_device *dev)
378 return 0; 382 return 0;
379 case 2: /* Duplex Polarity */ 383 case 2: /* Duplex Polarity */
380 np->duplex_polarity = psib[i]; 384 np->duplex_polarity = psib[i];
381 writeb (readb (ioaddr + PhyCtrl) | psib[i], 385 dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
382 ioaddr + PhyCtrl);
383 break; 386 break;
384 case 3: /* Wake Polarity */ 387 case 3: /* Wake Polarity */
385 np->wake_polarity = psib[i]; 388 np->wake_polarity = psib[i];
@@ -407,59 +410,57 @@ static int
407rio_open (struct net_device *dev) 410rio_open (struct net_device *dev)
408{ 411{
409 struct netdev_private *np = netdev_priv(dev); 412 struct netdev_private *np = netdev_priv(dev);
410 long ioaddr = dev->base_addr; 413 void __iomem *ioaddr = np->ioaddr;
414 const int irq = np->pdev->irq;
411 int i; 415 int i;
412 u16 macctrl; 416 u16 macctrl;
413 417
414 i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev); 418 i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
415 if (i) 419 if (i)
416 return i; 420 return i;
417 421
418 /* Reset all logic functions */ 422 /* Reset all logic functions */
419 writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset, 423 dw16(ASICCtrl + 2,
420 ioaddr + ASICCtrl + 2); 424 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
421 mdelay(10); 425 mdelay(10);
422 426
423 /* DebugCtrl bit 4, 5, 9 must set */ 427 /* DebugCtrl bit 4, 5, 9 must set */
424 writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl); 428 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
425 429
426 /* Jumbo frame */ 430 /* Jumbo frame */
427 if (np->jumbo != 0) 431 if (np->jumbo != 0)
428 writew (MAX_JUMBO+14, ioaddr + MaxFrameSize); 432 dw16(MaxFrameSize, MAX_JUMBO+14);
429 433
430 alloc_list (dev); 434 alloc_list (dev);
431 435
432 /* Get station address */ 436 /* Get station address */
433 for (i = 0; i < 6; i++) 437 for (i = 0; i < 6; i++)
434 writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i); 438 dw8(StationAddr0 + i, dev->dev_addr[i]);
435 439
436 set_multicast (dev); 440 set_multicast (dev);
437 if (np->coalesce) { 441 if (np->coalesce) {
438 writel (np->rx_coalesce | np->rx_timeout << 16, 442 dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
439 ioaddr + RxDMAIntCtrl);
440 } 443 }
441 /* Set RIO to poll every N*320nsec. */ 444 /* Set RIO to poll every N*320nsec. */
442 writeb (0x20, ioaddr + RxDMAPollPeriod); 445 dw8(RxDMAPollPeriod, 0x20);
443 writeb (0xff, ioaddr + TxDMAPollPeriod); 446 dw8(TxDMAPollPeriod, 0xff);
444 writeb (0x30, ioaddr + RxDMABurstThresh); 447 dw8(RxDMABurstThresh, 0x30);
445 writeb (0x30, ioaddr + RxDMAUrgentThresh); 448 dw8(RxDMAUrgentThresh, 0x30);
446 writel (0x0007ffff, ioaddr + RmonStatMask); 449 dw32(RmonStatMask, 0x0007ffff);
447 /* clear statistics */ 450 /* clear statistics */
448 clear_stats (dev); 451 clear_stats (dev);
449 452
450 /* VLAN supported */ 453 /* VLAN supported */
451 if (np->vlan) { 454 if (np->vlan) {
452 /* priority field in RxDMAIntCtrl */ 455 /* priority field in RxDMAIntCtrl */
453 writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10, 456 dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
454 ioaddr + RxDMAIntCtrl);
455 /* VLANId */ 457 /* VLANId */
456 writew (np->vlan, ioaddr + VLANId); 458 dw16(VLANId, np->vlan);
457 /* Length/Type should be 0x8100 */ 459 /* Length/Type should be 0x8100 */
458 writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag); 460 dw32(VLANTag, 0x8100 << 16 | np->vlan);
459 /* Enable AutoVLANuntagging, but disable AutoVLANtagging. 461 /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
460 VLAN information tagged by TFC' VID, CFI fields. */ 462 VLAN information tagged by TFC' VID, CFI fields. */
461 writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging, 463 dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
462 ioaddr + MACCtrl);
463 } 464 }
464 465
465 init_timer (&np->timer); 466 init_timer (&np->timer);
@@ -469,20 +470,18 @@ rio_open (struct net_device *dev)
469 add_timer (&np->timer); 470 add_timer (&np->timer);
470 471
471 /* Start Tx/Rx */ 472 /* Start Tx/Rx */
472 writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable, 473 dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
473 ioaddr + MACCtrl);
474 474
475 macctrl = 0; 475 macctrl = 0;
476 macctrl |= (np->vlan) ? AutoVLANuntagging : 0; 476 macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
477 macctrl |= (np->full_duplex) ? DuplexSelect : 0; 477 macctrl |= (np->full_duplex) ? DuplexSelect : 0;
478 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; 478 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
479 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0; 479 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
480 writew(macctrl, ioaddr + MACCtrl); 480 dw16(MACCtrl, macctrl);
481 481
482 netif_start_queue (dev); 482 netif_start_queue (dev);
483 483
484 /* Enable default interrupts */ 484 dl2k_enable_int(np);
485 EnableInt ();
486 return 0; 485 return 0;
487} 486}
488 487
@@ -533,10 +532,11 @@ rio_timer (unsigned long data)
533static void 532static void
534rio_tx_timeout (struct net_device *dev) 533rio_tx_timeout (struct net_device *dev)
535{ 534{
536 long ioaddr = dev->base_addr; 535 struct netdev_private *np = netdev_priv(dev);
536 void __iomem *ioaddr = np->ioaddr;
537 537
538 printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", 538 printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
539 dev->name, readl (ioaddr + TxStatus)); 539 dev->name, dr32(TxStatus));
540 rio_free_tx(dev, 0); 540 rio_free_tx(dev, 0);
541 dev->if_port = 0; 541 dev->if_port = 0;
542 dev->trans_start = jiffies; /* prevent tx timeout */ 542 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -547,6 +547,7 @@ static void
547alloc_list (struct net_device *dev) 547alloc_list (struct net_device *dev)
548{ 548{
549 struct netdev_private *np = netdev_priv(dev); 549 struct netdev_private *np = netdev_priv(dev);
550 void __iomem *ioaddr = np->ioaddr;
550 int i; 551 int i;
551 552
552 np->cur_rx = np->cur_tx = 0; 553 np->cur_rx = np->cur_tx = 0;
@@ -594,24 +595,23 @@ alloc_list (struct net_device *dev)
594 } 595 }
595 596
596 /* Set RFDListPtr */ 597 /* Set RFDListPtr */
597 writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0); 598 dw32(RFDListPtr0, np->rx_ring_dma);
598 writel (0, dev->base_addr + RFDListPtr1); 599 dw32(RFDListPtr1, 0);
599} 600}
600 601
601static netdev_tx_t 602static netdev_tx_t
602start_xmit (struct sk_buff *skb, struct net_device *dev) 603start_xmit (struct sk_buff *skb, struct net_device *dev)
603{ 604{
604 struct netdev_private *np = netdev_priv(dev); 605 struct netdev_private *np = netdev_priv(dev);
606 void __iomem *ioaddr = np->ioaddr;
605 struct netdev_desc *txdesc; 607 struct netdev_desc *txdesc;
606 unsigned entry; 608 unsigned entry;
607 u32 ioaddr;
608 u64 tfc_vlan_tag = 0; 609 u64 tfc_vlan_tag = 0;
609 610
610 if (np->link_status == 0) { /* Link Down */ 611 if (np->link_status == 0) { /* Link Down */
611 dev_kfree_skb(skb); 612 dev_kfree_skb(skb);
612 return NETDEV_TX_OK; 613 return NETDEV_TX_OK;
613 } 614 }
614 ioaddr = dev->base_addr;
615 entry = np->cur_tx % TX_RING_SIZE; 615 entry = np->cur_tx % TX_RING_SIZE;
616 np->tx_skbuff[entry] = skb; 616 np->tx_skbuff[entry] = skb;
617 txdesc = &np->tx_ring[entry]; 617 txdesc = &np->tx_ring[entry];
@@ -646,9 +646,9 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
646 (1 << FragCountShift)); 646 (1 << FragCountShift));
647 647
648 /* TxDMAPollNow */ 648 /* TxDMAPollNow */
649 writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl); 649 dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
650 /* Schedule ISR */ 650 /* Schedule ISR */
651 writel(10000, ioaddr + CountDown); 651 dw32(CountDown, 10000);
652 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; 652 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
653 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE 653 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
654 < TX_QUEUE_LEN - 1 && np->speed != 10) { 654 < TX_QUEUE_LEN - 1 && np->speed != 10) {
@@ -658,10 +658,10 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
658 } 658 }
659 659
660 /* The first TFDListPtr */ 660 /* The first TFDListPtr */
661 if (readl (dev->base_addr + TFDListPtr0) == 0) { 661 if (!dr32(TFDListPtr0)) {
662 writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc), 662 dw32(TFDListPtr0, np->tx_ring_dma +
663 dev->base_addr + TFDListPtr0); 663 entry * sizeof (struct netdev_desc));
664 writel (0, dev->base_addr + TFDListPtr1); 664 dw32(TFDListPtr1, 0);
665 } 665 }
666 666
667 return NETDEV_TX_OK; 667 return NETDEV_TX_OK;
@@ -671,17 +671,15 @@ static irqreturn_t
671rio_interrupt (int irq, void *dev_instance) 671rio_interrupt (int irq, void *dev_instance)
672{ 672{
673 struct net_device *dev = dev_instance; 673 struct net_device *dev = dev_instance;
674 struct netdev_private *np; 674 struct netdev_private *np = netdev_priv(dev);
675 void __iomem *ioaddr = np->ioaddr;
675 unsigned int_status; 676 unsigned int_status;
676 long ioaddr;
677 int cnt = max_intrloop; 677 int cnt = max_intrloop;
678 int handled = 0; 678 int handled = 0;
679 679
680 ioaddr = dev->base_addr;
681 np = netdev_priv(dev);
682 while (1) { 680 while (1) {
683 int_status = readw (ioaddr + IntStatus); 681 int_status = dr16(IntStatus);
684 writew (int_status, ioaddr + IntStatus); 682 dw16(IntStatus, int_status);
685 int_status &= DEFAULT_INTR; 683 int_status &= DEFAULT_INTR;
686 if (int_status == 0 || --cnt < 0) 684 if (int_status == 0 || --cnt < 0)
687 break; 685 break;
@@ -692,7 +690,7 @@ rio_interrupt (int irq, void *dev_instance)
692 /* TxDMAComplete interrupt */ 690 /* TxDMAComplete interrupt */
693 if ((int_status & (TxDMAComplete|IntRequested))) { 691 if ((int_status & (TxDMAComplete|IntRequested))) {
694 int tx_status; 692 int tx_status;
695 tx_status = readl (ioaddr + TxStatus); 693 tx_status = dr32(TxStatus);
696 if (tx_status & 0x01) 694 if (tx_status & 0x01)
697 tx_error (dev, tx_status); 695 tx_error (dev, tx_status);
698 /* Free used tx skbuffs */ 696 /* Free used tx skbuffs */
@@ -705,7 +703,7 @@ rio_interrupt (int irq, void *dev_instance)
705 rio_error (dev, int_status); 703 rio_error (dev, int_status);
706 } 704 }
707 if (np->cur_tx != np->old_tx) 705 if (np->cur_tx != np->old_tx)
708 writel (100, ioaddr + CountDown); 706 dw32(CountDown, 100);
709 return IRQ_RETVAL(handled); 707 return IRQ_RETVAL(handled);
710} 708}
711 709
@@ -765,13 +763,11 @@ rio_free_tx (struct net_device *dev, int irq)
765static void 763static void
766tx_error (struct net_device *dev, int tx_status) 764tx_error (struct net_device *dev, int tx_status)
767{ 765{
768 struct netdev_private *np; 766 struct netdev_private *np = netdev_priv(dev);
769 long ioaddr = dev->base_addr; 767 void __iomem *ioaddr = np->ioaddr;
770 int frame_id; 768 int frame_id;
771 int i; 769 int i;
772 770
773 np = netdev_priv(dev);
774
775 frame_id = (tx_status & 0xffff0000); 771 frame_id = (tx_status & 0xffff0000);
776 printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", 772 printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
777 dev->name, tx_status, frame_id); 773 dev->name, tx_status, frame_id);
@@ -779,23 +775,21 @@ tx_error (struct net_device *dev, int tx_status)
779 /* Ttransmit Underrun */ 775 /* Ttransmit Underrun */
780 if (tx_status & 0x10) { 776 if (tx_status & 0x10) {
781 np->stats.tx_fifo_errors++; 777 np->stats.tx_fifo_errors++;
782 writew (readw (ioaddr + TxStartThresh) + 0x10, 778 dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
783 ioaddr + TxStartThresh);
784 /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ 779 /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
785 writew (TxReset | DMAReset | FIFOReset | NetworkReset, 780 dw16(ASICCtrl + 2,
786 ioaddr + ASICCtrl + 2); 781 TxReset | DMAReset | FIFOReset | NetworkReset);
787 /* Wait for ResetBusy bit clear */ 782 /* Wait for ResetBusy bit clear */
788 for (i = 50; i > 0; i--) { 783 for (i = 50; i > 0; i--) {
789 if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) 784 if (!(dr16(ASICCtrl + 2) & ResetBusy))
790 break; 785 break;
791 mdelay (1); 786 mdelay (1);
792 } 787 }
793 rio_free_tx (dev, 1); 788 rio_free_tx (dev, 1);
794 /* Reset TFDListPtr */ 789 /* Reset TFDListPtr */
795 writel (np->tx_ring_dma + 790 dw32(TFDListPtr0, np->tx_ring_dma +
796 np->old_tx * sizeof (struct netdev_desc), 791 np->old_tx * sizeof (struct netdev_desc));
797 dev->base_addr + TFDListPtr0); 792 dw32(TFDListPtr1, 0);
798 writel (0, dev->base_addr + TFDListPtr1);
799 793
800 /* Let TxStartThresh stay default value */ 794 /* Let TxStartThresh stay default value */
801 } 795 }
@@ -803,10 +797,10 @@ tx_error (struct net_device *dev, int tx_status)
803 if (tx_status & 0x04) { 797 if (tx_status & 0x04) {
804 np->stats.tx_fifo_errors++; 798 np->stats.tx_fifo_errors++;
805 /* TxReset and clear FIFO */ 799 /* TxReset and clear FIFO */
806 writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2); 800 dw16(ASICCtrl + 2, TxReset | FIFOReset);
807 /* Wait reset done */ 801 /* Wait reset done */
808 for (i = 50; i > 0; i--) { 802 for (i = 50; i > 0; i--) {
809 if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) 803 if (!(dr16(ASICCtrl + 2) & ResetBusy))
810 break; 804 break;
811 mdelay (1); 805 mdelay (1);
812 } 806 }
@@ -821,7 +815,7 @@ tx_error (struct net_device *dev, int tx_status)
821 np->stats.collisions++; 815 np->stats.collisions++;
822#endif 816#endif
823 /* Restart the Tx */ 817 /* Restart the Tx */
824 writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl); 818 dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
825} 819}
826 820
827static int 821static int
@@ -931,8 +925,8 @@ receive_packet (struct net_device *dev)
931static void 925static void
932rio_error (struct net_device *dev, int int_status) 926rio_error (struct net_device *dev, int int_status)
933{ 927{
934 long ioaddr = dev->base_addr;
935 struct netdev_private *np = netdev_priv(dev); 928 struct netdev_private *np = netdev_priv(dev);
929 void __iomem *ioaddr = np->ioaddr;
936 u16 macctrl; 930 u16 macctrl;
937 931
938 /* Link change event */ 932 /* Link change event */
@@ -954,7 +948,7 @@ rio_error (struct net_device *dev, int int_status)
954 TxFlowControlEnable : 0; 948 TxFlowControlEnable : 0;
955 macctrl |= (np->rx_flow) ? 949 macctrl |= (np->rx_flow) ?
956 RxFlowControlEnable : 0; 950 RxFlowControlEnable : 0;
957 writew(macctrl, ioaddr + MACCtrl); 951 dw16(MACCtrl, macctrl);
958 np->link_status = 1; 952 np->link_status = 1;
959 netif_carrier_on(dev); 953 netif_carrier_on(dev);
960 } else { 954 } else {
@@ -974,7 +968,7 @@ rio_error (struct net_device *dev, int int_status)
974 if (int_status & HostError) { 968 if (int_status & HostError) {
975 printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n", 969 printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
976 dev->name, int_status); 970 dev->name, int_status);
977 writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2); 971 dw16(ASICCtrl + 2, GlobalReset | HostReset);
978 mdelay (500); 972 mdelay (500);
979 } 973 }
980} 974}
@@ -982,8 +976,8 @@ rio_error (struct net_device *dev, int int_status)
982static struct net_device_stats * 976static struct net_device_stats *
983get_stats (struct net_device *dev) 977get_stats (struct net_device *dev)
984{ 978{
985 long ioaddr = dev->base_addr;
986 struct netdev_private *np = netdev_priv(dev); 979 struct netdev_private *np = netdev_priv(dev);
980 void __iomem *ioaddr = np->ioaddr;
987#ifdef MEM_MAPPING 981#ifdef MEM_MAPPING
988 int i; 982 int i;
989#endif 983#endif
@@ -992,106 +986,107 @@ get_stats (struct net_device *dev)
992 /* All statistics registers need to be acknowledged, 986 /* All statistics registers need to be acknowledged,
993 else statistic overflow could cause problems */ 987 else statistic overflow could cause problems */
994 988
995 np->stats.rx_packets += readl (ioaddr + FramesRcvOk); 989 np->stats.rx_packets += dr32(FramesRcvOk);
996 np->stats.tx_packets += readl (ioaddr + FramesXmtOk); 990 np->stats.tx_packets += dr32(FramesXmtOk);
997 np->stats.rx_bytes += readl (ioaddr + OctetRcvOk); 991 np->stats.rx_bytes += dr32(OctetRcvOk);
998 np->stats.tx_bytes += readl (ioaddr + OctetXmtOk); 992 np->stats.tx_bytes += dr32(OctetXmtOk);
999 993
1000 np->stats.multicast = readl (ioaddr + McstFramesRcvdOk); 994 np->stats.multicast = dr32(McstFramesRcvdOk);
1001 np->stats.collisions += readl (ioaddr + SingleColFrames) 995 np->stats.collisions += dr32(SingleColFrames)
1002 + readl (ioaddr + MultiColFrames); 996 + dr32(MultiColFrames);
1003 997
1004 /* detailed tx errors */ 998 /* detailed tx errors */
1005 stat_reg = readw (ioaddr + FramesAbortXSColls); 999 stat_reg = dr16(FramesAbortXSColls);
1006 np->stats.tx_aborted_errors += stat_reg; 1000 np->stats.tx_aborted_errors += stat_reg;
1007 np->stats.tx_errors += stat_reg; 1001 np->stats.tx_errors += stat_reg;
1008 1002
1009 stat_reg = readw (ioaddr + CarrierSenseErrors); 1003 stat_reg = dr16(CarrierSenseErrors);
1010 np->stats.tx_carrier_errors += stat_reg; 1004 np->stats.tx_carrier_errors += stat_reg;
1011 np->stats.tx_errors += stat_reg; 1005 np->stats.tx_errors += stat_reg;
1012 1006
1013 /* Clear all other statistic register. */ 1007 /* Clear all other statistic register. */
1014 readl (ioaddr + McstOctetXmtOk); 1008 dr32(McstOctetXmtOk);
1015 readw (ioaddr + BcstFramesXmtdOk); 1009 dr16(BcstFramesXmtdOk);
1016 readl (ioaddr + McstFramesXmtdOk); 1010 dr32(McstFramesXmtdOk);
1017 readw (ioaddr + BcstFramesRcvdOk); 1011 dr16(BcstFramesRcvdOk);
1018 readw (ioaddr + MacControlFramesRcvd); 1012 dr16(MacControlFramesRcvd);
1019 readw (ioaddr + FrameTooLongErrors); 1013 dr16(FrameTooLongErrors);
1020 readw (ioaddr + InRangeLengthErrors); 1014 dr16(InRangeLengthErrors);
1021 readw (ioaddr + FramesCheckSeqErrors); 1015 dr16(FramesCheckSeqErrors);
1022 readw (ioaddr + FramesLostRxErrors); 1016 dr16(FramesLostRxErrors);
1023 readl (ioaddr + McstOctetXmtOk); 1017 dr32(McstOctetXmtOk);
1024 readl (ioaddr + BcstOctetXmtOk); 1018 dr32(BcstOctetXmtOk);
1025 readl (ioaddr + McstFramesXmtdOk); 1019 dr32(McstFramesXmtdOk);
1026 readl (ioaddr + FramesWDeferredXmt); 1020 dr32(FramesWDeferredXmt);
1027 readl (ioaddr + LateCollisions); 1021 dr32(LateCollisions);
1028 readw (ioaddr + BcstFramesXmtdOk); 1022 dr16(BcstFramesXmtdOk);
1029 readw (ioaddr + MacControlFramesXmtd); 1023 dr16(MacControlFramesXmtd);
1030 readw (ioaddr + FramesWEXDeferal); 1024 dr16(FramesWEXDeferal);
1031 1025
1032#ifdef MEM_MAPPING 1026#ifdef MEM_MAPPING
1033 for (i = 0x100; i <= 0x150; i += 4) 1027 for (i = 0x100; i <= 0x150; i += 4)
1034 readl (ioaddr + i); 1028 dr32(i);
1035#endif 1029#endif
1036 readw (ioaddr + TxJumboFrames); 1030 dr16(TxJumboFrames);
1037 readw (ioaddr + RxJumboFrames); 1031 dr16(RxJumboFrames);
1038 readw (ioaddr + TCPCheckSumErrors); 1032 dr16(TCPCheckSumErrors);
1039 readw (ioaddr + UDPCheckSumErrors); 1033 dr16(UDPCheckSumErrors);
1040 readw (ioaddr + IPCheckSumErrors); 1034 dr16(IPCheckSumErrors);
1041 return &np->stats; 1035 return &np->stats;
1042} 1036}
1043 1037
1044static int 1038static int
1045clear_stats (struct net_device *dev) 1039clear_stats (struct net_device *dev)
1046{ 1040{
1047 long ioaddr = dev->base_addr; 1041 struct netdev_private *np = netdev_priv(dev);
1042 void __iomem *ioaddr = np->ioaddr;
1048#ifdef MEM_MAPPING 1043#ifdef MEM_MAPPING
1049 int i; 1044 int i;
1050#endif 1045#endif
1051 1046
1052 /* All statistics registers need to be acknowledged, 1047 /* All statistics registers need to be acknowledged,
1053 else statistic overflow could cause problems */ 1048 else statistic overflow could cause problems */
1054 readl (ioaddr + FramesRcvOk); 1049 dr32(FramesRcvOk);
1055 readl (ioaddr + FramesXmtOk); 1050 dr32(FramesXmtOk);
1056 readl (ioaddr + OctetRcvOk); 1051 dr32(OctetRcvOk);
1057 readl (ioaddr + OctetXmtOk); 1052 dr32(OctetXmtOk);
1058 1053
1059 readl (ioaddr + McstFramesRcvdOk); 1054 dr32(McstFramesRcvdOk);
1060 readl (ioaddr + SingleColFrames); 1055 dr32(SingleColFrames);
1061 readl (ioaddr + MultiColFrames); 1056 dr32(MultiColFrames);
1062 readl (ioaddr + LateCollisions); 1057 dr32(LateCollisions);
1063 /* detailed rx errors */ 1058 /* detailed rx errors */
1064 readw (ioaddr + FrameTooLongErrors); 1059 dr16(FrameTooLongErrors);
1065 readw (ioaddr + InRangeLengthErrors); 1060 dr16(InRangeLengthErrors);
1066 readw (ioaddr + FramesCheckSeqErrors); 1061 dr16(FramesCheckSeqErrors);
1067 readw (ioaddr + FramesLostRxErrors); 1062 dr16(FramesLostRxErrors);
1068 1063
1069 /* detailed tx errors */ 1064 /* detailed tx errors */
1070 readw (ioaddr + FramesAbortXSColls); 1065 dr16(FramesAbortXSColls);
1071 readw (ioaddr + CarrierSenseErrors); 1066 dr16(CarrierSenseErrors);
1072 1067
1073 /* Clear all other statistic register. */ 1068 /* Clear all other statistic register. */
1074 readl (ioaddr + McstOctetXmtOk); 1069 dr32(McstOctetXmtOk);
1075 readw (ioaddr + BcstFramesXmtdOk); 1070 dr16(BcstFramesXmtdOk);
1076 readl (ioaddr + McstFramesXmtdOk); 1071 dr32(McstFramesXmtdOk);
1077 readw (ioaddr + BcstFramesRcvdOk); 1072 dr16(BcstFramesRcvdOk);
1078 readw (ioaddr + MacControlFramesRcvd); 1073 dr16(MacControlFramesRcvd);
1079 readl (ioaddr + McstOctetXmtOk); 1074 dr32(McstOctetXmtOk);
1080 readl (ioaddr + BcstOctetXmtOk); 1075 dr32(BcstOctetXmtOk);
1081 readl (ioaddr + McstFramesXmtdOk); 1076 dr32(McstFramesXmtdOk);
1082 readl (ioaddr + FramesWDeferredXmt); 1077 dr32(FramesWDeferredXmt);
1083 readw (ioaddr + BcstFramesXmtdOk); 1078 dr16(BcstFramesXmtdOk);
1084 readw (ioaddr + MacControlFramesXmtd); 1079 dr16(MacControlFramesXmtd);
1085 readw (ioaddr + FramesWEXDeferal); 1080 dr16(FramesWEXDeferal);
1086#ifdef MEM_MAPPING 1081#ifdef MEM_MAPPING
1087 for (i = 0x100; i <= 0x150; i += 4) 1082 for (i = 0x100; i <= 0x150; i += 4)
1088 readl (ioaddr + i); 1083 dr32(i);
1089#endif 1084#endif
1090 readw (ioaddr + TxJumboFrames); 1085 dr16(TxJumboFrames);
1091 readw (ioaddr + RxJumboFrames); 1086 dr16(RxJumboFrames);
1092 readw (ioaddr + TCPCheckSumErrors); 1087 dr16(TCPCheckSumErrors);
1093 readw (ioaddr + UDPCheckSumErrors); 1088 dr16(UDPCheckSumErrors);
1094 readw (ioaddr + IPCheckSumErrors); 1089 dr16(IPCheckSumErrors);
1095 return 0; 1090 return 0;
1096} 1091}
1097 1092
@@ -1114,10 +1109,10 @@ change_mtu (struct net_device *dev, int new_mtu)
1114static void 1109static void
1115set_multicast (struct net_device *dev) 1110set_multicast (struct net_device *dev)
1116{ 1111{
1117 long ioaddr = dev->base_addr; 1112 struct netdev_private *np = netdev_priv(dev);
1113 void __iomem *ioaddr = np->ioaddr;
1118 u32 hash_table[2]; 1114 u32 hash_table[2];
1119 u16 rx_mode = 0; 1115 u16 rx_mode = 0;
1120 struct netdev_private *np = netdev_priv(dev);
1121 1116
1122 hash_table[0] = hash_table[1] = 0; 1117 hash_table[0] = hash_table[1] = 0;
1123 /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */ 1118 /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
@@ -1153,9 +1148,9 @@ set_multicast (struct net_device *dev)
1153 rx_mode |= ReceiveVLANMatch; 1148 rx_mode |= ReceiveVLANMatch;
1154 } 1149 }
1155 1150
1156 writel (hash_table[0], ioaddr + HashTable0); 1151 dw32(HashTable0, hash_table[0]);
1157 writel (hash_table[1], ioaddr + HashTable1); 1152 dw32(HashTable1, hash_table[1]);
1158 writew (rx_mode, ioaddr + ReceiveMode); 1153 dw16(ReceiveMode, rx_mode);
1159} 1154}
1160 1155
1161static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1156static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1318,15 +1313,15 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1318#define EEP_BUSY 0x8000 1313#define EEP_BUSY 0x8000
1319/* Read the EEPROM word */ 1314/* Read the EEPROM word */
1320/* We use I/O instruction to read/write eeprom to avoid fail on some machines */ 1315/* We use I/O instruction to read/write eeprom to avoid fail on some machines */
1321static int 1316static int read_eeprom(struct netdev_private *np, int eep_addr)
1322read_eeprom (long ioaddr, int eep_addr)
1323{ 1317{
1318 void __iomem *ioaddr = np->eeprom_addr;
1324 int i = 1000; 1319 int i = 1000;
1325 outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl); 1320
1321 dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
1326 while (i-- > 0) { 1322 while (i-- > 0) {
1327 if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) { 1323 if (!(dr16(EepromCtrl) & EEP_BUSY))
1328 return inw (ioaddr + EepromData); 1324 return dr16(EepromData);
1329 }
1330 } 1325 }
1331 return 0; 1326 return 0;
1332} 1327}
@@ -1336,38 +1331,40 @@ enum phy_ctrl_bits {
1336 MII_DUPLEX = 0x08, 1331 MII_DUPLEX = 0x08,
1337}; 1332};
1338 1333
1339#define mii_delay() readb(ioaddr) 1334#define mii_delay() dr8(PhyCtrl)
1340static void 1335static void
1341mii_sendbit (struct net_device *dev, u32 data) 1336mii_sendbit (struct net_device *dev, u32 data)
1342{ 1337{
1343 long ioaddr = dev->base_addr + PhyCtrl; 1338 struct netdev_private *np = netdev_priv(dev);
1344 data = (data) ? MII_DATA1 : 0; 1339 void __iomem *ioaddr = np->ioaddr;
1345 data |= MII_WRITE; 1340
1346 data |= (readb (ioaddr) & 0xf8) | MII_WRITE; 1341 data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
1347 writeb (data, ioaddr); 1342 dw8(PhyCtrl, data);
1348 mii_delay (); 1343 mii_delay ();
1349 writeb (data | MII_CLK, ioaddr); 1344 dw8(PhyCtrl, data | MII_CLK);
1350 mii_delay (); 1345 mii_delay ();
1351} 1346}
1352 1347
1353static int 1348static int
1354mii_getbit (struct net_device *dev) 1349mii_getbit (struct net_device *dev)
1355{ 1350{
1356 long ioaddr = dev->base_addr + PhyCtrl; 1351 struct netdev_private *np = netdev_priv(dev);
1352 void __iomem *ioaddr = np->ioaddr;
1357 u8 data; 1353 u8 data;
1358 1354
1359 data = (readb (ioaddr) & 0xf8) | MII_READ; 1355 data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
1360 writeb (data, ioaddr); 1356 dw8(PhyCtrl, data);
1361 mii_delay (); 1357 mii_delay ();
1362 writeb (data | MII_CLK, ioaddr); 1358 dw8(PhyCtrl, data | MII_CLK);
1363 mii_delay (); 1359 mii_delay ();
1364 return ((readb (ioaddr) >> 1) & 1); 1360 return (dr8(PhyCtrl) >> 1) & 1;
1365} 1361}
1366 1362
1367static void 1363static void
1368mii_send_bits (struct net_device *dev, u32 data, int len) 1364mii_send_bits (struct net_device *dev, u32 data, int len)
1369{ 1365{
1370 int i; 1366 int i;
1367
1371 for (i = len - 1; i >= 0; i--) { 1368 for (i = len - 1; i >= 0; i--) {
1372 mii_sendbit (dev, data & (1 << i)); 1369 mii_sendbit (dev, data & (1 << i));
1373 } 1370 }
@@ -1721,28 +1718,29 @@ mii_set_media_pcs (struct net_device *dev)
1721static int 1718static int
1722rio_close (struct net_device *dev) 1719rio_close (struct net_device *dev)
1723{ 1720{
1724 long ioaddr = dev->base_addr;
1725 struct netdev_private *np = netdev_priv(dev); 1721 struct netdev_private *np = netdev_priv(dev);
1722 void __iomem *ioaddr = np->ioaddr;
1723
1724 struct pci_dev *pdev = np->pdev;
1726 struct sk_buff *skb; 1725 struct sk_buff *skb;
1727 int i; 1726 int i;
1728 1727
1729 netif_stop_queue (dev); 1728 netif_stop_queue (dev);
1730 1729
1731 /* Disable interrupts */ 1730 /* Disable interrupts */
1732 writew (0, ioaddr + IntEnable); 1731 dw16(IntEnable, 0);
1733 1732
1734 /* Stop Tx and Rx logics */ 1733 /* Stop Tx and Rx logics */
1735 writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl); 1734 dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
1736 1735
1737 free_irq (dev->irq, dev); 1736 free_irq(pdev->irq, dev);
1738 del_timer_sync (&np->timer); 1737 del_timer_sync (&np->timer);
1739 1738
1740 /* Free all the skbuffs in the queue. */ 1739 /* Free all the skbuffs in the queue. */
1741 for (i = 0; i < RX_RING_SIZE; i++) { 1740 for (i = 0; i < RX_RING_SIZE; i++) {
1742 skb = np->rx_skbuff[i]; 1741 skb = np->rx_skbuff[i];
1743 if (skb) { 1742 if (skb) {
1744 pci_unmap_single(np->pdev, 1743 pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
1745 desc_to_dma(&np->rx_ring[i]),
1746 skb->len, PCI_DMA_FROMDEVICE); 1744 skb->len, PCI_DMA_FROMDEVICE);
1747 dev_kfree_skb (skb); 1745 dev_kfree_skb (skb);
1748 np->rx_skbuff[i] = NULL; 1746 np->rx_skbuff[i] = NULL;
@@ -1753,8 +1751,7 @@ rio_close (struct net_device *dev)
1753 for (i = 0; i < TX_RING_SIZE; i++) { 1751 for (i = 0; i < TX_RING_SIZE; i++) {
1754 skb = np->tx_skbuff[i]; 1752 skb = np->tx_skbuff[i];
1755 if (skb) { 1753 if (skb) {
1756 pci_unmap_single(np->pdev, 1754 pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
1757 desc_to_dma(&np->tx_ring[i]),
1758 skb->len, PCI_DMA_TODEVICE); 1755 skb->len, PCI_DMA_TODEVICE);
1759 dev_kfree_skb (skb); 1756 dev_kfree_skb (skb);
1760 np->tx_skbuff[i] = NULL; 1757 np->tx_skbuff[i] = NULL;
@@ -1778,8 +1775,9 @@ rio_remove1 (struct pci_dev *pdev)
1778 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, 1775 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
1779 np->tx_ring_dma); 1776 np->tx_ring_dma);
1780#ifdef MEM_MAPPING 1777#ifdef MEM_MAPPING
1781 iounmap ((char *) (dev->base_addr)); 1778 pci_iounmap(pdev, np->ioaddr);
1782#endif 1779#endif
1780 pci_iounmap(pdev, np->eeprom_addr);
1783 free_netdev (dev); 1781 free_netdev (dev);
1784 pci_release_regions (pdev); 1782 pci_release_regions (pdev);
1785 pci_disable_device (pdev); 1783 pci_disable_device (pdev);
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba0adcafa55a..40ba6e02988c 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -42,23 +42,6 @@
42#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) 42#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
43#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) 43#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
44 44
45/* This driver was written to use PCI memory space, however x86-oriented
46 hardware often uses I/O space accesses. */
47#ifndef MEM_MAPPING
48#undef readb
49#undef readw
50#undef readl
51#undef writeb
52#undef writew
53#undef writel
54#define readb inb
55#define readw inw
56#define readl inl
57#define writeb outb
58#define writew outw
59#define writel outl
60#endif
61
62/* Offsets to the device registers. 45/* Offsets to the device registers.
63 Unlike software-only systems, device drivers interact with complex hardware. 46 Unlike software-only systems, device drivers interact with complex hardware.
64 It's not useful to define symbolic names for every register bit in the 47 It's not useful to define symbolic names for every register bit in the
@@ -391,6 +374,8 @@ struct netdev_private {
391 dma_addr_t tx_ring_dma; 374 dma_addr_t tx_ring_dma;
392 dma_addr_t rx_ring_dma; 375 dma_addr_t rx_ring_dma;
393 struct pci_dev *pdev; 376 struct pci_dev *pdev;
377 void __iomem *ioaddr;
378 void __iomem *eeprom_addr;
394 spinlock_t tx_lock; 379 spinlock_t tx_lock;
395 spinlock_t rx_lock; 380 spinlock_t rx_lock;
396 struct net_device_stats stats; 381 struct net_device_stats stats;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d783f4f96ec0..d7bb52a7bda1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -522,9 +522,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
522 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); 522 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
523 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 523 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
524 524
525 dev->base_addr = (unsigned long)ioaddr;
526 dev->irq = irq;
527
528 np = netdev_priv(dev); 525 np = netdev_priv(dev);
529 np->base = ioaddr; 526 np->base = ioaddr;
530 np->pci_dev = pdev; 527 np->pci_dev = pdev;
@@ -828,18 +825,19 @@ static int netdev_open(struct net_device *dev)
828{ 825{
829 struct netdev_private *np = netdev_priv(dev); 826 struct netdev_private *np = netdev_priv(dev);
830 void __iomem *ioaddr = np->base; 827 void __iomem *ioaddr = np->base;
828 const int irq = np->pci_dev->irq;
831 unsigned long flags; 829 unsigned long flags;
832 int i; 830 int i;
833 831
834 /* Do we need to reset the chip??? */ 832 /* Do we need to reset the chip??? */
835 833
836 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 834 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
837 if (i) 835 if (i)
838 return i; 836 return i;
839 837
840 if (netif_msg_ifup(np)) 838 if (netif_msg_ifup(np))
841 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 839 printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
842 dev->name, dev->irq); 840
843 init_ring(dev); 841 init_ring(dev);
844 842
845 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); 843 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
@@ -1814,7 +1812,7 @@ static int netdev_close(struct net_device *dev)
1814 } 1812 }
1815#endif /* __i386__ debugging only */ 1813#endif /* __i386__ debugging only */
1816 1814
1817 free_irq(dev->irq, dev); 1815 free_irq(np->pci_dev->irq, dev);
1818 1816
1819 del_timer_sync(&np->timer); 1817 del_timer_sync(&np->timer);
1820 1818
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b276469f74e9..290b26f868c9 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -815,6 +815,7 @@ static const struct ethtool_ops dnet_ethtool_ops = {
815 .set_settings = dnet_set_settings, 815 .set_settings = dnet_set_settings,
816 .get_drvinfo = dnet_get_drvinfo, 816 .get_drvinfo = dnet_get_drvinfo,
817 .get_link = ethtool_op_get_link, 817 .get_link = ethtool_op_get_link,
818 .get_ts_info = ethtool_op_get_ts_info,
818}; 819};
819 820
820static const struct net_device_ops dnet_netdev_ops = { 821static const struct net_device_ops dnet_netdev_ops = {
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 9576ac002c23..ad69cf89491c 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -313,6 +313,23 @@ struct be_vf_cfg {
313#define BE_UC_PMAC_COUNT 30 313#define BE_UC_PMAC_COUNT 30
314#define BE_VF_UC_PMAC_COUNT 2 314#define BE_VF_UC_PMAC_COUNT 2
315 315
316struct phy_info {
317 u8 transceiver;
318 u8 autoneg;
319 u8 fc_autoneg;
320 u8 port_type;
321 u16 phy_type;
322 u16 interface_type;
323 u32 misc_params;
324 u16 auto_speeds_supported;
325 u16 fixed_speeds_supported;
326 int link_speed;
327 int forced_port_speed;
328 u32 dac_cable_len;
329 u32 advertising;
330 u32 supported;
331};
332
316struct be_adapter { 333struct be_adapter {
317 struct pci_dev *pdev; 334 struct pci_dev *pdev;
318 struct net_device *netdev; 335 struct net_device *netdev;
@@ -377,10 +394,6 @@ struct be_adapter {
377 u32 rx_fc; /* Rx flow control */ 394 u32 rx_fc; /* Rx flow control */
378 u32 tx_fc; /* Tx flow control */ 395 u32 tx_fc; /* Tx flow control */
379 bool stats_cmd_sent; 396 bool stats_cmd_sent;
380 int link_speed;
381 u8 port_type;
382 u8 transceiver;
383 u8 autoneg;
384 u8 generation; /* BladeEngine ASIC generation */ 397 u8 generation; /* BladeEngine ASIC generation */
385 u32 flash_status; 398 u32 flash_status;
386 struct completion flash_compl; 399 struct completion flash_compl;
@@ -392,6 +405,7 @@ struct be_adapter {
392 u32 sli_family; 405 u32 sli_family;
393 u8 hba_port_num; 406 u8 hba_port_num;
394 u16 pvid; 407 u16 pvid;
408 struct phy_info phy;
395 u8 wol_cap; 409 u8 wol_cap;
396 bool wol; 410 bool wol;
397 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */ 411 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
@@ -583,4 +597,5 @@ extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
583extern void be_parse_stats(struct be_adapter *adapter); 597extern void be_parse_stats(struct be_adapter *adapter);
584extern int be_load_fw(struct be_adapter *adapter, u8 *func); 598extern int be_load_fw(struct be_adapter *adapter, u8 *func);
585extern bool be_is_wol_supported(struct be_adapter *adapter); 599extern bool be_is_wol_supported(struct be_adapter *adapter);
600extern bool be_pause_supported(struct be_adapter *adapter);
586#endif /* BE_H */ 601#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 67b030d72df1..22be08c03594 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -126,7 +126,7 @@ static void be_async_link_state_process(struct be_adapter *adapter,
126 struct be_async_event_link_state *evt) 126 struct be_async_event_link_state *evt)
127{ 127{
128 /* When link status changes, link speed must be re-queried from FW */ 128 /* When link status changes, link speed must be re-queried from FW */
129 adapter->link_speed = -1; 129 adapter->phy.link_speed = -1;
130 130
131 /* For the initial link status do not rely on the ASYNC event as 131 /* For the initial link status do not rely on the ASYNC event as
132 * it may not be received in some cases. 132 * it may not be received in some cases.
@@ -153,7 +153,7 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
153{ 153{
154 if (evt->physical_port == adapter->port_num) { 154 if (evt->physical_port == adapter->port_num) {
155 /* qos_link_speed is in units of 10 Mbps */ 155 /* qos_link_speed is in units of 10 Mbps */
156 adapter->link_speed = evt->qos_link_speed * 10; 156 adapter->phy.link_speed = evt->qos_link_speed * 10;
157 } 157 }
158} 158}
159 159
@@ -2136,8 +2136,7 @@ err:
2136 return status; 2136 return status;
2137} 2137}
2138 2138
2139int be_cmd_get_phy_info(struct be_adapter *adapter, 2139int be_cmd_get_phy_info(struct be_adapter *adapter)
2140 struct be_phy_info *phy_info)
2141{ 2140{
2142 struct be_mcc_wrb *wrb; 2141 struct be_mcc_wrb *wrb;
2143 struct be_cmd_req_get_phy_info *req; 2142 struct be_cmd_req_get_phy_info *req;
@@ -2170,9 +2169,15 @@ int be_cmd_get_phy_info(struct be_adapter *adapter,
2170 if (!status) { 2169 if (!status) {
2171 struct be_phy_info *resp_phy_info = 2170 struct be_phy_info *resp_phy_info =
2172 cmd.va + sizeof(struct be_cmd_req_hdr); 2171 cmd.va + sizeof(struct be_cmd_req_hdr);
2173 phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type); 2172 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2174 phy_info->interface_type = 2173 adapter->phy.interface_type =
2175 le16_to_cpu(resp_phy_info->interface_type); 2174 le16_to_cpu(resp_phy_info->interface_type);
2175 adapter->phy.auto_speeds_supported =
2176 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2177 adapter->phy.fixed_speeds_supported =
2178 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2179 adapter->phy.misc_params =
2180 le32_to_cpu(resp_phy_info->misc_params);
2176 } 2181 }
2177 pci_free_consistent(adapter->pdev, cmd.size, 2182 pci_free_consistent(adapter->pdev, cmd.size,
2178 cmd.va, cmd.dma); 2183 cmd.va, cmd.dma);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d5b680c56af0..3c543610906a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1309,9 +1309,36 @@ enum {
1309 PHY_TYPE_KX4_10GB, 1309 PHY_TYPE_KX4_10GB,
1310 PHY_TYPE_BASET_10GB, 1310 PHY_TYPE_BASET_10GB,
1311 PHY_TYPE_BASET_1GB, 1311 PHY_TYPE_BASET_1GB,
1312 PHY_TYPE_BASEX_1GB,
1313 PHY_TYPE_SGMII,
1312 PHY_TYPE_DISABLED = 255 1314 PHY_TYPE_DISABLED = 255
1313}; 1315};
1314 1316
1317#define BE_SUPPORTED_SPEED_NONE 0
1318#define BE_SUPPORTED_SPEED_10MBPS 1
1319#define BE_SUPPORTED_SPEED_100MBPS 2
1320#define BE_SUPPORTED_SPEED_1GBPS 4
1321#define BE_SUPPORTED_SPEED_10GBPS 8
1322
1323#define BE_AN_EN 0x2
1324#define BE_PAUSE_SYM_EN 0x80
1325
1326/* MAC speed valid values */
1327#define SPEED_DEFAULT 0x0
1328#define SPEED_FORCED_10GB 0x1
1329#define SPEED_FORCED_1GB 0x2
1330#define SPEED_AUTONEG_10GB 0x3
1331#define SPEED_AUTONEG_1GB 0x4
1332#define SPEED_AUTONEG_100MB 0x5
1333#define SPEED_AUTONEG_10GB_1GB 0x6
1334#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
1335#define SPEED_AUTONEG_1GB_100MB 0x8
1336#define SPEED_AUTONEG_10MB 0x9
1337#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
1338#define SPEED_AUTONEG_100MB_10MB 0xb
1339#define SPEED_FORCED_100MB 0xc
1340#define SPEED_FORCED_10MB 0xd
1341
1315struct be_cmd_req_get_phy_info { 1342struct be_cmd_req_get_phy_info {
1316 struct be_cmd_req_hdr hdr; 1343 struct be_cmd_req_hdr hdr;
1317 u8 rsvd0[24]; 1344 u8 rsvd0[24];
@@ -1321,7 +1348,11 @@ struct be_phy_info {
1321 u16 phy_type; 1348 u16 phy_type;
1322 u16 interface_type; 1349 u16 interface_type;
1323 u32 misc_params; 1350 u32 misc_params;
1324 u32 future_use[4]; 1351 u16 ext_phy_details;
1352 u16 rsvd;
1353 u16 auto_speeds_supported;
1354 u16 fixed_speeds_supported;
1355 u32 future_use[2];
1325}; 1356};
1326 1357
1327struct be_cmd_resp_get_phy_info { 1358struct be_cmd_resp_get_phy_info {
@@ -1655,8 +1686,7 @@ extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1655 struct be_dma_mem *nonemb_cmd); 1686 struct be_dma_mem *nonemb_cmd);
1656extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 1687extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1657 u8 loopback_type, u8 enable); 1688 u8 loopback_type, u8 enable);
1658extern int be_cmd_get_phy_info(struct be_adapter *adapter, 1689extern int be_cmd_get_phy_info(struct be_adapter *adapter);
1659 struct be_phy_info *phy_info);
1660extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 1690extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1661extern void be_detect_dump_ue(struct be_adapter *adapter); 1691extern void be_detect_dump_ue(struct be_adapter *adapter);
1662extern int be_cmd_get_die_temperature(struct be_adapter *adapter); 1692extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index c1ff73cb0e62..dc9f74c69c40 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -433,102 +433,193 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
433 } 433 }
434} 434}
435 435
436static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
437{
438 u32 port;
439
440 switch (phy_type) {
441 case PHY_TYPE_BASET_1GB:
442 case PHY_TYPE_BASEX_1GB:
443 case PHY_TYPE_SGMII:
444 port = PORT_TP;
445 break;
446 case PHY_TYPE_SFP_PLUS_10GB:
447 port = dac_cable_len ? PORT_DA : PORT_FIBRE;
448 break;
449 case PHY_TYPE_XFP_10GB:
450 case PHY_TYPE_SFP_1GB:
451 port = PORT_FIBRE;
452 break;
453 case PHY_TYPE_BASET_10GB:
454 port = PORT_TP;
455 break;
456 default:
457 port = PORT_OTHER;
458 }
459
460 return port;
461}
462
463static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
464{
465 u32 val = 0;
466
467 switch (if_type) {
468 case PHY_TYPE_BASET_1GB:
469 case PHY_TYPE_BASEX_1GB:
470 case PHY_TYPE_SGMII:
471 val |= SUPPORTED_TP;
472 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
473 val |= SUPPORTED_1000baseT_Full;
474 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
475 val |= SUPPORTED_100baseT_Full;
476 if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
477 val |= SUPPORTED_10baseT_Full;
478 break;
479 case PHY_TYPE_KX4_10GB:
480 val |= SUPPORTED_Backplane;
481 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
482 val |= SUPPORTED_1000baseKX_Full;
483 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
484 val |= SUPPORTED_10000baseKX4_Full;
485 break;
486 case PHY_TYPE_KR_10GB:
487 val |= SUPPORTED_Backplane |
488 SUPPORTED_10000baseKR_Full;
489 break;
490 case PHY_TYPE_SFP_PLUS_10GB:
491 case PHY_TYPE_XFP_10GB:
492 case PHY_TYPE_SFP_1GB:
493 val |= SUPPORTED_FIBRE;
494 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
495 val |= SUPPORTED_10000baseT_Full;
496 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
497 val |= SUPPORTED_1000baseT_Full;
498 break;
499 case PHY_TYPE_BASET_10GB:
500 val |= SUPPORTED_TP;
501 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
502 val |= SUPPORTED_10000baseT_Full;
503 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
504 val |= SUPPORTED_1000baseT_Full;
505 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
506 val |= SUPPORTED_100baseT_Full;
507 break;
508 default:
509 val |= SUPPORTED_TP;
510 }
511
512 return val;
513}
514
515static int convert_to_et_speed(u32 be_speed)
516{
517 int et_speed = SPEED_10000;
518
519 switch (be_speed) {
520 case PHY_LINK_SPEED_10MBPS:
521 et_speed = SPEED_10;
522 break;
523 case PHY_LINK_SPEED_100MBPS:
524 et_speed = SPEED_100;
525 break;
526 case PHY_LINK_SPEED_1GBPS:
527 et_speed = SPEED_1000;
528 break;
529 case PHY_LINK_SPEED_10GBPS:
530 et_speed = SPEED_10000;
531 break;
532 }
533
534 return et_speed;
535}
536
537bool be_pause_supported(struct be_adapter *adapter)
538{
539 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
540 adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
541 false : true;
542}
543
436static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 544static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
437{ 545{
438 struct be_adapter *adapter = netdev_priv(netdev); 546 struct be_adapter *adapter = netdev_priv(netdev);
439 struct be_phy_info phy_info; 547 u8 port_speed = 0;
440 u8 mac_speed = 0;
441 u16 link_speed = 0; 548 u16 link_speed = 0;
442 u8 link_status; 549 u8 link_status;
550 u32 et_speed = 0;
443 int status; 551 int status;
444 552
445 if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { 553 if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
446 status = be_cmd_link_status_query(adapter, &mac_speed, 554 if (adapter->phy.forced_port_speed < 0) {
447 &link_speed, &link_status, 0); 555 status = be_cmd_link_status_query(adapter, &port_speed,
448 if (!status) 556 &link_speed, &link_status, 0);
449 be_link_status_update(adapter, link_status); 557 if (!status)
450 558 be_link_status_update(adapter, link_status);
451 /* link_speed is in units of 10 Mbps */ 559 if (link_speed)
452 if (link_speed) { 560 et_speed = link_speed;
453 ethtool_cmd_speed_set(ecmd, link_speed*10); 561 else
562 et_speed = convert_to_et_speed(port_speed);
454 } else { 563 } else {
455 switch (mac_speed) { 564 et_speed = adapter->phy.forced_port_speed;
456 case PHY_LINK_SPEED_10MBPS:
457 ethtool_cmd_speed_set(ecmd, SPEED_10);
458 break;
459 case PHY_LINK_SPEED_100MBPS:
460 ethtool_cmd_speed_set(ecmd, SPEED_100);
461 break;
462 case PHY_LINK_SPEED_1GBPS:
463 ethtool_cmd_speed_set(ecmd, SPEED_1000);
464 break;
465 case PHY_LINK_SPEED_10GBPS:
466 ethtool_cmd_speed_set(ecmd, SPEED_10000);
467 break;
468 case PHY_LINK_SPEED_ZERO:
469 ethtool_cmd_speed_set(ecmd, 0);
470 break;
471 }
472 } 565 }
473 566
474 status = be_cmd_get_phy_info(adapter, &phy_info); 567 ethtool_cmd_speed_set(ecmd, et_speed);
475 if (!status) { 568
476 switch (phy_info.interface_type) { 569 status = be_cmd_get_phy_info(adapter);
477 case PHY_TYPE_XFP_10GB: 570 if (status)
478 case PHY_TYPE_SFP_1GB: 571 return status;
479 case PHY_TYPE_SFP_PLUS_10GB: 572
480 ecmd->port = PORT_FIBRE; 573 ecmd->supported =
481 break; 574 convert_to_et_setting(adapter->phy.interface_type,
482 default: 575 adapter->phy.auto_speeds_supported |
483 ecmd->port = PORT_TP; 576 adapter->phy.fixed_speeds_supported);
484 break; 577 ecmd->advertising =
485 } 578 convert_to_et_setting(adapter->phy.interface_type,
579 adapter->phy.auto_speeds_supported);
486 580
487 switch (phy_info.interface_type) { 581 ecmd->port = be_get_port_type(adapter->phy.interface_type,
488 case PHY_TYPE_KR_10GB: 582 adapter->phy.dac_cable_len);
489 case PHY_TYPE_KX4_10GB: 583
490 ecmd->autoneg = AUTONEG_ENABLE; 584 if (adapter->phy.auto_speeds_supported) {
585 ecmd->supported |= SUPPORTED_Autoneg;
586 ecmd->autoneg = AUTONEG_ENABLE;
587 ecmd->advertising |= ADVERTISED_Autoneg;
588 }
589
590 if (be_pause_supported(adapter)) {
591 ecmd->supported |= SUPPORTED_Pause;
592 ecmd->advertising |= ADVERTISED_Pause;
593 }
594
595 switch (adapter->phy.interface_type) {
596 case PHY_TYPE_KR_10GB:
597 case PHY_TYPE_KX4_10GB:
491 ecmd->transceiver = XCVR_INTERNAL; 598 ecmd->transceiver = XCVR_INTERNAL;
492 break; 599 break;
493 default: 600 default:
494 ecmd->autoneg = AUTONEG_DISABLE; 601 ecmd->transceiver = XCVR_EXTERNAL;
495 ecmd->transceiver = XCVR_EXTERNAL; 602 break;
496 break;
497 }
498 } 603 }
499 604
500 /* Save for future use */ 605 /* Save for future use */
501 adapter->link_speed = ethtool_cmd_speed(ecmd); 606 adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
502 adapter->port_type = ecmd->port; 607 adapter->phy.port_type = ecmd->port;
503 adapter->transceiver = ecmd->transceiver; 608 adapter->phy.transceiver = ecmd->transceiver;
504 adapter->autoneg = ecmd->autoneg; 609 adapter->phy.autoneg = ecmd->autoneg;
610 adapter->phy.advertising = ecmd->advertising;
611 adapter->phy.supported = ecmd->supported;
505 } else { 612 } else {
506 ethtool_cmd_speed_set(ecmd, adapter->link_speed); 613 ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
507 ecmd->port = adapter->port_type; 614 ecmd->port = adapter->phy.port_type;
508 ecmd->transceiver = adapter->transceiver; 615 ecmd->transceiver = adapter->phy.transceiver;
509 ecmd->autoneg = adapter->autoneg; 616 ecmd->autoneg = adapter->phy.autoneg;
617 ecmd->advertising = adapter->phy.advertising;
618 ecmd->supported = adapter->phy.supported;
510 } 619 }
511 620
512 ecmd->duplex = DUPLEX_FULL; 621 ecmd->duplex = DUPLEX_FULL;
513 ecmd->phy_address = adapter->port_num; 622 ecmd->phy_address = adapter->port_num;
514 switch (ecmd->port) {
515 case PORT_FIBRE:
516 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
517 break;
518 case PORT_TP:
519 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
520 break;
521 case PORT_AUI:
522 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
523 break;
524 }
525
526 if (ecmd->autoneg) {
527 ecmd->supported |= SUPPORTED_1000baseT_Full;
528 ecmd->supported |= SUPPORTED_Autoneg;
529 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
530 ADVERTISED_1000baseT_Full);
531 }
532 623
533 return 0; 624 return 0;
534} 625}
@@ -548,7 +639,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
548 struct be_adapter *adapter = netdev_priv(netdev); 639 struct be_adapter *adapter = netdev_priv(netdev);
549 640
550 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause); 641 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
551 ecmd->autoneg = 0; 642 ecmd->autoneg = adapter->phy.fc_autoneg;
552} 643}
553 644
554static int 645static int
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 528a886bc2cd..a5bc6084be05 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2571,11 +2571,12 @@ err:
2571static void be_setup_init(struct be_adapter *adapter) 2571static void be_setup_init(struct be_adapter *adapter)
2572{ 2572{
2573 adapter->vlan_prio_bmap = 0xff; 2573 adapter->vlan_prio_bmap = 0xff;
2574 adapter->link_speed = -1; 2574 adapter->phy.link_speed = -1;
2575 adapter->if_handle = -1; 2575 adapter->if_handle = -1;
2576 adapter->be3_native = false; 2576 adapter->be3_native = false;
2577 adapter->promiscuous = false; 2577 adapter->promiscuous = false;
2578 adapter->eq_next_idx = 0; 2578 adapter->eq_next_idx = 0;
2579 adapter->phy.forced_port_speed = -1;
2579} 2580}
2580 2581
2581static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac) 2582static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
@@ -2707,6 +2708,10 @@ static int be_setup(struct be_adapter *adapter)
2707 goto err; 2708 goto err;
2708 } 2709 }
2709 2710
2711 be_cmd_get_phy_info(adapter);
2712 if (be_pause_supported(adapter))
2713 adapter->phy.fc_autoneg = 1;
2714
2710 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 2715 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2711 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; 2716 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2712 2717
@@ -2760,17 +2765,8 @@ static bool be_flash_redboot(struct be_adapter *adapter,
2760 2765
2761static bool phy_flashing_required(struct be_adapter *adapter) 2766static bool phy_flashing_required(struct be_adapter *adapter)
2762{ 2767{
2763 int status = 0; 2768 return (adapter->phy.phy_type == TN_8022 &&
2764 struct be_phy_info phy_info; 2769 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2765
2766 status = be_cmd_get_phy_info(adapter, &phy_info);
2767 if (status)
2768 return false;
2769 if ((phy_info.phy_type == TN_8022) &&
2770 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2771 return true;
2772 }
2773 return false;
2774} 2770}
2775 2771
2776static int be_flash_data(struct be_adapter *adapter, 2772static int be_flash_data(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 1637b9862292..9d71c9cc300b 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -545,9 +545,6 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
545 /* Reset the chip to erase previous misconfiguration. */ 545 /* Reset the chip to erase previous misconfiguration. */
546 iowrite32(0x00000001, ioaddr + BCR); 546 iowrite32(0x00000001, ioaddr + BCR);
547 547
548 dev->base_addr = (unsigned long)ioaddr;
549 dev->irq = irq;
550
551 /* Make certain the descriptor lists are aligned. */ 548 /* Make certain the descriptor lists are aligned. */
552 np = netdev_priv(dev); 549 np = netdev_priv(dev);
553 np->mem = ioaddr; 550 np->mem = ioaddr;
@@ -832,11 +829,13 @@ static int netdev_open(struct net_device *dev)
832{ 829{
833 struct netdev_private *np = netdev_priv(dev); 830 struct netdev_private *np = netdev_priv(dev);
834 void __iomem *ioaddr = np->mem; 831 void __iomem *ioaddr = np->mem;
835 int i; 832 const int irq = np->pci_dev->irq;
833 int rc, i;
836 834
837 iowrite32(0x00000001, ioaddr + BCR); /* Reset */ 835 iowrite32(0x00000001, ioaddr + BCR); /* Reset */
838 836
839 if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev)) 837 rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
838 if (rc)
840 return -EAGAIN; 839 return -EAGAIN;
841 840
842 for (i = 0; i < 3; i++) 841 for (i = 0; i < 3; i++)
@@ -924,8 +923,7 @@ static int netdev_open(struct net_device *dev)
924 np->reset_timer.data = (unsigned long) dev; 923 np->reset_timer.data = (unsigned long) dev;
925 np->reset_timer.function = reset_timer; 924 np->reset_timer.function = reset_timer;
926 np->reset_timer_armed = 0; 925 np->reset_timer_armed = 0;
927 926 return rc;
928 return 0;
929} 927}
930 928
931 929
@@ -1910,7 +1908,7 @@ static int netdev_close(struct net_device *dev)
1910 del_timer_sync(&np->timer); 1908 del_timer_sync(&np->timer);
1911 del_timer_sync(&np->reset_timer); 1909 del_timer_sync(&np->reset_timer);
1912 1910
1913 free_irq(dev->irq, dev); 1911 free_irq(np->pci_dev->irq, dev);
1914 1912
1915 /* Free all the skbuffs in the Rx queue. */ 1913 /* Free all the skbuffs in the Rx queue. */
1916 for (i = 0; i < RX_RING_SIZE; i++) { 1914 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index a12b3f5bc025..7fa0227c9c02 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1161,6 +1161,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
1161 .set_settings = fec_enet_set_settings, 1161 .set_settings = fec_enet_set_settings,
1162 .get_drvinfo = fec_enet_get_drvinfo, 1162 .get_drvinfo = fec_enet_get_drvinfo,
1163 .get_link = ethtool_op_get_link, 1163 .get_link = ethtool_op_get_link,
1164 .get_ts_info = ethtool_op_get_ts_info,
1164}; 1165};
1165 1166
1166static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1167static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7b34d8c698da..97f947b3d94a 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -811,6 +811,7 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
811 .get_link = ethtool_op_get_link, 811 .get_link = ethtool_op_get_link,
812 .get_msglevel = mpc52xx_fec_get_msglevel, 812 .get_msglevel = mpc52xx_fec_get_msglevel,
813 .set_msglevel = mpc52xx_fec_set_msglevel, 813 .set_msglevel = mpc52xx_fec_set_msglevel,
814 .get_ts_info = ethtool_op_get_ts_info,
814}; 815};
815 816
816 817
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index e4e6cd2c5f82..2b7633f766d9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -963,6 +963,7 @@ static const struct ethtool_ops fs_ethtool_ops = {
963 .get_msglevel = fs_get_msglevel, 963 .get_msglevel = fs_get_msglevel,
964 .set_msglevel = fs_set_msglevel, 964 .set_msglevel = fs_set_msglevel,
965 .get_regs = fs_get_regs, 965 .get_regs = fs_get_regs,
966 .get_ts_info = ethtool_op_get_ts_info,
966}; 967};
967 968
968static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 969static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bed5303997..1adb0245b9dd 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -136,7 +136,7 @@ static void gfar_netpoll(struct net_device *dev);
136int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 136int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
137static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 137static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
138static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 138static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
139 int amount_pull); 139 int amount_pull, struct napi_struct *napi);
140void gfar_halt(struct net_device *dev); 140void gfar_halt(struct net_device *dev);
141static void gfar_halt_nodisable(struct net_device *dev); 141static void gfar_halt_nodisable(struct net_device *dev);
142void gfar_start(struct net_device *dev); 142void gfar_start(struct net_device *dev);
@@ -2675,12 +2675,12 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2675/* gfar_process_frame() -- handle one incoming packet if skb 2675/* gfar_process_frame() -- handle one incoming packet if skb
2676 * isn't NULL. */ 2676 * isn't NULL. */
2677static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 2677static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2678 int amount_pull) 2678 int amount_pull, struct napi_struct *napi)
2679{ 2679{
2680 struct gfar_private *priv = netdev_priv(dev); 2680 struct gfar_private *priv = netdev_priv(dev);
2681 struct rxfcb *fcb = NULL; 2681 struct rxfcb *fcb = NULL;
2682 2682
2683 int ret; 2683 gro_result_t ret;
2684 2684
2685 /* fcb is at the beginning if exists */ 2685 /* fcb is at the beginning if exists */
2686 fcb = (struct rxfcb *)skb->data; 2686 fcb = (struct rxfcb *)skb->data;
@@ -2719,9 +2719,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2719 __vlan_hwaccel_put_tag(skb, fcb->vlctl); 2719 __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2720 2720
2721 /* Send the packet up the stack */ 2721 /* Send the packet up the stack */
2722 ret = netif_receive_skb(skb); 2722 ret = napi_gro_receive(napi, skb);
2723 2723
2724 if (NET_RX_DROP == ret) 2724 if (GRO_DROP == ret)
2725 priv->extra_stats.kernel_dropped++; 2725 priv->extra_stats.kernel_dropped++;
2726 2726
2727 return 0; 2727 return 0;
@@ -2783,7 +2783,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2783 skb_put(skb, pkt_len); 2783 skb_put(skb, pkt_len);
2784 rx_queue->stats.rx_bytes += pkt_len; 2784 rx_queue->stats.rx_bytes += pkt_len;
2785 skb_record_rx_queue(skb, rx_queue->qindex); 2785 skb_record_rx_queue(skb, rx_queue->qindex);
2786 gfar_process_frame(dev, skb, amount_pull); 2786 gfar_process_frame(dev, skb, amount_pull,
2787 &rx_queue->grp->napi);
2787 2788
2788 } else { 2789 } else {
2789 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2790 netif_warn(priv, rx_err, dev, "Missing skb!\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4c9f8d487dbb..2136c7ff5e6d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1210,4 +1210,7 @@ struct filer_table {
1210 struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; 1210 struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
1211}; 1211};
1212 1212
1213/* The gianfar_ptp module will set this variable */
1214extern int gfar_phc_index;
1215
1213#endif /* __GIANFAR_H */ 1216#endif /* __GIANFAR_H */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8d74efd04bb9..8a025570d97e 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -26,6 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
29#include <linux/net_tstamp.h>
29#include <linux/skbuff.h> 30#include <linux/skbuff.h>
30#include <linux/spinlock.h> 31#include <linux/spinlock.h>
31#include <linux/mm.h> 32#include <linux/mm.h>
@@ -1739,6 +1740,34 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1739 return ret; 1740 return ret;
1740} 1741}
1741 1742
1743int gfar_phc_index = -1;
1744
1745static int gfar_get_ts_info(struct net_device *dev,
1746 struct ethtool_ts_info *info)
1747{
1748 struct gfar_private *priv = netdev_priv(dev);
1749
1750 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
1751 info->so_timestamping =
1752 SOF_TIMESTAMPING_RX_SOFTWARE |
1753 SOF_TIMESTAMPING_SOFTWARE;
1754 info->phc_index = -1;
1755 return 0;
1756 }
1757 info->so_timestamping =
1758 SOF_TIMESTAMPING_TX_HARDWARE |
1759 SOF_TIMESTAMPING_RX_HARDWARE |
1760 SOF_TIMESTAMPING_RAW_HARDWARE;
1761 info->phc_index = gfar_phc_index;
1762 info->tx_types =
1763 (1 << HWTSTAMP_TX_OFF) |
1764 (1 << HWTSTAMP_TX_ON);
1765 info->rx_filters =
1766 (1 << HWTSTAMP_FILTER_NONE) |
1767 (1 << HWTSTAMP_FILTER_ALL);
1768 return 0;
1769}
1770
1742const struct ethtool_ops gfar_ethtool_ops = { 1771const struct ethtool_ops gfar_ethtool_ops = {
1743 .get_settings = gfar_gsettings, 1772 .get_settings = gfar_gsettings,
1744 .set_settings = gfar_ssettings, 1773 .set_settings = gfar_ssettings,
@@ -1761,4 +1790,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
1761#endif 1790#endif
1762 .set_rxnfc = gfar_set_nfc, 1791 .set_rxnfc = gfar_set_nfc,
1763 .get_rxnfc = gfar_get_nfc, 1792 .get_rxnfc = gfar_get_nfc,
1793 .get_ts_info = gfar_get_ts_info,
1764}; 1794};
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 5fd620bec15c..c08e5d40fecb 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -515,6 +515,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
515 err = PTR_ERR(etsects->clock); 515 err = PTR_ERR(etsects->clock);
516 goto no_clock; 516 goto no_clock;
517 } 517 }
518 gfar_phc_clock = ptp_clock_index(etsects->clock);
518 519
519 dev_set_drvdata(&dev->dev, etsects); 520 dev_set_drvdata(&dev->dev, etsects);
520 521
@@ -538,6 +539,7 @@ static int gianfar_ptp_remove(struct platform_device *dev)
538 gfar_write(&etsects->regs->tmr_temask, 0); 539 gfar_write(&etsects->regs->tmr_temask, 0);
539 gfar_write(&etsects->regs->tmr_ctrl, 0); 540 gfar_write(&etsects->regs->tmr_ctrl, 0);
540 541
542 gfar_phc_clock = -1;
541 ptp_clock_unregister(etsects->clock); 543 ptp_clock_unregister(etsects->clock);
542 iounmap(etsects->regs); 544 iounmap(etsects->regs);
543 release_resource(etsects->rsrc); 545 release_resource(etsects->rsrc);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index a97257f91a3d..37b035306013 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -415,6 +415,7 @@ static const struct ethtool_ops uec_ethtool_ops = {
415 .get_ethtool_stats = uec_get_ethtool_stats, 415 .get_ethtool_stats = uec_get_ethtool_stats,
416 .get_wol = uec_get_wol, 416 .get_wol = uec_get_wol,
417 .set_wol = uec_set_wol, 417 .set_wol = uec_set_wol,
418 .get_ts_info = ethtool_op_get_ts_info,
418}; 419};
419 420
420void uec_set_ethtool_ops(struct net_device *netdev) 421void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 76213162fbe3..74215c05d799 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_INTEL
7 default y 7 default y
8 depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \ 8 depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
9 ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ 9 ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
10 GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \ 10 GSC || BVME6000 || MVME16x || \
11 (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \ 11 (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
12 EXPERIMENTAL 12 EXPERIMENTAL
13 ---help--- 13 ---help---
@@ -120,6 +120,17 @@ config IGB_DCA
120 driver. DCA is a method for warming the CPU cache before data 120 driver. DCA is a method for warming the CPU cache before data
121 is used, with the intent of lessening the impact of cache misses. 121 is used, with the intent of lessening the impact of cache misses.
122 122
123config IGB_PTP
124 bool "PTP Hardware Clock (PHC)"
125 default y
126 depends on IGB && PTP_1588_CLOCK
127 ---help---
128 Say Y here if you want to use PTP Hardware Clock (PHC) in the
129 driver. Only the basic clock operations have been implemented.
130
131 Every timestamp and clock read operations must consult the
132 overflow counter to form a correct time value.
133
123config IGBVF 134config IGBVF
124 tristate "Intel(R) 82576 Virtual Function Ethernet support" 135 tristate "Intel(R) 82576 Virtual Function Ethernet support"
125 depends on PCI 136 depends on PCI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index e498effb85d9..ada720b42ff6 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1759,6 +1759,7 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1759 skb->data, skb->len, PCI_DMA_TODEVICE)); 1759 skb->data, skb->len, PCI_DMA_TODEVICE));
1760 /* check for mapping failure? */ 1760 /* check for mapping failure? */
1761 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1761 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1762 skb_tx_timestamp(skb);
1762} 1763}
1763 1764
1764static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, 1765static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
@@ -2733,6 +2734,7 @@ static const struct ethtool_ops e100_ethtool_ops = {
2733 .set_phys_id = e100_set_phys_id, 2734 .set_phys_id = e100_set_phys_id,
2734 .get_ethtool_stats = e100_get_ethtool_stats, 2735 .get_ethtool_stats = e100_get_ethtool_stats,
2735 .get_sset_count = e100_get_sset_count, 2736 .get_sset_count = e100_get_sset_count,
2737 .get_ts_info = ethtool_op_get_ts_info,
2736}; 2738};
2737 2739
2738static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2740static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 4348b6fd44fa..3d712f262e83 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -827,9 +827,10 @@ static int e1000_set_features(struct net_device *netdev,
827 if (changed & NETIF_F_HW_VLAN_RX) 827 if (changed & NETIF_F_HW_VLAN_RX)
828 e1000_vlan_mode(netdev, features); 828 e1000_vlan_mode(netdev, features);
829 829
830 if (!(changed & NETIF_F_RXCSUM)) 830 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
831 return 0; 831 return 0;
832 832
833 netdev->features = features;
833 adapter->rx_csum = !!(features & NETIF_F_RXCSUM); 834 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
834 835
835 if (netif_running(netdev)) 836 if (netif_running(netdev))
@@ -1074,6 +1075,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1074 1075
1075 netdev->features |= netdev->hw_features; 1076 netdev->features |= netdev->hw_features;
1076 netdev->hw_features |= NETIF_F_RXCSUM; 1077 netdev->hw_features |= NETIF_F_RXCSUM;
1078 netdev->hw_features |= NETIF_F_RXALL;
1077 netdev->hw_features |= NETIF_F_RXFCS; 1079 netdev->hw_features |= NETIF_F_RXFCS;
1078 1080
1079 if (pci_using_dac) { 1081 if (pci_using_dac) {
@@ -1841,6 +1843,22 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1841 break; 1843 break;
1842 } 1844 }
1843 1845
1846 /* This is useful for sniffing bad packets. */
1847 if (adapter->netdev->features & NETIF_F_RXALL) {
1848 /* UPE and MPE will be handled by normal PROMISC logic
1849 * in e1000e_set_rx_mode */
1850 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1851 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1852 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1853
1854 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1855 E1000_RCTL_DPF | /* Allow filtered pause */
1856 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1857 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1858 * and that breaks VLANs.
1859 */
1860 }
1861
1844 ew32(RCTL, rctl); 1862 ew32(RCTL, rctl);
1845} 1863}
1846 1864
@@ -4057,6 +4075,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4057 irq_flags); 4075 irq_flags);
4058 length--; 4076 length--;
4059 } else { 4077 } else {
4078 if (netdev->features & NETIF_F_RXALL)
4079 goto process_skb;
4060 /* recycle both page and skb */ 4080 /* recycle both page and skb */
4061 buffer_info->skb = skb; 4081 buffer_info->skb = skb;
4062 /* an error means any chain goes out the window 4082 /* an error means any chain goes out the window
@@ -4069,6 +4089,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4069 } 4089 }
4070 4090
4071#define rxtop rx_ring->rx_skb_top 4091#define rxtop rx_ring->rx_skb_top
4092process_skb:
4072 if (!(status & E1000_RXD_STAT_EOP)) { 4093 if (!(status & E1000_RXD_STAT_EOP)) {
4073 /* this descriptor is only the beginning (or middle) */ 4094 /* this descriptor is only the beginning (or middle) */
4074 if (!rxtop) { 4095 if (!rxtop) {
@@ -4276,12 +4297,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4276 flags); 4297 flags);
4277 length--; 4298 length--;
4278 } else { 4299 } else {
4300 if (netdev->features & NETIF_F_RXALL)
4301 goto process_skb;
4279 /* recycle */ 4302 /* recycle */
4280 buffer_info->skb = skb; 4303 buffer_info->skb = skb;
4281 goto next_desc; 4304 goto next_desc;
4282 } 4305 }
4283 } 4306 }
4284 4307
4308process_skb:
4285 total_rx_bytes += (length - 4); /* don't count FCS */ 4309 total_rx_bytes += (length - 4); /* don't count FCS */
4286 total_rx_packets++; 4310 total_rx_packets++;
4287 4311
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index bac9dda31b6c..fbc84d415762 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -228,9 +228,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
228 /* FWSM register */ 228 /* FWSM register */
229 mac->has_fwsm = true; 229 mac->has_fwsm = true;
230 /* ARC supported; valid only if manageability features are enabled. */ 230 /* ARC supported; valid only if manageability features are enabled. */
231 mac->arc_subsystem_valid = 231 mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK);
232 (er32(FWSM) & E1000_FWSM_MODE_MASK)
233 ? true : false;
234 /* Adaptive IFS not supported */ 232 /* Adaptive IFS not supported */
235 mac->adaptive_ifs = false; 233 mac->adaptive_ifs = false;
236 234
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index b3fdc6977f2e..609c18cb300a 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -295,9 +295,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
295 * ARC supported; valid only if manageability features are 295 * ARC supported; valid only if manageability features are
296 * enabled. 296 * enabled.
297 */ 297 */
298 mac->arc_subsystem_valid = 298 mac->arc_subsystem_valid = !!(er32(FWSM) &
299 (er32(FWSM) & E1000_FWSM_MODE_MASK) 299 E1000_FWSM_MODE_MASK);
300 ? true : false;
301 break; 300 break;
302 case e1000_82574: 301 case e1000_82574:
303 case e1000_82583: 302 case e1000_82583:
@@ -798,7 +797,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
798 /* Check for pending operations. */ 797 /* Check for pending operations. */
799 for (i = 0; i < E1000_FLASH_UPDATES; i++) { 798 for (i = 0; i < E1000_FLASH_UPDATES; i++) {
800 usleep_range(1000, 2000); 799 usleep_range(1000, 2000);
801 if ((er32(EECD) & E1000_EECD_FLUPD) == 0) 800 if (!(er32(EECD) & E1000_EECD_FLUPD))
802 break; 801 break;
803 } 802 }
804 803
@@ -822,7 +821,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
822 821
823 for (i = 0; i < E1000_FLASH_UPDATES; i++) { 822 for (i = 0; i < E1000_FLASH_UPDATES; i++) {
824 usleep_range(1000, 2000); 823 usleep_range(1000, 2000);
825 if ((er32(EECD) & E1000_EECD_FLUPD) == 0) 824 if (!(er32(EECD) & E1000_EECD_FLUPD))
826 break; 825 break;
827 } 826 }
828 827
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index db35dd5d96de..4f1edd9c22f1 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -259,8 +259,7 @@ static int e1000_set_settings(struct net_device *netdev,
259 * cannot be changed 259 * cannot be changed
260 */ 260 */
261 if (hw->phy.ops.check_reset_block(hw)) { 261 if (hw->phy.ops.check_reset_block(hw)) {
262 e_err("Cannot change link characteristics when SoL/IDER is " 262 e_err("Cannot change link characteristics when SoL/IDER is active.\n");
263 "active.\n");
264 return -EINVAL; 263 return -EINVAL;
265 } 264 }
266 265
@@ -403,15 +402,15 @@ static void e1000_get_regs(struct net_device *netdev,
403 regs_buff[1] = er32(STATUS); 402 regs_buff[1] = er32(STATUS);
404 403
405 regs_buff[2] = er32(RCTL); 404 regs_buff[2] = er32(RCTL);
406 regs_buff[3] = er32(RDLEN); 405 regs_buff[3] = er32(RDLEN(0));
407 regs_buff[4] = er32(RDH); 406 regs_buff[4] = er32(RDH(0));
408 regs_buff[5] = er32(RDT); 407 regs_buff[5] = er32(RDT(0));
409 regs_buff[6] = er32(RDTR); 408 regs_buff[6] = er32(RDTR);
410 409
411 regs_buff[7] = er32(TCTL); 410 regs_buff[7] = er32(TCTL);
412 regs_buff[8] = er32(TDLEN); 411 regs_buff[8] = er32(TDLEN(0));
413 regs_buff[9] = er32(TDH); 412 regs_buff[9] = er32(TDH(0));
414 regs_buff[10] = er32(TDT); 413 regs_buff[10] = er32(TDT(0));
415 regs_buff[11] = er32(TIDV); 414 regs_buff[11] = er32(TIDV);
416 415
417 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ 416 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
@@ -727,9 +726,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
727 (test[pat] & write)); 726 (test[pat] & write));
728 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 727 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
729 if (val != (test[pat] & write & mask)) { 728 if (val != (test[pat] & write & mask)) {
730 e_err("pattern test reg %04X failed: got 0x%08X " 729 e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
731 "expected 0x%08X\n", reg + offset, val, 730 reg + offset, val, (test[pat] & write & mask));
732 (test[pat] & write & mask));
733 *data = reg; 731 *data = reg;
734 return 1; 732 return 1;
735 } 733 }
@@ -744,8 +742,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
744 __ew32(&adapter->hw, reg, write & mask); 742 __ew32(&adapter->hw, reg, write & mask);
745 val = __er32(&adapter->hw, reg); 743 val = __er32(&adapter->hw, reg);
746 if ((write & mask) != (val & mask)) { 744 if ((write & mask) != (val & mask)) {
747 e_err("set/check reg %04X test failed: got 0x%08X " 745 e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
748 "expected 0x%08X\n", reg, (val & mask), (write & mask)); 746 reg, (val & mask), (write & mask));
749 *data = reg; 747 *data = reg;
750 return 1; 748 return 1;
751 } 749 }
@@ -797,8 +795,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
797 ew32(STATUS, toggle); 795 ew32(STATUS, toggle);
798 after = er32(STATUS) & toggle; 796 after = er32(STATUS) & toggle;
799 if (value != after) { 797 if (value != after) {
800 e_err("failed STATUS register test got: 0x%08X expected: " 798 e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
801 "0x%08X\n", after, value); 799 after, value);
802 *data = 1; 800 *data = 1;
803 return 1; 801 return 1;
804 } 802 }
@@ -813,15 +811,15 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
813 } 811 }
814 812
815 REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); 813 REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
816 REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 814 REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
817 REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); 815 REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
818 REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); 816 REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
819 REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); 817 REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
820 REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); 818 REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
821 REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); 819 REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
822 REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); 820 REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
823 REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 821 REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
824 REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); 822 REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
825 823
826 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); 824 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
827 825
@@ -830,10 +828,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
830 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); 828 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
831 829
832 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); 830 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
833 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 831 REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
834 if (!(adapter->flags & FLAG_IS_ICH)) 832 if (!(adapter->flags & FLAG_IS_ICH))
835 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); 833 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
836 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 834 REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
837 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); 835 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
838 mask = 0x8003FFFF; 836 mask = 0x8003FFFF;
839 switch (mac->type) { 837 switch (mac->type) {
@@ -1104,11 +1102,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1104 tx_ring->next_to_use = 0; 1102 tx_ring->next_to_use = 0;
1105 tx_ring->next_to_clean = 0; 1103 tx_ring->next_to_clean = 0;
1106 1104
1107 ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1105 ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1108 ew32(TDBAH, ((u64) tx_ring->dma >> 32)); 1106 ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
1109 ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); 1107 ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
1110 ew32(TDH, 0); 1108 ew32(TDH(0), 0);
1111 ew32(TDT, 0); 1109 ew32(TDT(0), 0);
1112 ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | 1110 ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
1113 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1111 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1114 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1112 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
@@ -1168,11 +1166,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1168 rctl = er32(RCTL); 1166 rctl = er32(RCTL);
1169 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 1167 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
1170 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1168 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1171 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); 1169 ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
1172 ew32(RDBAH, ((u64) rx_ring->dma >> 32)); 1170 ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
1173 ew32(RDLEN, rx_ring->size); 1171 ew32(RDLEN(0), rx_ring->size);
1174 ew32(RDH, 0); 1172 ew32(RDH(0), 0);
1175 ew32(RDT, 0); 1173 ew32(RDT(0), 0);
1176 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1174 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1177 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | 1175 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
1178 E1000_RCTL_SBP | E1000_RCTL_SECRC | 1176 E1000_RCTL_SBP | E1000_RCTL_SECRC |
@@ -1534,7 +1532,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1534 int ret_val = 0; 1532 int ret_val = 0;
1535 unsigned long time; 1533 unsigned long time;
1536 1534
1537 ew32(RDT, rx_ring->count - 1); 1535 ew32(RDT(0), rx_ring->count - 1);
1538 1536
1539 /* 1537 /*
1540 * Calculate the loop count based on the largest descriptor ring 1538 * Calculate the loop count based on the largest descriptor ring
@@ -1561,7 +1559,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1561 if (k == tx_ring->count) 1559 if (k == tx_ring->count)
1562 k = 0; 1560 k = 0;
1563 } 1561 }
1564 ew32(TDT, k); 1562 ew32(TDT(0), k);
1565 e1e_flush(); 1563 e1e_flush();
1566 msleep(200); 1564 msleep(200);
1567 time = jiffies; /* set the start time for the receive */ 1565 time = jiffies; /* set the start time for the receive */
@@ -1791,8 +1789,7 @@ static void e1000_get_wol(struct net_device *netdev,
1791 wol->supported &= ~WAKE_UCAST; 1789 wol->supported &= ~WAKE_UCAST;
1792 1790
1793 if (adapter->wol & E1000_WUFC_EX) 1791 if (adapter->wol & E1000_WUFC_EX)
1794 e_err("Interface does not support directed (unicast) " 1792 e_err("Interface does not support directed (unicast) frame wake-up packets\n");
1795 "frame wake-up packets\n");
1796 } 1793 }
1797 1794
1798 if (adapter->wol & E1000_WUFC_EX) 1795 if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f82ecf536c8b..923d3fd6ce11 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -94,31 +94,40 @@ enum e1e_registers {
94 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ 94 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
95 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ 95 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
96 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ 96 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
97 E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ 97/*
98 E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ 98 * Convenience macros
99 E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */
100 E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
101 E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
102 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
103 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
104#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
105 E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
106
107/* Convenience macros
108 * 99 *
109 * Note: "_n" is the queue number of the register to be written to. 100 * Note: "_n" is the queue number of the register to be written to.
110 * 101 *
111 * Example usage: 102 * Example usage:
112 * E1000_RDBAL_REG(current_rx_queue) 103 * E1000_RDBAL(current_rx_queue)
113 *
114 */ 104 */
115#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) 105 E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
106#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8))
107 E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
108#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8))
109 E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
110#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8))
111 E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
112#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8))
113 E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
114#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8))
115 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
116 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
117#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
118 E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
119
116 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ 120 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
117 E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ 121 E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
118 E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ 122#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8))
119 E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ 123 E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
120 E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ 124#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8))
121 E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ 125 E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
126#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8))
127 E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
128#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8))
129 E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
130#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8))
122 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ 131 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
123 E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ 132 E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
124#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) 133#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 64c76443a7aa..14af3e22d8d9 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1310,10 +1310,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1310 1310
1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) 1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1312 oem_reg |= HV_OEM_BITS_LPLU; 1312 oem_reg |= HV_OEM_BITS_LPLU;
1313
1314 /* Set Restart auto-neg to activate the bits */
1315 if (!hw->phy.ops.check_reset_block(hw))
1316 oem_reg |= HV_OEM_BITS_RESTART_AN;
1317 } else { 1313 } else {
1318 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | 1314 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1319 E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) 1315 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
@@ -1324,6 +1320,11 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1324 oem_reg |= HV_OEM_BITS_LPLU; 1320 oem_reg |= HV_OEM_BITS_LPLU;
1325 } 1321 }
1326 1322
1323 /* Set Restart auto-neg to activate the bits */
1324 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1325 !hw->phy.ops.check_reset_block(hw))
1326 oem_reg |= HV_OEM_BITS_RESTART_AN;
1327
1327 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); 1328 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1328 1329
1329release: 1330release:
@@ -2212,7 +2213,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2212 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2213 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2213 2214
2214 /* Check if the flash descriptor is valid */ 2215 /* Check if the flash descriptor is valid */
2215 if (hsfsts.hsf_status.fldesvalid == 0) { 2216 if (!hsfsts.hsf_status.fldesvalid) {
2216 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); 2217 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
2217 return -E1000_ERR_NVM; 2218 return -E1000_ERR_NVM;
2218 } 2219 }
@@ -2232,7 +2233,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2232 * completed. 2233 * completed.
2233 */ 2234 */
2234 2235
2235 if (hsfsts.hsf_status.flcinprog == 0) { 2236 if (!hsfsts.hsf_status.flcinprog) {
2236 /* 2237 /*
2237 * There is no cycle running at present, 2238 * There is no cycle running at present,
2238 * so we can start a cycle. 2239 * so we can start a cycle.
@@ -2250,7 +2251,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2250 */ 2251 */
2251 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 2252 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2252 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2253 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2253 if (hsfsts.hsf_status.flcinprog == 0) { 2254 if (!hsfsts.hsf_status.flcinprog) {
2254 ret_val = 0; 2255 ret_val = 0;
2255 break; 2256 break;
2256 } 2257 }
@@ -2292,12 +2293,12 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2292 /* wait till FDONE bit is set to 1 */ 2293 /* wait till FDONE bit is set to 1 */
2293 do { 2294 do {
2294 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2295 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2295 if (hsfsts.hsf_status.flcdone == 1) 2296 if (hsfsts.hsf_status.flcdone)
2296 break; 2297 break;
2297 udelay(1); 2298 udelay(1);
2298 } while (i++ < timeout); 2299 } while (i++ < timeout);
2299 2300
2300 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) 2301 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2301 return 0; 2302 return 0;
2302 2303
2303 return -E1000_ERR_NVM; 2304 return -E1000_ERR_NVM;
@@ -2408,10 +2409,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2408 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 2409 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2409 */ 2410 */
2410 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2411 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2411 if (hsfsts.hsf_status.flcerr == 1) { 2412 if (hsfsts.hsf_status.flcerr) {
2412 /* Repeat for some time before giving up. */ 2413 /* Repeat for some time before giving up. */
2413 continue; 2414 continue;
2414 } else if (hsfsts.hsf_status.flcdone == 0) { 2415 } else if (!hsfsts.hsf_status.flcdone) {
2415 e_dbg("Timeout error - flash cycle did not complete.\n"); 2416 e_dbg("Timeout error - flash cycle did not complete.\n");
2416 break; 2417 break;
2417 } 2418 }
@@ -2641,7 +2642,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2641 if (ret_val) 2642 if (ret_val)
2642 return ret_val; 2643 return ret_val;
2643 2644
2644 if ((data & 0x40) == 0) { 2645 if (!(data & 0x40)) {
2645 data |= 0x40; 2646 data |= 0x40;
2646 ret_val = e1000_write_nvm(hw, 0x19, 1, &data); 2647 ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
2647 if (ret_val) 2648 if (ret_val)
@@ -2759,10 +2760,10 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2759 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 2760 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2760 */ 2761 */
2761 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2762 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2762 if (hsfsts.hsf_status.flcerr == 1) 2763 if (hsfsts.hsf_status.flcerr)
2763 /* Repeat for some time before giving up. */ 2764 /* Repeat for some time before giving up. */
2764 continue; 2765 continue;
2765 if (hsfsts.hsf_status.flcdone == 0) { 2766 if (!hsfsts.hsf_status.flcdone) {
2766 e_dbg("Timeout error - flash cycle did not complete.\n"); 2767 e_dbg("Timeout error - flash cycle did not complete.\n");
2767 break; 2768 break;
2768 } 2769 }
@@ -2914,10 +2915,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2914 * a few more times else Done 2915 * a few more times else Done
2915 */ 2916 */
2916 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2917 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2917 if (hsfsts.hsf_status.flcerr == 1) 2918 if (hsfsts.hsf_status.flcerr)
2918 /* repeat for some time before giving up */ 2919 /* repeat for some time before giving up */
2919 continue; 2920 continue;
2920 else if (hsfsts.hsf_status.flcdone == 0) 2921 else if (!hsfsts.hsf_status.flcdone)
2921 return ret_val; 2922 return ret_val;
2922 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); 2923 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
2923 } 2924 }
@@ -3682,7 +3683,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3682 3683
3683 if (hw->mac.type >= e1000_pchlan) { 3684 if (hw->mac.type >= e1000_pchlan) {
3684 e1000_oem_bits_config_ich8lan(hw, false); 3685 e1000_oem_bits_config_ich8lan(hw, false);
3685 e1000_phy_hw_reset_ich8lan(hw); 3686
3687 /* Reset PHY to activate OEM bits on 82577/8 */
3688 if (hw->mac.type == e1000_pchlan)
3689 e1000e_phy_hw_reset_generic(hw);
3690
3686 ret_val = hw->phy.ops.acquire(hw); 3691 ret_val = hw->phy.ops.acquire(hw);
3687 if (ret_val) 3692 if (ret_val)
3688 return; 3693 return;
@@ -3916,7 +3921,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3916 3921
3917 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 3922 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
3918 if (hw->mac.type <= e1000_ich9lan) { 3923 if (hw->mac.type <= e1000_ich9lan) {
3919 if (((er32(EECD) & E1000_EECD_PRES) == 0) && 3924 if (!(er32(EECD) & E1000_EECD_PRES) &&
3920 (hw->phy.type == e1000_phy_igp_3)) { 3925 (hw->phy.type == e1000_phy_igp_3)) {
3921 e1000e_phy_init_script_igp3(hw); 3926 e1000e_phy_init_script_igp3(hw);
3922 } 3927 }
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index decad98c1059..d8327499305f 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -681,7 +681,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
681 return ret_val; 681 return ret_val;
682 } 682 }
683 683
684 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) 684 if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
685 hw->fc.requested_mode = e1000_fc_none; 685 hw->fc.requested_mode = e1000_fc_none;
686 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) 686 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
687 hw->fc.requested_mode = e1000_fc_tx_pause; 687 hw->fc.requested_mode = e1000_fc_tx_pause;
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 473f8e711510..bacc950fc684 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -85,7 +85,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
85 85
86 /* Check that the host interface is enabled. */ 86 /* Check that the host interface is enabled. */
87 hicr = er32(HICR); 87 hicr = er32(HICR);
88 if ((hicr & E1000_HICR_EN) == 0) { 88 if (!(hicr & E1000_HICR_EN)) {
89 e_dbg("E1000_HOST_EN bit disabled.\n"); 89 e_dbg("E1000_HOST_EN bit disabled.\n");
90 return -E1000_ERR_HOST_INTERFACE_COMMAND; 90 return -E1000_ERR_HOST_INTERFACE_COMMAND;
91 } 91 }
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 19ab2154802c..851f7937db29 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION 59#define DRV_VERSION "1.10.6" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -110,14 +110,14 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
110 110
111 /* Rx Registers */ 111 /* Rx Registers */
112 {E1000_RCTL, "RCTL"}, 112 {E1000_RCTL, "RCTL"},
113 {E1000_RDLEN, "RDLEN"}, 113 {E1000_RDLEN(0), "RDLEN"},
114 {E1000_RDH, "RDH"}, 114 {E1000_RDH(0), "RDH"},
115 {E1000_RDT, "RDT"}, 115 {E1000_RDT(0), "RDT"},
116 {E1000_RDTR, "RDTR"}, 116 {E1000_RDTR, "RDTR"},
117 {E1000_RXDCTL(0), "RXDCTL"}, 117 {E1000_RXDCTL(0), "RXDCTL"},
118 {E1000_ERT, "ERT"}, 118 {E1000_ERT, "ERT"},
119 {E1000_RDBAL, "RDBAL"}, 119 {E1000_RDBAL(0), "RDBAL"},
120 {E1000_RDBAH, "RDBAH"}, 120 {E1000_RDBAH(0), "RDBAH"},
121 {E1000_RDFH, "RDFH"}, 121 {E1000_RDFH, "RDFH"},
122 {E1000_RDFT, "RDFT"}, 122 {E1000_RDFT, "RDFT"},
123 {E1000_RDFHS, "RDFHS"}, 123 {E1000_RDFHS, "RDFHS"},
@@ -126,11 +126,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
126 126
127 /* Tx Registers */ 127 /* Tx Registers */
128 {E1000_TCTL, "TCTL"}, 128 {E1000_TCTL, "TCTL"},
129 {E1000_TDBAL, "TDBAL"}, 129 {E1000_TDBAL(0), "TDBAL"},
130 {E1000_TDBAH, "TDBAH"}, 130 {E1000_TDBAH(0), "TDBAH"},
131 {E1000_TDLEN, "TDLEN"}, 131 {E1000_TDLEN(0), "TDLEN"},
132 {E1000_TDH, "TDH"}, 132 {E1000_TDH(0), "TDH"},
133 {E1000_TDT, "TDT"}, 133 {E1000_TDT(0), "TDT"},
134 {E1000_TIDV, "TIDV"}, 134 {E1000_TIDV, "TIDV"},
135 {E1000_TXDCTL(0), "TXDCTL"}, 135 {E1000_TXDCTL(0), "TXDCTL"},
136 {E1000_TADV, "TADV"}, 136 {E1000_TADV, "TADV"},
@@ -1053,7 +1053,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
1053 1053
1054 if (!adapter->tx_hang_recheck && 1054 if (!adapter->tx_hang_recheck &&
1055 (adapter->flags2 & FLAG2_DMA_BURST)) { 1055 (adapter->flags2 & FLAG2_DMA_BURST)) {
1056 /* May be block on write-back, flush and detect again 1056 /*
1057 * May be block on write-back, flush and detect again
1057 * flush pending descriptor writebacks to memory 1058 * flush pending descriptor writebacks to memory
1058 */ 1059 */
1059 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1060 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -2530,33 +2531,31 @@ err:
2530} 2531}
2531 2532
2532/** 2533/**
2533 * e1000_clean - NAPI Rx polling callback 2534 * e1000e_poll - NAPI Rx polling callback
2534 * @napi: struct associated with this polling callback 2535 * @napi: struct associated with this polling callback
2535 * @budget: amount of packets driver is allowed to process this poll 2536 * @weight: number of packets driver is allowed to process this poll
2536 **/ 2537 **/
2537static int e1000_clean(struct napi_struct *napi, int budget) 2538static int e1000e_poll(struct napi_struct *napi, int weight)
2538{ 2539{
2539 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 2540 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2541 napi);
2540 struct e1000_hw *hw = &adapter->hw; 2542 struct e1000_hw *hw = &adapter->hw;
2541 struct net_device *poll_dev = adapter->netdev; 2543 struct net_device *poll_dev = adapter->netdev;
2542 int tx_cleaned = 1, work_done = 0; 2544 int tx_cleaned = 1, work_done = 0;
2543 2545
2544 adapter = netdev_priv(poll_dev); 2546 adapter = netdev_priv(poll_dev);
2545 2547
2546 if (adapter->msix_entries && 2548 if (!adapter->msix_entries ||
2547 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2549 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2548 goto clean_rx; 2550 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2549
2550 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2551 2551
2552clean_rx: 2552 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2553 adapter->clean_rx(adapter->rx_ring, &work_done, budget);
2554 2553
2555 if (!tx_cleaned) 2554 if (!tx_cleaned)
2556 work_done = budget; 2555 work_done = weight;
2557 2556
2558 /* If budget not fully consumed, exit the polling mode */ 2557 /* If weight not fully consumed, exit the polling mode */
2559 if (work_done < budget) { 2558 if (work_done < weight) {
2560 if (adapter->itr_setting & 3) 2559 if (adapter->itr_setting & 3)
2561 e1000_set_itr(adapter); 2560 e1000_set_itr(adapter);
2562 napi_complete(napi); 2561 napi_complete(napi);
@@ -2800,13 +2799,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2800 /* Setup the HW Tx Head and Tail descriptor pointers */ 2799 /* Setup the HW Tx Head and Tail descriptor pointers */
2801 tdba = tx_ring->dma; 2800 tdba = tx_ring->dma;
2802 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); 2801 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2803 ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); 2802 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2804 ew32(TDBAH, (tdba >> 32)); 2803 ew32(TDBAH(0), (tdba >> 32));
2805 ew32(TDLEN, tdlen); 2804 ew32(TDLEN(0), tdlen);
2806 ew32(TDH, 0); 2805 ew32(TDH(0), 0);
2807 ew32(TDT, 0); 2806 ew32(TDT(0), 0);
2808 tx_ring->head = adapter->hw.hw_addr + E1000_TDH; 2807 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2809 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT; 2808 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2810 2809
2811 /* Set the Tx Interrupt Delay register */ 2810 /* Set the Tx Interrupt Delay register */
2812 ew32(TIDV, adapter->tx_int_delay); 2811 ew32(TIDV, adapter->tx_int_delay);
@@ -3110,13 +3109,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3110 * the Base and Length of the Rx Descriptor Ring 3109 * the Base and Length of the Rx Descriptor Ring
3111 */ 3110 */
3112 rdba = rx_ring->dma; 3111 rdba = rx_ring->dma;
3113 ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); 3112 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3114 ew32(RDBAH, (rdba >> 32)); 3113 ew32(RDBAH(0), (rdba >> 32));
3115 ew32(RDLEN, rdlen); 3114 ew32(RDLEN(0), rdlen);
3116 ew32(RDH, 0); 3115 ew32(RDH(0), 0);
3117 ew32(RDT, 0); 3116 ew32(RDT(0), 0);
3118 rx_ring->head = adapter->hw.hw_addr + E1000_RDH; 3117 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3119 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT; 3118 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3120 3119
3121 /* Enable Receive Checksum Offload for TCP and UDP */ 3120 /* Enable Receive Checksum Offload for TCP and UDP */
3122 rxcsum = er32(RXCSUM); 3121 rxcsum = er32(RXCSUM);
@@ -6226,7 +6225,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6226 netdev->netdev_ops = &e1000e_netdev_ops; 6225 netdev->netdev_ops = &e1000e_netdev_ops;
6227 e1000e_set_ethtool_ops(netdev); 6226 e1000e_set_ethtool_ops(netdev);
6228 netdev->watchdog_timeo = 5 * HZ; 6227 netdev->watchdog_timeo = 5 * HZ;
6229 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 6228 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6230 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 6229 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6231 6230
6232 netdev->mem_start = mmio_start; 6231 netdev->mem_start = mmio_start;
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ff796e42c3eb..feb6eebb0021 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -166,8 +166,8 @@ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lea
166 * 166 *
167 * Default Value: 1 (enabled) 167 * Default Value: 1 (enabled)
168 */ 168 */
169E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ 169E1000_PARAM(CrcStripping,
170 "the CRC"); 170 "Enable CRC Stripping, disable if your BMC needs the CRC");
171 171
172struct e1000_option { 172struct e1000_option {
173 enum { enable_option, range_option, list_option } type; 173 enum { enable_option, range_option, list_option } type;
@@ -360,8 +360,8 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
360 adapter->itr = 20000; 360 adapter->itr = 20000;
361 break; 361 break;
362 case 4: 362 case 4:
363 e_info("%s set to simplified (2000-8000 ints) " 363 e_info("%s set to simplified (2000-8000 ints) mode\n",
364 "mode\n", opt.name); 364 opt.name);
365 adapter->itr_setting = 4; 365 adapter->itr_setting = 4;
366 break; 366 break;
367 default: 367 default:
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 35b45578c604..bd5ef64b3003 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -718,7 +718,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
718 * 1 - Enabled 718 * 1 - Enabled
719 */ 719 */
720 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 720 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
721 if (phy->disable_polarity_correction == 1) 721 if (phy->disable_polarity_correction)
722 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 722 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
723 723
724 /* Enable downshift on BM (disabled by default) */ 724 /* Enable downshift on BM (disabled by default) */
@@ -1090,7 +1090,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1090 * If autoneg_advertised is zero, we assume it was not defaulted 1090 * If autoneg_advertised is zero, we assume it was not defaulted
1091 * by the calling code so we set to advertise full capability. 1091 * by the calling code so we set to advertise full capability.
1092 */ 1092 */
1093 if (phy->autoneg_advertised == 0) 1093 if (!phy->autoneg_advertised)
1094 phy->autoneg_advertised = phy->autoneg_mask; 1094 phy->autoneg_advertised = phy->autoneg_mask;
1095 1095
1096 e_dbg("Reconfiguring auto-neg advertisement params\n"); 1096 e_dbg("Reconfiguring auto-neg advertisement params\n");
@@ -1596,7 +1596,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
1596 ret_val = e1e_rphy(hw, offset, &phy_data); 1596 ret_val = e1e_rphy(hw, offset, &phy_data);
1597 1597
1598 if (!ret_val) 1598 if (!ret_val)
1599 phy->speed_downgraded = (phy_data & mask); 1599 phy->speed_downgraded = !!(phy_data & mask);
1600 1600
1601 return ret_val; 1601 return ret_val;
1602} 1602}
@@ -1925,8 +1925,8 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1925 if (ret_val) 1925 if (ret_val)
1926 return ret_val; 1926 return ret_val;
1927 1927
1928 phy->polarity_correction = (phy_data & 1928 phy->polarity_correction = !!(phy_data &
1929 M88E1000_PSCR_POLARITY_REVERSAL); 1929 M88E1000_PSCR_POLARITY_REVERSAL);
1930 1930
1931 ret_val = e1000_check_polarity_m88(hw); 1931 ret_val = e1000_check_polarity_m88(hw);
1932 if (ret_val) 1932 if (ret_val)
@@ -1936,7 +1936,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1936 if (ret_val) 1936 if (ret_val)
1937 return ret_val; 1937 return ret_val;
1938 1938
1939 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); 1939 phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
1940 1940
1941 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { 1941 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
1942 ret_val = e1000_get_cable_length(hw); 1942 ret_val = e1000_get_cable_length(hw);
@@ -1999,7 +1999,7 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
1999 if (ret_val) 1999 if (ret_val)
2000 return ret_val; 2000 return ret_val;
2001 2001
2002 phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); 2002 phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
2003 2003
2004 if ((data & IGP01E1000_PSSR_SPEED_MASK) == 2004 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
2005 IGP01E1000_PSSR_SPEED_1000MBPS) { 2005 IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -2052,8 +2052,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
2052 ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); 2052 ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
2053 if (ret_val) 2053 if (ret_val)
2054 return ret_val; 2054 return ret_val;
2055 phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) 2055 phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
2056 ? false : true;
2057 2056
2058 if (phy->polarity_correction) { 2057 if (phy->polarity_correction) {
2059 ret_val = e1000_check_polarity_ife(hw); 2058 ret_val = e1000_check_polarity_ife(hw);
@@ -2070,7 +2069,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
2070 if (ret_val) 2069 if (ret_val)
2071 return ret_val; 2070 return ret_val;
2072 2071
2073 phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; 2072 phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
2074 2073
2075 /* The following parameters are undefined for 10/100 operation. */ 2074 /* The following parameters are undefined for 10/100 operation. */
2076 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; 2075 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
@@ -2979,7 +2978,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2979 if ((hw->phy.type == e1000_phy_82578) && 2978 if ((hw->phy.type == e1000_phy_82578) &&
2980 (hw->phy.revision >= 1) && 2979 (hw->phy.revision >= 1) &&
2981 (hw->phy.addr == 2) && 2980 (hw->phy.addr == 2) &&
2982 ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { 2981 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
2983 u16 data2 = 0x7EFF; 2982 u16 data2 = 0x7EFF;
2984 ret_val = e1000_access_phy_debug_regs_hv(hw, 2983 ret_val = e1000_access_phy_debug_regs_hv(hw,
2985 (1 << 6) | 0x3, 2984 (1 << 6) | 0x3,
@@ -3265,7 +3264,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3265 if (ret_val) 3264 if (ret_val)
3266 return ret_val; 3265 return ret_val;
3267 3266
3268 phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; 3267 phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
3269 3268
3270 if ((data & I82577_PHY_STATUS2_SPEED_MASK) == 3269 if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
3271 I82577_PHY_STATUS2_SPEED_1000MBPS) { 3270 I82577_PHY_STATUS2_SPEED_1000MBPS) {
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 6565c463185c..4bd16e266414 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_IGB) += igb.o
35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ 35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o 36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
37 37
38igb-$(CONFIG_IGB_PTP) += igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8e33bdd33eea..3758ad246742 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -35,8 +35,8 @@
35#include "e1000_82575.h" 35#include "e1000_82575.h"
36 36
37#include <linux/clocksource.h> 37#include <linux/clocksource.h>
38#include <linux/timecompare.h>
39#include <linux/net_tstamp.h> 38#include <linux/net_tstamp.h>
39#include <linux/ptp_clock_kernel.h>
40#include <linux/bitops.h> 40#include <linux/bitops.h>
41#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
42 42
@@ -328,9 +328,6 @@ struct igb_adapter {
328 328
329 /* OS defined structs */ 329 /* OS defined structs */
330 struct pci_dev *pdev; 330 struct pci_dev *pdev;
331 struct cyclecounter cycles;
332 struct timecounter clock;
333 struct timecompare compare;
334 struct hwtstamp_config hwtstamp_config; 331 struct hwtstamp_config hwtstamp_config;
335 332
336 spinlock_t stats64_lock; 333 spinlock_t stats64_lock;
@@ -364,6 +361,13 @@ struct igb_adapter {
364 u32 wvbr; 361 u32 wvbr;
365 int node; 362 int node;
366 u32 *shadow_vfta; 363 u32 *shadow_vfta;
364
365 struct ptp_clock *ptp_clock;
366 struct ptp_clock_info caps;
367 struct delayed_work overflow_work;
368 spinlock_t tmreg_lock;
369 struct cyclecounter cc;
370 struct timecounter tc;
367}; 371};
368 372
369#define IGB_FLAG_HAS_MSI (1 << 0) 373#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -378,7 +382,6 @@ struct igb_adapter {
378#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ 382#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
379 383
380#define IGB_82576_TSYNC_SHIFT 19 384#define IGB_82576_TSYNC_SHIFT 19
381#define IGB_82580_TSYNC_SHIFT 24
382#define IGB_TS_HDR_LEN 16 385#define IGB_TS_HDR_LEN 16
383enum e1000_state_t { 386enum e1000_state_t {
384 __IGB_TESTING, 387 __IGB_TESTING,
@@ -414,7 +417,15 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
414extern bool igb_has_link(struct igb_adapter *adapter); 417extern bool igb_has_link(struct igb_adapter *adapter);
415extern void igb_set_ethtool_ops(struct net_device *); 418extern void igb_set_ethtool_ops(struct net_device *);
416extern void igb_power_up_link(struct igb_adapter *); 419extern void igb_power_up_link(struct igb_adapter *);
420#ifdef CONFIG_IGB_PTP
421extern void igb_ptp_init(struct igb_adapter *adapter);
422extern void igb_ptp_remove(struct igb_adapter *adapter);
423
424extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
425 struct skb_shared_hwtstamps *hwtstamps,
426 u64 systim);
417 427
428#endif
418static inline s32 igb_reset_phy(struct e1000_hw *hw) 429static inline s32 igb_reset_phy(struct e1000_hw *hw)
419{ 430{
420 if (hw->phy.ops.reset) 431 if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5ec31598ee47..28a37bb00c90 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
60#include "igb.h" 60#include "igb.h"
61 61
62#define MAJ 3 62#define MAJ 3
63#define MIN 2 63#define MIN 4
64#define BUILD 10 64#define BUILD 7
65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
66__stringify(BUILD) "-k" 66__stringify(BUILD) "-k"
67char igb_driver_name[] = "igb"; 67char igb_driver_name[] = "igb";
@@ -114,7 +114,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
114static void igb_setup_mrqc(struct igb_adapter *); 114static void igb_setup_mrqc(struct igb_adapter *);
115static int igb_probe(struct pci_dev *, const struct pci_device_id *); 115static int igb_probe(struct pci_dev *, const struct pci_device_id *);
116static void __devexit igb_remove(struct pci_dev *pdev); 116static void __devexit igb_remove(struct pci_dev *pdev);
117static void igb_init_hw_timer(struct igb_adapter *adapter);
118static int igb_sw_init(struct igb_adapter *); 117static int igb_sw_init(struct igb_adapter *);
119static int igb_open(struct net_device *); 118static int igb_open(struct net_device *);
120static int igb_close(struct net_device *); 119static int igb_close(struct net_device *);
@@ -565,33 +564,6 @@ exit:
565 return; 564 return;
566} 565}
567 566
568
569/**
570 * igb_read_clock - read raw cycle counter (to be used by time counter)
571 */
572static cycle_t igb_read_clock(const struct cyclecounter *tc)
573{
574 struct igb_adapter *adapter =
575 container_of(tc, struct igb_adapter, cycles);
576 struct e1000_hw *hw = &adapter->hw;
577 u64 stamp = 0;
578 int shift = 0;
579
580 /*
581 * The timestamp latches on lowest register read. For the 82580
582 * the lowest register is SYSTIMR instead of SYSTIML. However we never
583 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
584 */
585 if (hw->mac.type >= e1000_82580) {
586 stamp = rd32(E1000_SYSTIMR) >> 8;
587 shift = IGB_82580_TSYNC_SHIFT;
588 }
589
590 stamp |= (u64)rd32(E1000_SYSTIML) << shift;
591 stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
592 return stamp;
593}
594
595/** 567/**
596 * igb_get_hw_dev - return device 568 * igb_get_hw_dev - return device
597 * used by hardware layer to print debugging information 569 * used by hardware layer to print debugging information
@@ -2110,9 +2082,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2110 } 2082 }
2111 2083
2112#endif 2084#endif
2085#ifdef CONFIG_IGB_PTP
2113 /* do hw tstamp init after resetting */ 2086 /* do hw tstamp init after resetting */
2114 igb_init_hw_timer(adapter); 2087 igb_ptp_init(adapter);
2115 2088
2089#endif
2116 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2090 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2117 /* print bus type/speed/width info */ 2091 /* print bus type/speed/width info */
2118 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2092 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2184,7 +2158,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2184 struct e1000_hw *hw = &adapter->hw; 2158 struct e1000_hw *hw = &adapter->hw;
2185 2159
2186 pm_runtime_get_noresume(&pdev->dev); 2160 pm_runtime_get_noresume(&pdev->dev);
2161#ifdef CONFIG_IGB_PTP
2162 igb_ptp_remove(adapter);
2187 2163
2164#endif
2188 /* 2165 /*
2189 * The watchdog timer may be rescheduled, so explicitly 2166 * The watchdog timer may be rescheduled, so explicitly
2190 * disable watchdog from being rescheduled. 2167 * disable watchdog from being rescheduled.
@@ -2304,112 +2281,6 @@ out:
2304} 2281}
2305 2282
2306/** 2283/**
2307 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2308 * @adapter: board private structure to initialize
2309 *
2310 * igb_init_hw_timer initializes the function pointer and values for the hw
2311 * timer found in hardware.
2312 **/
2313static void igb_init_hw_timer(struct igb_adapter *adapter)
2314{
2315 struct e1000_hw *hw = &adapter->hw;
2316
2317 switch (hw->mac.type) {
2318 case e1000_i350:
2319 case e1000_82580:
2320 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2321 adapter->cycles.read = igb_read_clock;
2322 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2323 adapter->cycles.mult = 1;
2324 /*
2325 * The 82580 timesync updates the system timer every 8ns by 8ns
2326 * and the value cannot be shifted. Instead we need to shift
2327 * the registers to generate a 64bit timer value. As a result
2328 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2329 * 24 in order to generate a larger value for synchronization.
2330 */
2331 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2332 /* disable system timer temporarily by setting bit 31 */
2333 wr32(E1000_TSAUXC, 0x80000000);
2334 wrfl();
2335
2336 /* Set registers so that rollover occurs soon to test this. */
2337 wr32(E1000_SYSTIMR, 0x00000000);
2338 wr32(E1000_SYSTIML, 0x80000000);
2339 wr32(E1000_SYSTIMH, 0x000000FF);
2340 wrfl();
2341
2342 /* enable system timer by clearing bit 31 */
2343 wr32(E1000_TSAUXC, 0x0);
2344 wrfl();
2345
2346 timecounter_init(&adapter->clock,
2347 &adapter->cycles,
2348 ktime_to_ns(ktime_get_real()));
2349 /*
2350 * Synchronize our NIC clock against system wall clock. NIC
2351 * time stamp reading requires ~3us per sample, each sample
2352 * was pretty stable even under load => only require 10
2353 * samples for each offset comparison.
2354 */
2355 memset(&adapter->compare, 0, sizeof(adapter->compare));
2356 adapter->compare.source = &adapter->clock;
2357 adapter->compare.target = ktime_get_real;
2358 adapter->compare.num_samples = 10;
2359 timecompare_update(&adapter->compare, 0);
2360 break;
2361 case e1000_82576:
2362 /*
2363 * Initialize hardware timer: we keep it running just in case
2364 * that some program needs it later on.
2365 */
2366 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2367 adapter->cycles.read = igb_read_clock;
2368 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2369 adapter->cycles.mult = 1;
2370 /**
2371 * Scale the NIC clock cycle by a large factor so that
2372 * relatively small clock corrections can be added or
2373 * subtracted at each clock tick. The drawbacks of a large
2374 * factor are a) that the clock register overflows more quickly
2375 * (not such a big deal) and b) that the increment per tick has
2376 * to fit into 24 bits. As a result we need to use a shift of
2377 * 19 so we can fit a value of 16 into the TIMINCA register.
2378 */
2379 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2380 wr32(E1000_TIMINCA,
2381 (1 << E1000_TIMINCA_16NS_SHIFT) |
2382 (16 << IGB_82576_TSYNC_SHIFT));
2383
2384 /* Set registers so that rollover occurs soon to test this. */
2385 wr32(E1000_SYSTIML, 0x00000000);
2386 wr32(E1000_SYSTIMH, 0xFF800000);
2387 wrfl();
2388
2389 timecounter_init(&adapter->clock,
2390 &adapter->cycles,
2391 ktime_to_ns(ktime_get_real()));
2392 /*
2393 * Synchronize our NIC clock against system wall clock. NIC
2394 * time stamp reading requires ~3us per sample, each sample
2395 * was pretty stable even under load => only require 10
2396 * samples for each offset comparison.
2397 */
2398 memset(&adapter->compare, 0, sizeof(adapter->compare));
2399 adapter->compare.source = &adapter->clock;
2400 adapter->compare.target = ktime_get_real;
2401 adapter->compare.num_samples = 10;
2402 timecompare_update(&adapter->compare, 0);
2403 break;
2404 case e1000_82575:
2405 /* 82575 does not support timesync */
2406 default:
2407 break;
2408 }
2409
2410}
2411
2412/**
2413 * igb_sw_init - Initialize general software structures (struct igb_adapter) 2284 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2414 * @adapter: board private structure to initialize 2285 * @adapter: board private structure to initialize
2415 * 2286 *
@@ -5718,35 +5589,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
5718 return 0; 5589 return 0;
5719} 5590}
5720 5591
5721/** 5592#ifdef CONFIG_IGB_PTP
5722 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
5723 * @adapter: board private structure
5724 * @shhwtstamps: timestamp structure to update
5725 * @regval: unsigned 64bit system time value.
5726 *
5727 * We need to convert the system time value stored in the RX/TXSTMP registers
5728 * into a hwtstamp which can be used by the upper level timestamping functions
5729 */
5730static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5731 struct skb_shared_hwtstamps *shhwtstamps,
5732 u64 regval)
5733{
5734 u64 ns;
5735
5736 /*
5737 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5738 * 24 to match clock shift we setup earlier.
5739 */
5740 if (adapter->hw.mac.type >= e1000_82580)
5741 regval <<= IGB_82580_TSYNC_SHIFT;
5742
5743 ns = timecounter_cyc2time(&adapter->clock, regval);
5744 timecompare_update(&adapter->compare, ns);
5745 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5746 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5747 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5748}
5749
5750/** 5593/**
5751 * igb_tx_hwtstamp - utility function which checks for TX time stamp 5594 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5752 * @q_vector: pointer to q_vector containing needed info 5595 * @q_vector: pointer to q_vector containing needed info
@@ -5776,6 +5619,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5776 skb_tstamp_tx(buffer_info->skb, &shhwtstamps); 5619 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
5777} 5620}
5778 5621
5622#endif
5779/** 5623/**
5780 * igb_clean_tx_irq - Reclaim resources after transmit completes 5624 * igb_clean_tx_irq - Reclaim resources after transmit completes
5781 * @q_vector: pointer to q_vector containing needed info 5625 * @q_vector: pointer to q_vector containing needed info
@@ -5819,9 +5663,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5819 total_bytes += tx_buffer->bytecount; 5663 total_bytes += tx_buffer->bytecount;
5820 total_packets += tx_buffer->gso_segs; 5664 total_packets += tx_buffer->gso_segs;
5821 5665
5666#ifdef CONFIG_IGB_PTP
5822 /* retrieve hardware timestamp */ 5667 /* retrieve hardware timestamp */
5823 igb_tx_hwtstamp(q_vector, tx_buffer); 5668 igb_tx_hwtstamp(q_vector, tx_buffer);
5824 5669
5670#endif
5825 /* free the skb */ 5671 /* free the skb */
5826 dev_kfree_skb_any(tx_buffer->skb); 5672 dev_kfree_skb_any(tx_buffer->skb);
5827 tx_buffer->skb = NULL; 5673 tx_buffer->skb = NULL;
@@ -5993,6 +5839,7 @@ static inline void igb_rx_hash(struct igb_ring *ring,
5993 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 5839 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5994} 5840}
5995 5841
5842#ifdef CONFIG_IGB_PTP
5996static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, 5843static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5997 union e1000_adv_rx_desc *rx_desc, 5844 union e1000_adv_rx_desc *rx_desc,
5998 struct sk_buff *skb) 5845 struct sk_buff *skb)
@@ -6032,6 +5879,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
6032 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 5879 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
6033} 5880}
6034 5881
5882#endif
6035static void igb_rx_vlan(struct igb_ring *ring, 5883static void igb_rx_vlan(struct igb_ring *ring,
6036 union e1000_adv_rx_desc *rx_desc, 5884 union e1000_adv_rx_desc *rx_desc,
6037 struct sk_buff *skb) 5885 struct sk_buff *skb)
@@ -6142,7 +5990,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
6142 goto next_desc; 5990 goto next_desc;
6143 } 5991 }
6144 5992
5993#ifdef CONFIG_IGB_PTP
6145 igb_rx_hwtstamp(q_vector, rx_desc, skb); 5994 igb_rx_hwtstamp(q_vector, rx_desc, skb);
5995#endif
6146 igb_rx_hash(rx_ring, rx_desc, skb); 5996 igb_rx_hash(rx_ring, rx_desc, skb);
6147 igb_rx_checksum(rx_ring, rx_desc, skb); 5997 igb_rx_checksum(rx_ring, rx_desc, skb);
6148 igb_rx_vlan(rx_ring, rx_desc, skb); 5998 igb_rx_vlan(rx_ring, rx_desc, skb);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
new file mode 100644
index 000000000000..c9b71c5bc475
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -0,0 +1,381 @@
1/*
2 * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
3 *
4 * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20#include <linux/module.h>
21#include <linux/device.h>
22#include <linux/pci.h>
23
24#include "igb.h"
25
26#define INCVALUE_MASK 0x7fffffff
27#define ISGN 0x80000000
28
29/*
30 * The 82580 timesync updates the system timer every 8ns by 8ns,
31 * and this update value cannot be reprogrammed.
32 *
33 * Neither the 82576 nor the 82580 offer registers wide enough to hold
34 * nanoseconds time values for very long. For the 82580, SYSTIM always
35 * counts nanoseconds, but the upper 24 bits are not availible. The
36 * frequency is adjusted by changing the 32 bit fractional nanoseconds
37 * register, TIMINCA.
38 *
39 * For the 82576, the SYSTIM register time unit is affect by the
40 * choice of the 24 bit TININCA:IV (incvalue) field. Five bits of this
41 * field are needed to provide the nominal 16 nanosecond period,
42 * leaving 19 bits for fractional nanoseconds.
43 *
44 * We scale the NIC clock cycle by a large factor so that relatively
45 * small clock corrections can be added or subtracted at each clock
46 * tick. The drawbacks of a large factor are a) that the clock
47 * register overflows more quickly (not such a big deal) and b) that
48 * the increment per tick has to fit into 24 bits. As a result we
49 * need to use a shift of 19 so we can fit a value of 16 into the
50 * TIMINCA register.
51 *
52 *
53 * SYSTIMH SYSTIML
54 * +--------------+ +---+---+------+
55 * 82576 | 32 | | 8 | 5 | 19 |
56 * +--------------+ +---+---+------+
57 * \________ 45 bits _______/ fract
58 *
59 * +----------+---+ +--------------+
60 * 82580 | 24 | 8 | | 32 |
61 * +----------+---+ +--------------+
62 * reserved \______ 40 bits _____/
63 *
64 *
65 * The 45 bit 82576 SYSTIM overflows every
66 * 2^45 * 10^-9 / 3600 = 9.77 hours.
67 *
68 * The 40 bit 82580 SYSTIM overflows every
69 * 2^40 * 10^-9 / 60 = 18.3 minutes.
70 */
71
72#define IGB_OVERFLOW_PERIOD (HZ * 60 * 9)
73#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
74#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
76#define IGB_NBITS_82580 40
77
78/*
79 * SYSTIM read access for the 82576
80 */
81
82static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
83{
84 u64 val;
85 u32 lo, hi;
86 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
87 struct e1000_hw *hw = &igb->hw;
88
89 lo = rd32(E1000_SYSTIML);
90 hi = rd32(E1000_SYSTIMH);
91
92 val = ((u64) hi) << 32;
93 val |= lo;
94
95 return val;
96}
97
98/*
99 * SYSTIM read access for the 82580
100 */
101
102static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
103{
104 u64 val;
105 u32 lo, hi, jk;
106 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
107 struct e1000_hw *hw = &igb->hw;
108
109 /*
110 * The timestamp latches on lowest register read. For the 82580
111 * the lowest register is SYSTIMR instead of SYSTIML. However we only
112 * need to provide nanosecond resolution, so we just ignore it.
113 */
114 jk = rd32(E1000_SYSTIMR);
115 lo = rd32(E1000_SYSTIML);
116 hi = rd32(E1000_SYSTIMH);
117
118 val = ((u64) hi) << 32;
119 val |= lo;
120
121 return val;
122}
123
124/*
125 * PTP clock operations
126 */
127
128static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
129{
130 u64 rate;
131 u32 incvalue;
132 int neg_adj = 0;
133 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
134 struct e1000_hw *hw = &igb->hw;
135
136 if (ppb < 0) {
137 neg_adj = 1;
138 ppb = -ppb;
139 }
140 rate = ppb;
141 rate <<= 14;
142 rate = div_u64(rate, 1953125);
143
144 incvalue = 16 << IGB_82576_TSYNC_SHIFT;
145
146 if (neg_adj)
147 incvalue -= rate;
148 else
149 incvalue += rate;
150
151 wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
152
153 return 0;
154}
155
156static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
157{
158 u64 rate;
159 u32 inca;
160 int neg_adj = 0;
161 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
162 struct e1000_hw *hw = &igb->hw;
163
164 if (ppb < 0) {
165 neg_adj = 1;
166 ppb = -ppb;
167 }
168 rate = ppb;
169 rate <<= 26;
170 rate = div_u64(rate, 1953125);
171
172 inca = rate & INCVALUE_MASK;
173 if (neg_adj)
174 inca |= ISGN;
175
176 wr32(E1000_TIMINCA, inca);
177
178 return 0;
179}
180
181static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
182{
183 s64 now;
184 unsigned long flags;
185 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
186
187 spin_lock_irqsave(&igb->tmreg_lock, flags);
188
189 now = timecounter_read(&igb->tc);
190 now += delta;
191 timecounter_init(&igb->tc, &igb->cc, now);
192
193 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
194
195 return 0;
196}
197
198static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
199{
200 u64 ns;
201 u32 remainder;
202 unsigned long flags;
203 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
204
205 spin_lock_irqsave(&igb->tmreg_lock, flags);
206
207 ns = timecounter_read(&igb->tc);
208
209 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
210
211 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
212 ts->tv_nsec = remainder;
213
214 return 0;
215}
216
217static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
218{
219 u64 ns;
220 unsigned long flags;
221 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
222
223 ns = ts->tv_sec * 1000000000ULL;
224 ns += ts->tv_nsec;
225
226 spin_lock_irqsave(&igb->tmreg_lock, flags);
227
228 timecounter_init(&igb->tc, &igb->cc, ns);
229
230 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
231
232 return 0;
233}
234
235static int ptp_82576_enable(struct ptp_clock_info *ptp,
236 struct ptp_clock_request *rq, int on)
237{
238 return -EOPNOTSUPP;
239}
240
241static int ptp_82580_enable(struct ptp_clock_info *ptp,
242 struct ptp_clock_request *rq, int on)
243{
244 return -EOPNOTSUPP;
245}
246
247static void igb_overflow_check(struct work_struct *work)
248{
249 struct timespec ts;
250 struct igb_adapter *igb =
251 container_of(work, struct igb_adapter, overflow_work.work);
252
253 igb_gettime(&igb->caps, &ts);
254
255 pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
256
257 schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
258}
259
260void igb_ptp_init(struct igb_adapter *adapter)
261{
262 struct e1000_hw *hw = &adapter->hw;
263
264 switch (hw->mac.type) {
265 case e1000_i350:
266 case e1000_82580:
267 adapter->caps.owner = THIS_MODULE;
268 strcpy(adapter->caps.name, "igb-82580");
269 adapter->caps.max_adj = 62499999;
270 adapter->caps.n_ext_ts = 0;
271 adapter->caps.pps = 0;
272 adapter->caps.adjfreq = ptp_82580_adjfreq;
273 adapter->caps.adjtime = igb_adjtime;
274 adapter->caps.gettime = igb_gettime;
275 adapter->caps.settime = igb_settime;
276 adapter->caps.enable = ptp_82580_enable;
277 adapter->cc.read = igb_82580_systim_read;
278 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
279 adapter->cc.mult = 1;
280 adapter->cc.shift = 0;
281 /* Enable the timer functions by clearing bit 31. */
282 wr32(E1000_TSAUXC, 0x0);
283 break;
284
285 case e1000_82576:
286 adapter->caps.owner = THIS_MODULE;
287 strcpy(adapter->caps.name, "igb-82576");
288 adapter->caps.max_adj = 1000000000;
289 adapter->caps.n_ext_ts = 0;
290 adapter->caps.pps = 0;
291 adapter->caps.adjfreq = ptp_82576_adjfreq;
292 adapter->caps.adjtime = igb_adjtime;
293 adapter->caps.gettime = igb_gettime;
294 adapter->caps.settime = igb_settime;
295 adapter->caps.enable = ptp_82576_enable;
296 adapter->cc.read = igb_82576_systim_read;
297 adapter->cc.mask = CLOCKSOURCE_MASK(64);
298 adapter->cc.mult = 1;
299 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
300 /* Dial the nominal frequency. */
301 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
302 break;
303
304 default:
305 adapter->ptp_clock = NULL;
306 return;
307 }
308
309 wrfl();
310
311 timecounter_init(&adapter->tc, &adapter->cc,
312 ktime_to_ns(ktime_get_real()));
313
314 INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
315
316 spin_lock_init(&adapter->tmreg_lock);
317
318 schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
319
320 adapter->ptp_clock = ptp_clock_register(&adapter->caps);
321 if (IS_ERR(adapter->ptp_clock)) {
322 adapter->ptp_clock = NULL;
323 dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
324 } else
325 dev_info(&adapter->pdev->dev, "added PHC on %s\n",
326 adapter->netdev->name);
327}
328
329void igb_ptp_remove(struct igb_adapter *adapter)
330{
331 cancel_delayed_work_sync(&adapter->overflow_work);
332
333 if (adapter->ptp_clock) {
334 ptp_clock_unregister(adapter->ptp_clock);
335 dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
336 adapter->netdev->name);
337 }
338}
339
340/**
341 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
342 * @adapter: board private structure
343 * @hwtstamps: timestamp structure to update
344 * @systim: unsigned 64bit system time value.
345 *
346 * We need to convert the system time value stored in the RX/TXSTMP registers
347 * into a hwtstamp which can be used by the upper level timestamping functions.
348 *
349 * The 'tmreg_lock' spinlock is used to protect the consistency of the
350 * system time value. This is needed because reading the 64 bit time
351 * value involves reading two (or three) 32 bit registers. The first
352 * read latches the value. Ditto for writing.
353 *
354 * In addition, here have extended the system time with an overflow
355 * counter in software.
356 **/
357void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
358 struct skb_shared_hwtstamps *hwtstamps,
359 u64 systim)
360{
361 u64 ns;
362 unsigned long flags;
363
364 switch (adapter->hw.mac.type) {
365 case e1000_i350:
366 case e1000_82580:
367 case e1000_82576:
368 break;
369 default:
370 return;
371 }
372
373 spin_lock_irqsave(&adapter->tmreg_lock, flags);
374
375 ns = timecounter_cyc2time(&adapter->tc, systim);
376
377 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
378
379 memset(hwtstamps, 0, sizeof(*hwtstamps));
380 hwtstamps->hwtstamp = ns_to_ktime(ns);
381}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 85d2e2c4ce4a..56fd46844f65 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -91,29 +91,6 @@ out:
91 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); 91 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
92} 92}
93 93
94/**
95 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
96 * @hw: pointer to hardware structure
97 *
98 * Read PCIe configuration space, and get the MSI-X vector count from
99 * the capabilities table.
100 **/
101static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
102{
103 struct ixgbe_adapter *adapter = hw->back;
104 u16 msix_count;
105 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
106 &msix_count);
107 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
108
109 /* MSI-X count is zero-based in HW, so increment to give proper value */
110 msix_count++;
111
112 return msix_count;
113}
114
115/**
116 */
117static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) 94static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
118{ 95{
119 struct ixgbe_mac_info *mac = &hw->mac; 96 struct ixgbe_mac_info *mac = &hw->mac;
@@ -126,7 +103,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
126 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; 103 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
127 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 104 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
128 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 105 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
129 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); 106 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
130 107
131 return 0; 108 return 0;
132} 109}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 49aa41fe7b84..e59888163a17 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2783,17 +2783,36 @@ san_mac_addr_out:
2783 * Read PCIe configuration space, and get the MSI-X vector count from 2783 * Read PCIe configuration space, and get the MSI-X vector count from
2784 * the capabilities table. 2784 * the capabilities table.
2785 **/ 2785 **/
2786u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2786u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2787{ 2787{
2788 struct ixgbe_adapter *adapter = hw->back; 2788 struct ixgbe_adapter *adapter = hw->back;
2789 u16 msix_count; 2789 u16 msix_count = 1;
2790 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS, 2790 u16 max_msix_count;
2791 &msix_count); 2791 u16 pcie_offset;
2792
2793 switch (hw->mac.type) {
2794 case ixgbe_mac_82598EB:
2795 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2796 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2797 break;
2798 case ixgbe_mac_82599EB:
2799 case ixgbe_mac_X540:
2800 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2801 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2802 break;
2803 default:
2804 return msix_count;
2805 }
2806
2807 pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
2792 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2808 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2793 2809
2794 /* MSI-X count is zero-based in HW, so increment to give proper value */ 2810 /* MSI-X count is zero-based in HW */
2795 msix_count++; 2811 msix_count++;
2796 2812
2813 if (msix_count > max_msix_count)
2814 msix_count = max_msix_count;
2815
2797 return msix_count; 2816 return msix_count;
2798} 2817}
2799 2818
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 204f06235b45..d6d34324540c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -31,7 +31,7 @@
31#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#include "ixgbe.h" 32#include "ixgbe.h"
33 33
34u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); 34u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 027d7a75be39..ed1b47dc0834 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -622,6 +622,16 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
622 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 622 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
623 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); 623 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
624 624
625#ifdef IXGBE_FCOE
626 if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
627 struct ixgbe_ring_feature *f;
628 f = &adapter->ring_feature[RING_F_FCOE];
629 if ((rxr_idx >= f->mask) &&
630 (rxr_idx < f->mask + f->indices))
631 set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
632 }
633
634#endif /* IXGBE_FCOE */
625 /* apply Rx specific ring traits */ 635 /* apply Rx specific ring traits */
626 ring->count = adapter->rx_ring_count; 636 ring->count = adapter->rx_ring_count;
627 ring->queue_index = rxr_idx; 637 ring->queue_index = rxr_idx;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e26b1f9ac75..7c4325ec22c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2904,33 +2904,6 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2904 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); 2904 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2905} 2905}
2906 2906
2907/**
2908 * ixgbe_set_uta - Set unicast filter table address
2909 * @adapter: board private structure
2910 *
2911 * The unicast table address is a register array of 32-bit registers.
2912 * The table is meant to be used in a way similar to how the MTA is used
2913 * however due to certain limitations in the hardware it is necessary to
2914 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2915 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2916 **/
2917static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2918{
2919 struct ixgbe_hw *hw = &adapter->hw;
2920 int i;
2921
2922 /* The UTA table only exists on 82599 hardware and newer */
2923 if (hw->mac.type < ixgbe_mac_82599EB)
2924 return;
2925
2926 /* we only need to do this if VMDq is enabled */
2927 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2928 return;
2929
2930 for (i = 0; i < 128; i++)
2931 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2932}
2933
2934#define IXGBE_MAX_RX_DESC_POLL 10 2907#define IXGBE_MAX_RX_DESC_POLL 10
2935static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, 2908static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2936 struct ixgbe_ring *ring) 2909 struct ixgbe_ring *ring)
@@ -3154,14 +3127,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3154 set_ring_rsc_enabled(rx_ring); 3127 set_ring_rsc_enabled(rx_ring);
3155 else 3128 else
3156 clear_ring_rsc_enabled(rx_ring); 3129 clear_ring_rsc_enabled(rx_ring);
3157#ifdef IXGBE_FCOE
3158 if (netdev->features & NETIF_F_FCOE_MTU) {
3159 struct ixgbe_ring_feature *f;
3160 f = &adapter->ring_feature[RING_F_FCOE];
3161 if ((i >= f->mask) && (i < f->mask + f->indices))
3162 set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state);
3163 }
3164#endif /* IXGBE_FCOE */
3165 } 3130 }
3166} 3131}
3167 3132
@@ -3224,8 +3189,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3224 /* Program registers for the distribution of queues */ 3189 /* Program registers for the distribution of queues */
3225 ixgbe_setup_mrqc(adapter); 3190 ixgbe_setup_mrqc(adapter);
3226 3191
3227 ixgbe_set_uta(adapter);
3228
3229 /* set_rx_buffer_len must be called before ring initialization */ 3192 /* set_rx_buffer_len must be called before ring initialization */
3230 ixgbe_set_rx_buffer_len(adapter); 3193 ixgbe_set_rx_buffer_len(adapter);
3231 3194
@@ -3462,16 +3425,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3462 } 3425 }
3463 ixgbe_vlan_filter_enable(adapter); 3426 ixgbe_vlan_filter_enable(adapter);
3464 hw->addr_ctrl.user_set_promisc = false; 3427 hw->addr_ctrl.user_set_promisc = false;
3465 /* 3428 }
3466 * Write addresses to available RAR registers, if there is not 3429
3467 * sufficient space to store all the addresses then enable 3430 /*
3468 * unicast promiscuous mode 3431 * Write addresses to available RAR registers, if there is not
3469 */ 3432 * sufficient space to store all the addresses then enable
3470 count = ixgbe_write_uc_addr_list(netdev); 3433 * unicast promiscuous mode
3471 if (count < 0) { 3434 */
3472 fctrl |= IXGBE_FCTRL_UPE; 3435 count = ixgbe_write_uc_addr_list(netdev);
3473 vmolr |= IXGBE_VMOLR_ROPE; 3436 if (count < 0) {
3474 } 3437 fctrl |= IXGBE_FCTRL_UPE;
3438 vmolr |= IXGBE_VMOLR_ROPE;
3475 } 3439 }
3476 3440
3477 if (adapter->num_vfs) { 3441 if (adapter->num_vfs) {
@@ -4836,7 +4800,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
4836 4800
4837 pci_wake_from_d3(pdev, false); 4801 pci_wake_from_d3(pdev, false);
4838 4802
4803 rtnl_lock();
4839 err = ixgbe_init_interrupt_scheme(adapter); 4804 err = ixgbe_init_interrupt_scheme(adapter);
4805 rtnl_unlock();
4840 if (err) { 4806 if (err) {
4841 e_dev_err("Cannot initialize interrupts for device\n"); 4807 e_dev_err("Cannot initialize interrupts for device\n");
4842 return err; 4808 return err;
@@ -4893,6 +4859,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4893 if (wufc) { 4859 if (wufc) {
4894 ixgbe_set_rx_mode(netdev); 4860 ixgbe_set_rx_mode(netdev);
4895 4861
4862 /*
4863 * enable the optics for both mult-speed fiber and
4864 * 82599 SFP+ fiber as we can WoL.
4865 */
4866 if (hw->mac.ops.enable_tx_laser &&
4867 (hw->phy.multispeed_fiber ||
4868 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
4869 hw->mac.type == ixgbe_mac_82599EB)))
4870 hw->mac.ops.enable_tx_laser(hw);
4871
4896 /* turn on all-multi mode if wake on multicast is enabled */ 4872 /* turn on all-multi mode if wake on multicast is enabled */
4897 if (wufc & IXGBE_WUFC_MC) { 4873 if (wufc & IXGBE_WUFC_MC) {
4898 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4874 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -6681,6 +6657,74 @@ static int ixgbe_set_features(struct net_device *netdev,
6681 return 0; 6657 return 0;
6682} 6658}
6683 6659
6660static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
6661 struct net_device *dev,
6662 unsigned char *addr,
6663 u16 flags)
6664{
6665 struct ixgbe_adapter *adapter = netdev_priv(dev);
6666 int err = -EOPNOTSUPP;
6667
6668 if (ndm->ndm_state & NUD_PERMANENT) {
6669 pr_info("%s: FDB only supports static addresses\n",
6670 ixgbe_driver_name);
6671 return -EINVAL;
6672 }
6673
6674 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6675 if (is_unicast_ether_addr(addr))
6676 err = dev_uc_add_excl(dev, addr);
6677 else if (is_multicast_ether_addr(addr))
6678 err = dev_mc_add_excl(dev, addr);
6679 else
6680 err = -EINVAL;
6681 }
6682
6683 /* Only return duplicate errors if NLM_F_EXCL is set */
6684 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6685 err = 0;
6686
6687 return err;
6688}
6689
6690static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
6691 struct net_device *dev,
6692 unsigned char *addr)
6693{
6694 struct ixgbe_adapter *adapter = netdev_priv(dev);
6695 int err = -EOPNOTSUPP;
6696
6697 if (ndm->ndm_state & NUD_PERMANENT) {
6698 pr_info("%s: FDB only supports static addresses\n",
6699 ixgbe_driver_name);
6700 return -EINVAL;
6701 }
6702
6703 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6704 if (is_unicast_ether_addr(addr))
6705 err = dev_uc_del(dev, addr);
6706 else if (is_multicast_ether_addr(addr))
6707 err = dev_mc_del(dev, addr);
6708 else
6709 err = -EINVAL;
6710 }
6711
6712 return err;
6713}
6714
6715static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
6716 struct netlink_callback *cb,
6717 struct net_device *dev,
6718 int idx)
6719{
6720 struct ixgbe_adapter *adapter = netdev_priv(dev);
6721
6722 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6723 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
6724
6725 return idx;
6726}
6727
6684static const struct net_device_ops ixgbe_netdev_ops = { 6728static const struct net_device_ops ixgbe_netdev_ops = {
6685 .ndo_open = ixgbe_open, 6729 .ndo_open = ixgbe_open,
6686 .ndo_stop = ixgbe_close, 6730 .ndo_stop = ixgbe_close,
@@ -6717,6 +6761,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6717#endif /* IXGBE_FCOE */ 6761#endif /* IXGBE_FCOE */
6718 .ndo_set_features = ixgbe_set_features, 6762 .ndo_set_features = ixgbe_set_features,
6719 .ndo_fix_features = ixgbe_fix_features, 6763 .ndo_fix_features = ixgbe_fix_features,
6764 .ndo_fdb_add = ixgbe_ndo_fdb_add,
6765 .ndo_fdb_del = ixgbe_ndo_fdb_del,
6766 .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
6720}; 6767};
6721 6768
6722static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, 6769static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index bf9f82f4b1ae..24117709d6a2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1582,13 +1582,21 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
1582 **/ 1582 **/
1583static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) 1583static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1584{ 1584{
1585 *i2cctl |= IXGBE_I2C_CLK_OUT; 1585 u32 i = 0;
1586 1586 u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
1587 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1587 u32 i2cctl_r = 0;
1588 IXGBE_WRITE_FLUSH(hw);
1589 1588
1590 /* SCL rise time (1000ns) */ 1589 for (i = 0; i < timeout; i++) {
1591 udelay(IXGBE_I2C_T_RISE); 1590 *i2cctl |= IXGBE_I2C_CLK_OUT;
1591 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1592 IXGBE_WRITE_FLUSH(hw);
1593 /* SCL rise time (1000ns) */
1594 udelay(IXGBE_I2C_T_RISE);
1595
1596 i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1597 if (i2cctl_r & IXGBE_I2C_CLK_IN)
1598 break;
1599 }
1592} 1600}
1593 1601
1594/** 1602/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8636e8344fc9..4acd9e665b28 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -110,6 +110,7 @@
110#define IXGBE_I2C_CLK_OUT 0x00000002 110#define IXGBE_I2C_CLK_OUT 0x00000002
111#define IXGBE_I2C_DATA_IN 0x00000004 111#define IXGBE_I2C_DATA_IN 0x00000004
112#define IXGBE_I2C_DATA_OUT 0x00000008 112#define IXGBE_I2C_DATA_OUT 0x00000008
113#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
113 114
114/* Interrupt Registers */ 115/* Interrupt Registers */
115#define IXGBE_EICR 0x00800 116#define IXGBE_EICR 0x00800
@@ -1681,7 +1682,9 @@ enum {
1681#define IXGBE_DEVICE_CAPS 0x2C 1682#define IXGBE_DEVICE_CAPS 0x2C
1682#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 1683#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
1683#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 1684#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
1685#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
1684#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 1686#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
1687#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
1685 1688
1686/* MSI-X capability fields masks */ 1689/* MSI-X capability fields masks */
1687#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF 1690#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
@@ -2813,6 +2816,7 @@ struct ixgbe_mac_info {
2813 u16 wwnn_prefix; 2816 u16 wwnn_prefix;
2814 /* prefix for World Wide Port Name (WWPN) */ 2817 /* prefix for World Wide Port Name (WWPN) */
2815 u16 wwpn_prefix; 2818 u16 wwpn_prefix;
2819 u16 max_msix_vectors;
2816#define IXGBE_MAX_MTA 128 2820#define IXGBE_MAX_MTA 128
2817 u32 mta_shadow[IXGBE_MAX_MTA]; 2821 u32 mta_shadow[IXGBE_MAX_MTA];
2818 s32 mc_filter_type; 2822 s32 mc_filter_type;
@@ -2823,7 +2827,6 @@ struct ixgbe_mac_info {
2823 u32 rx_pb_size; 2827 u32 rx_pb_size;
2824 u32 max_tx_queues; 2828 u32 max_tx_queues;
2825 u32 max_rx_queues; 2829 u32 max_rx_queues;
2826 u32 max_msix_vectors;
2827 u32 orig_autoc; 2830 u32 orig_autoc;
2828 u32 orig_autoc2; 2831 u32 orig_autoc2;
2829 bool orig_link_settings_stored; 2832 bool orig_link_settings_stored;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5e1ca0f05090..c8950da60e6b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1665,6 +1665,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1665 .get_strings = mv643xx_eth_get_strings, 1665 .get_strings = mv643xx_eth_get_strings,
1666 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1666 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
1667 .get_sset_count = mv643xx_eth_get_sset_count, 1667 .get_sset_count = mv643xx_eth_get_sset_count,
1668 .get_ts_info = ethtool_op_get_ts_info,
1668}; 1669};
1669 1670
1670 1671
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index efec6b60b327..1db023b075a1 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1456,6 +1456,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = {
1456 .set_settings = pxa168_set_settings, 1456 .set_settings = pxa168_set_settings,
1457 .get_drvinfo = pxa168_get_drvinfo, 1457 .get_drvinfo = pxa168_get_drvinfo,
1458 .get_link = ethtool_op_get_link, 1458 .get_link = ethtool_op_get_link,
1459 .get_ts_info = ethtool_op_get_ts_info,
1459}; 1460};
1460 1461
1461static const struct net_device_ops pxa168_eth_netdev_ops = { 1462static const struct net_device_ops pxa168_eth_netdev_ops = {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index c9b504e2dfc3..7732474263da 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4816,14 +4816,14 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
4816 4816
4817 init_waitqueue_head(&hw->msi_wait); 4817 init_waitqueue_head(&hw->msi_wait);
4818 4818
4819 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4820
4821 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); 4819 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
4822 if (err) { 4820 if (err) {
4823 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 4821 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4824 return err; 4822 return err;
4825 } 4823 }
4826 4824
4825 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4826
4827 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); 4827 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
4828 sky2_read8(hw, B0_CTST); 4828 sky2_read8(hw, B0_CTST);
4829 4829
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 1bb93531f1ba..5f027f95cc84 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -11,6 +11,18 @@ config MLX4_EN
11 This driver supports Mellanox Technologies ConnectX Ethernet 11 This driver supports Mellanox Technologies ConnectX Ethernet
12 devices. 12 devices.
13 13
14config MLX4_EN_DCB
15 bool "Data Center Bridging (DCB) Support"
16 default y
17 depends on MLX4_EN && DCB
18 ---help---
19 Say Y here if you want to use Data Center Bridging (DCB) in the
20 driver.
21 If set to N, will not be able to configure QoS and ratelimit attributes.
22 This flag is depended on the kernel's DCB support.
23
24 If unsure, set to Y
25
14config MLX4_CORE 26config MLX4_CORE
15 tristate 27 tristate
16 depends on PCI 28 depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 4a40ab967eeb..293127d28b33 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ 8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o en_selftest.o 9 en_resources.o en_netdev.o en_selftest.o
10mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 00b81272e314..908a460d8db6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -124,11 +124,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
124 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; 124 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
125 cq->mcq.event = mlx4_en_cq_event; 125 cq->mcq.event = mlx4_en_cq_event;
126 126
127 if (cq->is_tx) { 127 if (!cq->is_tx) {
128 init_timer(&cq->timer);
129 cq->timer.function = mlx4_en_poll_tx_cq;
130 cq->timer.data = (unsigned long) cq;
131 } else {
132 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); 128 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
133 napi_enable(&cq->napi); 129 napi_enable(&cq->napi);
134 } 130 }
@@ -151,16 +147,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
151 147
152void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 148void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
153{ 149{
154 struct mlx4_en_dev *mdev = priv->mdev; 150 if (!cq->is_tx) {
155
156 if (cq->is_tx)
157 del_timer(&cq->timer);
158 else {
159 napi_disable(&cq->napi); 151 napi_disable(&cq->napi);
160 netif_napi_del(&cq->napi); 152 netif_napi_del(&cq->napi);
161 } 153 }
162 154
163 mlx4_cq_free(mdev->dev, &cq->mcq); 155 mlx4_cq_free(priv->mdev->dev, &cq->mcq);
164} 156}
165 157
166/* Set rx cq moderation parameters */ 158/* Set rx cq moderation parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
new file mode 100644
index 000000000000..5d36795877cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -0,0 +1,255 @@
1/*
2 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/dcbnl.h>
35#include <linux/math64.h>
36
37#include "mlx4_en.h"
38
39static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
40 struct ieee_ets *ets)
41{
42 struct mlx4_en_priv *priv = netdev_priv(dev);
43 struct ieee_ets *my_ets = &priv->ets;
44
45 /* No IEEE PFC settings available */
46 if (!my_ets)
47 return -EINVAL;
48
49 ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
50 ets->cbs = my_ets->cbs;
51 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
52 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
53 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
54
55 return 0;
56}
57
58static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
59{
60 int i;
61 int total_ets_bw = 0;
62 int has_ets_tc = 0;
63
64 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
65 if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
66 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
67 i, ets->prio_tc[i]);
68 return -EINVAL;
69 }
70
71 switch (ets->tc_tsa[i]) {
72 case IEEE_8021QAZ_TSA_STRICT:
73 break;
74 case IEEE_8021QAZ_TSA_ETS:
75 has_ets_tc = 1;
76 total_ets_bw += ets->tc_tx_bw[i];
77 break;
78 default:
79 en_err(priv, "TC[%d]: Not supported TSA: %d\n",
80 i, ets->tc_tsa[i]);
81 return -ENOTSUPP;
82 }
83 }
84
85 if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
86 en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
87 total_ets_bw);
88 return -EINVAL;
89 }
90
91 return 0;
92}
93
94static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
95 struct ieee_ets *ets, u16 *ratelimit)
96{
97 struct mlx4_en_dev *mdev = priv->mdev;
98 int num_strict = 0;
99 int i;
100 __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
101 __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
102
103 ets = ets ?: &priv->ets;
104 ratelimit = ratelimit ?: priv->maxrate;
105
106 /* higher TC means higher priority => lower pg */
107 for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
108 switch (ets->tc_tsa[i]) {
109 case IEEE_8021QAZ_TSA_STRICT:
110 pg[i] = num_strict++;
111 tc_tx_bw[i] = MLX4_EN_BW_MAX;
112 break;
113 case IEEE_8021QAZ_TSA_ETS:
114 pg[i] = MLX4_EN_TC_ETS;
115 tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
116 break;
117 }
118 }
119
120 return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
121 ratelimit);
122}
123
124static int
125mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
126{
127 struct mlx4_en_priv *priv = netdev_priv(dev);
128 struct mlx4_en_dev *mdev = priv->mdev;
129 int err;
130
131 err = mlx4_en_ets_validate(priv, ets);
132 if (err)
133 return err;
134
135 err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
136 if (err)
137 return err;
138
139 err = mlx4_en_config_port_scheduler(priv, ets, NULL);
140 if (err)
141 return err;
142
143 memcpy(&priv->ets, ets, sizeof(priv->ets));
144
145 return 0;
146}
147
148static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
149 struct ieee_pfc *pfc)
150{
151 struct mlx4_en_priv *priv = netdev_priv(dev);
152
153 pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
154 pfc->pfc_en = priv->prof->tx_ppp;
155
156 return 0;
157}
158
159static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
160 struct ieee_pfc *pfc)
161{
162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct mlx4_en_dev *mdev = priv->mdev;
164 int err;
165
166 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
167 pfc->pfc_cap,
168 pfc->pfc_en,
169 pfc->mbc,
170 pfc->delay);
171
172 priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
173 priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
174
175 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
176 priv->rx_skb_size + ETH_FCS_LEN,
177 priv->prof->tx_pause,
178 priv->prof->tx_ppp,
179 priv->prof->rx_pause,
180 priv->prof->rx_ppp);
181 if (err)
182 en_err(priv, "Failed setting pause params\n");
183
184 return err;
185}
186
/* Only the host-driven IEEE 802.1Qaz DCBX variant is supported. */
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
	return DCB_CAP_DCBX_VER_IEEE;
}
191
192static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
193{
194 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
195 (mode & DCB_CAP_DCBX_VER_CEE) ||
196 !(mode & DCB_CAP_DCBX_VER_IEEE) ||
197 !(mode & DCB_CAP_DCBX_HOST))
198 return 1;
199
200 return 0;
201}
202
203#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
204static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
205 struct ieee_maxrate *maxrate)
206{
207 struct mlx4_en_priv *priv = netdev_priv(dev);
208 int i;
209
210 if (!priv->maxrate)
211 return -EINVAL;
212
213 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
214 maxrate->tc_maxrate[i] =
215 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
216
217 return 0;
218}
219
220static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
221 struct ieee_maxrate *maxrate)
222{
223 struct mlx4_en_priv *priv = netdev_priv(dev);
224 u16 tmp[IEEE_8021QAZ_MAX_TCS];
225 int i, err;
226
227 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
228 /* Convert from Kbps into HW units, rounding result up.
229 * Setting to 0, means unlimited BW.
230 */
231 tmp[i] = div_u64(maxrate->tc_maxrate[i] +
232 MLX4_RATELIMIT_UNITS_IN_KB - 1,
233 MLX4_RATELIMIT_UNITS_IN_KB);
234 }
235
236 err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
237 if (err)
238 return err;
239
240 memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
241
242 return 0;
243}
244
/* DCB netlink operations table: implements only the IEEE 802.1Qaz
 * (ETS / PFC / maxrate) interface; CEE is not supported, as advertised
 * by the getdcbx/setdcbx handlers.
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
	.ieee_getets	= mlx4_en_dcbnl_ieee_getets,
	.ieee_setets	= mlx4_en_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 70346fd7f9c4..72901ce2b088 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -83,7 +83,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
83#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) 83#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
84 84
85static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { 85static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
86 "Interupt Test", 86 "Interrupt Test",
87 "Link Test", 87 "Link Test",
88 "Speed Test", 88 "Speed Test",
89 "Register Test", 89 "Register Test",
@@ -359,8 +359,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
359{ 359{
360 struct mlx4_en_priv *priv = netdev_priv(dev); 360 struct mlx4_en_priv *priv = netdev_priv(dev);
361 361
362 coal->tx_coalesce_usecs = 0; 362 coal->tx_coalesce_usecs = priv->tx_usecs;
363 coal->tx_max_coalesced_frames = 0; 363 coal->tx_max_coalesced_frames = priv->tx_frames;
364 coal->rx_coalesce_usecs = priv->rx_usecs; 364 coal->rx_coalesce_usecs = priv->rx_usecs;
365 coal->rx_max_coalesced_frames = priv->rx_frames; 365 coal->rx_max_coalesced_frames = priv->rx_frames;
366 366
@@ -388,6 +388,21 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
388 MLX4_EN_RX_COAL_TIME : 388 MLX4_EN_RX_COAL_TIME :
389 coal->rx_coalesce_usecs; 389 coal->rx_coalesce_usecs;
390 390
391 /* Setting TX coalescing parameters */
392 if (coal->tx_coalesce_usecs != priv->tx_usecs ||
393 coal->tx_max_coalesced_frames != priv->tx_frames) {
394 priv->tx_usecs = coal->tx_coalesce_usecs;
395 priv->tx_frames = coal->tx_max_coalesced_frames;
396 for (i = 0; i < priv->tx_ring_num; i++) {
397 priv->tx_cq[i].moder_cnt = priv->tx_frames;
398 priv->tx_cq[i].moder_time = priv->tx_usecs;
399 if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
400 en_warn(priv, "Failed changing moderation "
401 "for TX cq %d\n", i);
402 }
403 }
404 }
405
391 /* Set adaptive coalescing params */ 406 /* Set adaptive coalescing params */
392 priv->pkt_rate_low = coal->pkt_rate_low; 407 priv->pkt_rate_low = coal->pkt_rate_low;
393 priv->rx_usecs_low = coal->rx_coalesce_usecs_low; 408 priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2097a7d3c5b8..346fdb2e92a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -114,7 +114,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
114 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; 114 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
115 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; 115 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
116 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + 116 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
117 (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; 117 MLX4_EN_NUM_PPP_RINGS;
118 params->prof[i].rss_rings = 0; 118 params->prof[i].rss_rings = 0;
119 } 119 }
120 120
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 31b455a49273..eaa8fadf19c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,6 +45,14 @@
45#include "mlx4_en.h" 45#include "mlx4_en.h"
46#include "en_port.h" 46#include "en_port.h"
47 47
48static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
49{
50 if (up != MLX4_EN_NUM_UP)
51 return -EINVAL;
52
53 return 0;
54}
55
48static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 56static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
49{ 57{
50 struct mlx4_en_priv *priv = netdev_priv(dev); 58 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -421,6 +429,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
421 */ 429 */
422 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 430 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
423 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 431 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
432 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
433 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
424 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " 434 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
425 "rx_frames:%d rx_usecs:%d\n", 435 "rx_frames:%d rx_usecs:%d\n",
426 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 436 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
@@ -437,8 +447,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
437 447
438 for (i = 0; i < priv->tx_ring_num; i++) { 448 for (i = 0; i < priv->tx_ring_num; i++) {
439 cq = &priv->tx_cq[i]; 449 cq = &priv->tx_cq[i];
440 cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; 450 cq->moder_cnt = priv->tx_frames;
441 cq->moder_time = MLX4_EN_TX_COAL_TIME; 451 cq->moder_time = priv->tx_usecs;
442 } 452 }
443 453
444 /* Reset auto-moderation params */ 454 /* Reset auto-moderation params */
@@ -650,12 +660,18 @@ int mlx4_en_start_port(struct net_device *dev)
650 660
651 /* Configure ring */ 661 /* Configure ring */
652 tx_ring = &priv->tx_ring[i]; 662 tx_ring = &priv->tx_ring[i];
653 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); 663 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
664 max(0, i - MLX4_EN_NUM_TX_RINGS));
654 if (err) { 665 if (err) {
655 en_err(priv, "Failed allocating Tx ring\n"); 666 en_err(priv, "Failed allocating Tx ring\n");
656 mlx4_en_deactivate_cq(priv, cq); 667 mlx4_en_deactivate_cq(priv, cq);
657 goto tx_err; 668 goto tx_err;
658 } 669 }
670 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
671
672 /* Arm CQ for TX completions */
673 mlx4_en_arm_cq(priv, cq);
674
659 /* Set initial ownership of all Tx TXBBs to SW (1) */ 675 /* Set initial ownership of all Tx TXBBs to SW (1) */
660 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 676 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
661 *((u32 *) (tx_ring->buf + j)) = 0xffffffff; 677 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
@@ -797,12 +813,15 @@ static void mlx4_en_restart(struct work_struct *work)
797 watchdog_task); 813 watchdog_task);
798 struct mlx4_en_dev *mdev = priv->mdev; 814 struct mlx4_en_dev *mdev = priv->mdev;
799 struct net_device *dev = priv->dev; 815 struct net_device *dev = priv->dev;
816 int i;
800 817
801 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 818 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
802 819
803 mutex_lock(&mdev->state_lock); 820 mutex_lock(&mdev->state_lock);
804 if (priv->port_up) { 821 if (priv->port_up) {
805 mlx4_en_stop_port(dev); 822 mlx4_en_stop_port(dev);
823 for (i = 0; i < priv->tx_ring_num; i++)
824 netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
806 if (mlx4_en_start_port(dev)) 825 if (mlx4_en_start_port(dev))
807 en_err(priv, "Failed restarting port %d\n", priv->port); 826 en_err(priv, "Failed restarting port %d\n", priv->port);
808 } 827 }
@@ -966,6 +985,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
966 mutex_unlock(&mdev->state_lock); 985 mutex_unlock(&mdev->state_lock);
967 986
968 mlx4_en_free_resources(priv); 987 mlx4_en_free_resources(priv);
988
969 free_netdev(dev); 989 free_netdev(dev);
970} 990}
971 991
@@ -1036,6 +1056,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
1036 .ndo_poll_controller = mlx4_en_netpoll, 1056 .ndo_poll_controller = mlx4_en_netpoll,
1037#endif 1057#endif
1038 .ndo_set_features = mlx4_en_set_features, 1058 .ndo_set_features = mlx4_en_set_features,
1059 .ndo_setup_tc = mlx4_en_setup_tc,
1039}; 1060};
1040 1061
1041int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 1062int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1079,6 +1100,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1079 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 1100 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
1080 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 1101 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
1081 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 1102 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
1103#ifdef CONFIG_MLX4_EN_DCB
1104 if (!mlx4_is_slave(priv->mdev->dev))
1105 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
1106#endif
1082 1107
1083 /* Query for default mac and max mtu */ 1108 /* Query for default mac and max mtu */
1084 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 1109 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
@@ -1113,6 +1138,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1113 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 1138 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1114 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 1139 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1115 1140
1141 netdev_set_num_tc(dev, MLX4_EN_NUM_UP);
1142
1143 /* First 9 rings are for UP 0 */
1144 netdev_set_tc_queue(dev, 0, MLX4_EN_NUM_TX_RINGS + 1, 0);
1145
1146 /* Partition Tx queues evenly amongst UP's 1-7 */
1147 for (i = 1; i < MLX4_EN_NUM_UP; i++)
1148 netdev_set_tc_queue(dev, i, 1, MLX4_EN_NUM_TX_RINGS + i);
1149
1116 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); 1150 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1117 1151
1118 /* Set defualt MAC */ 1152 /* Set defualt MAC */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 6934fd7e66ed..745090b49d9e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,6 +39,8 @@
39#define SET_PORT_PROMISC_SHIFT 31 39#define SET_PORT_PROMISC_SHIFT 31
40#define SET_PORT_MC_PROMISC_SHIFT 30 40#define SET_PORT_MC_PROMISC_SHIFT 30
41 41
42#define MLX4_EN_NUM_TC 8
43
42#define VLAN_FLTR_SIZE 128 44#define VLAN_FLTR_SIZE 128
43struct mlx4_set_vlan_fltr_mbox { 45struct mlx4_set_vlan_fltr_mbox {
44 __be32 entry[VLAN_FLTR_SIZE]; 46 __be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index bcbc54c16947..10c24c784b70 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -39,7 +39,7 @@
39 39
40void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, 40void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
41 int is_tx, int rss, int qpn, int cqn, 41 int is_tx, int rss, int qpn, int cqn,
42 struct mlx4_qp_context *context) 42 int user_prio, struct mlx4_qp_context *context)
43{ 43{
44 struct mlx4_en_dev *mdev = priv->mdev; 44 struct mlx4_en_dev *mdev = priv->mdev;
45 45
@@ -57,6 +57,10 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
57 context->local_qpn = cpu_to_be32(qpn); 57 context->local_qpn = cpu_to_be32(qpn);
58 context->pri_path.ackto = 1 & 0x07; 58 context->pri_path.ackto = 1 & 0x07;
59 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 59 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
60 if (user_prio >= 0) {
61 context->pri_path.sched_queue |= user_prio << 3;
62 context->pri_path.feup = 1 << 6;
63 }
60 context->pri_path.counter_index = 0xff; 64 context->pri_path.counter_index = 0xff;
61 context->cqn_send = cpu_to_be32(cqn); 65 context->cqn_send = cpu_to_be32(cqn);
62 context->cqn_recv = cpu_to_be32(cqn); 66 context->cqn_recv = cpu_to_be32(cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9adbd53da525..d49a7ac3187d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -823,7 +823,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
823 823
824 memset(context, 0, sizeof *context); 824 memset(context, 0, sizeof *context);
825 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, 825 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
826 qpn, ring->cqn, context); 826 qpn, ring->cqn, -1, context);
827 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 827 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
828 828
829 /* Cancel FCS removal if FW allows */ 829 /* Cancel FCS removal if FW allows */
@@ -890,7 +890,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
890 } 890 }
891 rss_map->indir_qp.event = mlx4_en_sqp_event; 891 rss_map->indir_qp.event = mlx4_en_sqp_event;
892 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 892 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
893 priv->rx_ring[0].cqn, &context); 893 priv->rx_ring[0].cqn, -1, &context);
894 894
895 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) 895 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
896 rss_rings = priv->rx_ring_num; 896 rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 17968244c399..9a38483feb92 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -67,8 +67,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
67 67
68 inline_thold = min(inline_thold, MAX_INLINE); 68 inline_thold = min(inline_thold, MAX_INLINE);
69 69
70 spin_lock_init(&ring->comp_lock);
71
72 tmp = size * sizeof(struct mlx4_en_tx_info); 70 tmp = size * sizeof(struct mlx4_en_tx_info);
73 ring->tx_info = vmalloc(tmp); 71 ring->tx_info = vmalloc(tmp);
74 if (!ring->tx_info) 72 if (!ring->tx_info)
@@ -156,7 +154,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
156 154
157int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 155int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
158 struct mlx4_en_tx_ring *ring, 156 struct mlx4_en_tx_ring *ring,
159 int cq) 157 int cq, int user_prio)
160{ 158{
161 struct mlx4_en_dev *mdev = priv->mdev; 159 struct mlx4_en_dev *mdev = priv->mdev;
162 int err; 160 int err;
@@ -174,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
174 ring->doorbell_qpn = ring->qp.qpn << 8; 172 ring->doorbell_qpn = ring->qp.qpn << 8;
175 173
176 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 174 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
177 ring->cqn, &ring->context); 175 ring->cqn, user_prio, &ring->context);
178 if (ring->bf_enabled) 176 if (ring->bf_enabled)
179 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 177 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
180 178
@@ -317,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
317 int size = cq->size; 315 int size = cq->size;
318 u32 size_mask = ring->size_mask; 316 u32 size_mask = ring->size_mask;
319 struct mlx4_cqe *buf = cq->buf; 317 struct mlx4_cqe *buf = cq->buf;
318 u32 packets = 0;
319 u32 bytes = 0;
320 320
321 if (!priv->port_up) 321 if (!priv->port_up)
322 return; 322 return;
@@ -345,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
345 priv, ring, ring_index, 345 priv, ring, ring_index,
346 !!((ring->cons + txbbs_skipped) & 346 !!((ring->cons + txbbs_skipped) &
347 ring->size)); 347 ring->size));
348 packets++;
349 bytes += ring->tx_info[ring_index].nr_bytes;
348 } while (ring_index != new_index); 350 } while (ring_index != new_index);
349 351
350 ++cons_index; 352 ++cons_index;
@@ -361,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
361 mlx4_cq_set_ci(mcq); 363 mlx4_cq_set_ci(mcq);
362 wmb(); 364 wmb();
363 ring->cons += txbbs_skipped; 365 ring->cons += txbbs_skipped;
366 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
364 367
365 /* Wakeup Tx queue if this ring stopped it */ 368 /* Wakeup Tx queue if this ring stopped it */
366 if (unlikely(ring->blocked)) { 369 if (unlikely(ring->blocked)) {
367 if ((u32) (ring->prod - ring->cons) <= 370 if ((u32) (ring->prod - ring->cons) <=
368 ring->size - HEADROOM - MAX_DESC_TXBBS) { 371 ring->size - HEADROOM - MAX_DESC_TXBBS) {
369 ring->blocked = 0; 372 ring->blocked = 0;
370 netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); 373 netif_tx_wake_queue(ring->tx_queue);
371 priv->port_stats.wake_queue++; 374 priv->port_stats.wake_queue++;
372 } 375 }
373 } 376 }
@@ -377,41 +380,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
377{ 380{
378 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); 381 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
379 struct mlx4_en_priv *priv = netdev_priv(cq->dev); 382 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
380 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
381 383
382 if (!spin_trylock(&ring->comp_lock))
383 return;
384 mlx4_en_process_tx_cq(cq->dev, cq); 384 mlx4_en_process_tx_cq(cq->dev, cq);
385 mod_timer(&cq->timer, jiffies + 1); 385 mlx4_en_arm_cq(priv, cq);
386 spin_unlock(&ring->comp_lock);
387} 386}
388 387
389 388
390void mlx4_en_poll_tx_cq(unsigned long data)
391{
392 struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
393 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
394 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
395 u32 inflight;
396
397 INC_PERF_COUNTER(priv->pstats.tx_poll);
398
399 if (!spin_trylock_irq(&ring->comp_lock)) {
400 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
401 return;
402 }
403 mlx4_en_process_tx_cq(cq->dev, cq);
404 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
405
406 /* If there are still packets in flight and the timer has not already
407 * been scheduled by the Tx routine then schedule it here to guarantee
408 * completion processing of these packets */
409 if (inflight && priv->port_up)
410 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
411
412 spin_unlock_irq(&ring->comp_lock);
413}
414
415static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, 389static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
416 struct mlx4_en_tx_ring *ring, 390 struct mlx4_en_tx_ring *ring,
417 u32 index, 391 u32 index,
@@ -440,25 +414,6 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
440 return ring->buf + index * TXBB_SIZE; 414 return ring->buf + index * TXBB_SIZE;
441} 415}
442 416
443static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
444{
445 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
446 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
447 unsigned long flags;
448
449 /* If we don't have a pending timer, set one up to catch our recent
450 post in case the interface becomes idle */
451 if (!timer_pending(&cq->timer))
452 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
453
454 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
455 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
456 if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
457 mlx4_en_process_tx_cq(priv->dev, cq);
458 spin_unlock_irqrestore(&ring->comp_lock, flags);
459 }
460}
461
462static int is_inline(struct sk_buff *skb, void **pfrag) 417static int is_inline(struct sk_buff *skb, void **pfrag)
463{ 418{
464 void *ptr; 419 void *ptr;
@@ -570,13 +525,9 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
570 525
571u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 526u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
572{ 527{
573 struct mlx4_en_priv *priv = netdev_priv(dev);
574 u16 vlan_tag = 0; 528 u16 vlan_tag = 0;
575 529
576 /* If we support per priority flow control and the packet contains 530 if (vlan_tx_tag_present(skb)) {
577 * a vlan tag, send the packet to the TX ring assigned to that priority
578 */
579 if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
580 vlan_tag = vlan_tx_tag_get(skb); 531 vlan_tag = vlan_tx_tag_get(skb);
581 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); 532 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
582 } 533 }
@@ -594,7 +545,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
594 struct mlx4_en_priv *priv = netdev_priv(dev); 545 struct mlx4_en_priv *priv = netdev_priv(dev);
595 struct mlx4_en_dev *mdev = priv->mdev; 546 struct mlx4_en_dev *mdev = priv->mdev;
596 struct mlx4_en_tx_ring *ring; 547 struct mlx4_en_tx_ring *ring;
597 struct mlx4_en_cq *cq;
598 struct mlx4_en_tx_desc *tx_desc; 548 struct mlx4_en_tx_desc *tx_desc;
599 struct mlx4_wqe_data_seg *data; 549 struct mlx4_wqe_data_seg *data;
600 struct skb_frag_struct *frag; 550 struct skb_frag_struct *frag;
@@ -638,13 +588,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
638 if (unlikely(((int)(ring->prod - ring->cons)) > 588 if (unlikely(((int)(ring->prod - ring->cons)) >
639 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 589 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
640 /* every full Tx ring stops queue */ 590 /* every full Tx ring stops queue */
641 netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); 591 netif_tx_stop_queue(ring->tx_queue);
642 ring->blocked = 1; 592 ring->blocked = 1;
643 priv->port_stats.queue_stopped++; 593 priv->port_stats.queue_stopped++;
644 594
645 /* Use interrupts to find out when queue opened */
646 cq = &priv->tx_cq[tx_ind];
647 mlx4_en_arm_cq(priv, cq);
648 return NETDEV_TX_BUSY; 595 return NETDEV_TX_BUSY;
649 } 596 }
650 597
@@ -707,7 +654,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
707 priv->port_stats.tso_packets++; 654 priv->port_stats.tso_packets++;
708 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + 655 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
709 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); 656 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
710 ring->bytes += skb->len + (i - 1) * lso_header_size; 657 tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
711 ring->packets += i; 658 ring->packets += i;
712 } else { 659 } else {
713 /* Normal (Non LSO) packet */ 660 /* Normal (Non LSO) packet */
@@ -715,10 +662,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
715 ((ring->prod & ring->size) ? 662 ((ring->prod & ring->size) ?
716 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); 663 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
717 data = &tx_desc->data; 664 data = &tx_desc->data;
718 ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); 665 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
719 ring->packets++; 666 ring->packets++;
720 667
721 } 668 }
669 ring->bytes += tx_info->nr_bytes;
670 netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
722 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); 671 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
723 672
724 673
@@ -792,9 +741,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
792 iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); 741 iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
793 } 742 }
794 743
795 /* Poll CQ here */
796 mlx4_en_xmit_poll(priv, tx_ind);
797
798 return NETDEV_TX_OK; 744 return NETDEV_TX_OK;
799 745
800tx_drop: 746tx_drop:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 2a0ff2cc7182..cd56f1aea4b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -53,6 +53,26 @@
53#define DRV_VERSION "1.1" 53#define DRV_VERSION "1.1"
54#define DRV_RELDATE "Dec, 2011" 54#define DRV_RELDATE "Dec, 2011"
55 55
56#define MLX4_NUM_UP 8
57#define MLX4_NUM_TC 8
58#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
59#define MLX4_RATELIMIT_DEFAULT 0xffff
60
61struct mlx4_set_port_prio2tc_context {
62 u8 prio2tc[4];
63};
64
65struct mlx4_port_scheduler_tc_cfg_be {
66 __be16 pg;
67 __be16 bw_precentage;
68 __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
69 __be16 max_bw_value;
70};
71
72struct mlx4_set_port_scheduler_context {
73 struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
74};
75
56enum { 76enum {
57 MLX4_HCR_BASE = 0x80680, 77 MLX4_HCR_BASE = 0x80680,
58 MLX4_HCR_SIZE = 0x0001c, 78 MLX4_HCR_SIZE = 0x0001c,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d69fee41f24a..5d876375a132 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -40,6 +40,9 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/if_vlan.h> 42#include <linux/if_vlan.h>
43#ifdef CONFIG_MLX4_EN_DCB
44#include <linux/dcbnl.h>
45#endif
43 46
44#include <linux/mlx4/device.h> 47#include <linux/mlx4/device.h>
45#include <linux/mlx4/qp.h> 48#include <linux/mlx4/qp.h>
@@ -111,6 +114,7 @@ enum {
111#define MLX4_EN_NUM_TX_RINGS 8 114#define MLX4_EN_NUM_TX_RINGS 8
112#define MLX4_EN_NUM_PPP_RINGS 8 115#define MLX4_EN_NUM_PPP_RINGS 8
113#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS) 116#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
117#define MLX4_EN_NUM_UP 8
114#define MLX4_EN_DEF_TX_RING_SIZE 512 118#define MLX4_EN_DEF_TX_RING_SIZE 512
115#define MLX4_EN_DEF_RX_RING_SIZE 1024 119#define MLX4_EN_DEF_RX_RING_SIZE 1024
116 120
@@ -118,7 +122,7 @@ enum {
118#define MLX4_EN_RX_COAL_TARGET 44 122#define MLX4_EN_RX_COAL_TARGET 44
119#define MLX4_EN_RX_COAL_TIME 0x10 123#define MLX4_EN_RX_COAL_TIME 0x10
120 124
121#define MLX4_EN_TX_COAL_PKTS 5 125#define MLX4_EN_TX_COAL_PKTS 16
122#define MLX4_EN_TX_COAL_TIME 0x80 126#define MLX4_EN_TX_COAL_TIME 0x80
123 127
124#define MLX4_EN_RX_RATE_LOW 400000 128#define MLX4_EN_RX_RATE_LOW 400000
@@ -196,6 +200,7 @@ enum cq_type {
196struct mlx4_en_tx_info { 200struct mlx4_en_tx_info {
197 struct sk_buff *skb; 201 struct sk_buff *skb;
198 u32 nr_txbb; 202 u32 nr_txbb;
203 u32 nr_bytes;
199 u8 linear; 204 u8 linear;
200 u8 data_offset; 205 u8 data_offset;
201 u8 inl; 206 u8 inl;
@@ -251,9 +256,9 @@ struct mlx4_en_tx_ring {
251 unsigned long bytes; 256 unsigned long bytes;
252 unsigned long packets; 257 unsigned long packets;
253 unsigned long tx_csum; 258 unsigned long tx_csum;
254 spinlock_t comp_lock;
255 struct mlx4_bf bf; 259 struct mlx4_bf bf;
256 bool bf_enabled; 260 bool bf_enabled;
261 struct netdev_queue *tx_queue;
257}; 262};
258 263
259struct mlx4_en_rx_desc { 264struct mlx4_en_rx_desc {
@@ -304,8 +309,6 @@ struct mlx4_en_cq {
304 spinlock_t lock; 309 spinlock_t lock;
305 struct net_device *dev; 310 struct net_device *dev;
306 struct napi_struct napi; 311 struct napi_struct napi;
307 /* Per-core Tx cq processing support */
308 struct timer_list timer;
309 int size; 312 int size;
310 int buf_size; 313 int buf_size;
311 unsigned vector; 314 unsigned vector;
@@ -411,6 +414,15 @@ struct mlx4_en_frag_info {
411 414
412}; 415};
413 416
417#ifdef CONFIG_MLX4_EN_DCB
418/* Minimal TC BW - setting to 0 will block traffic */
419#define MLX4_EN_BW_MIN 1
420#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
421
422#define MLX4_EN_TC_ETS 7
423
424#endif
425
414struct mlx4_en_priv { 426struct mlx4_en_priv {
415 struct mlx4_en_dev *mdev; 427 struct mlx4_en_dev *mdev;
416 struct mlx4_en_port_profile *prof; 428 struct mlx4_en_port_profile *prof;
@@ -484,6 +496,11 @@ struct mlx4_en_priv {
484 int vids[128]; 496 int vids[128];
485 bool wol; 497 bool wol;
486 struct device *ddev; 498 struct device *ddev;
499
500#ifdef CONFIG_MLX4_EN_DCB
501 struct ieee_ets ets;
502 u16 maxrate[IEEE_8021QAZ_MAX_TCS];
503#endif
487}; 504};
488 505
489enum mlx4_en_wol { 506enum mlx4_en_wol {
@@ -512,7 +529,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
512int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 529int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
513int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 530int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
514 531
515void mlx4_en_poll_tx_cq(unsigned long data);
516void mlx4_en_tx_irq(struct mlx4_cq *mcq); 532void mlx4_en_tx_irq(struct mlx4_cq *mcq);
517u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 533u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
518netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 534netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -522,7 +538,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ri
522void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 538void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
523int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 539int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
524 struct mlx4_en_tx_ring *ring, 540 struct mlx4_en_tx_ring *ring,
525 int cq); 541 int cq, int user_prio);
526void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, 542void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
527 struct mlx4_en_tx_ring *ring); 543 struct mlx4_en_tx_ring *ring);
528 544
@@ -540,8 +556,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
540 int budget); 556 int budget);
541int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); 557int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
542void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, 558void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
543 int is_tx, int rss, int qpn, int cqn, 559 int is_tx, int rss, int qpn, int cqn, int user_prio,
544 struct mlx4_qp_context *context); 560 struct mlx4_qp_context *context);
545void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); 561void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
546int mlx4_en_map_buffer(struct mlx4_buf *buf); 562int mlx4_en_map_buffer(struct mlx4_buf *buf);
547void mlx4_en_unmap_buffer(struct mlx4_buf *buf); 563void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
@@ -558,6 +574,10 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
558int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); 574int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
559int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); 575int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
560 576
577#ifdef CONFIG_MLX4_EN_DCB
578extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
579#endif
580
561#define MLX4_EN_NUM_SELF_TEST 5 581#define MLX4_EN_NUM_SELF_TEST 5
562void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); 582void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
563u64 mlx4_en_mac_to_u64(u8 *addr); 583u64 mlx4_en_mac_to_u64(u8 *addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 77535ff18f1b..55b12e6bed87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -834,6 +834,68 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
834} 834}
835EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); 835EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
836 836
837int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
838{
839 struct mlx4_cmd_mailbox *mailbox;
840 struct mlx4_set_port_prio2tc_context *context;
841 int err;
842 u32 in_mod;
843 int i;
844
845 mailbox = mlx4_alloc_cmd_mailbox(dev);
846 if (IS_ERR(mailbox))
847 return PTR_ERR(mailbox);
848 context = mailbox->buf;
849 memset(context, 0, sizeof *context);
850
851 for (i = 0; i < MLX4_NUM_UP; i += 2)
852 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
853
854 in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
855 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
856 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
857
858 mlx4_free_cmd_mailbox(dev, mailbox);
859 return err;
860}
861EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
862
863int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
864 u8 *pg, u16 *ratelimit)
865{
866 struct mlx4_cmd_mailbox *mailbox;
867 struct mlx4_set_port_scheduler_context *context;
868 int err;
869 u32 in_mod;
870 int i;
871
872 mailbox = mlx4_alloc_cmd_mailbox(dev);
873 if (IS_ERR(mailbox))
874 return PTR_ERR(mailbox);
875 context = mailbox->buf;
876 memset(context, 0, sizeof *context);
877
878 for (i = 0; i < MLX4_NUM_TC; i++) {
879 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
880 u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
881 MLX4_RATELIMIT_DEFAULT;
882
883 tc->pg = htons(pg[i]);
884 tc->bw_precentage = htons(tc_tx_bw[i]);
885
886 tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
887 tc->max_bw_value = htons(r);
888 }
889
890 in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
891 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
892 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
893
894 mlx4_free_cmd_mailbox(dev, mailbox);
895 return err;
896}
897EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
898
837int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, 899int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
838 struct mlx4_vhcr *vhcr, 900 struct mlx4_vhcr *vhcr,
839 struct mlx4_cmd_mailbox *inbox, 901 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f84dd2dc82b6..24fb049ac2f2 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1262,7 +1262,7 @@ static struct platform_driver ks8842_platform_driver = {
1262 .owner = THIS_MODULE, 1262 .owner = THIS_MODULE,
1263 }, 1263 },
1264 .probe = ks8842_probe, 1264 .probe = ks8842_probe,
1265 .remove = ks8842_remove, 1265 .remove = __devexit_p(ks8842_remove),
1266}; 1266};
1267 1267
1268module_platform_driver(ks8842_platform_driver); 1268module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index c722aa607d07..f8dda009d3c0 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -889,16 +889,17 @@ static int ks8851_net_stop(struct net_device *dev)
889 netif_stop_queue(dev); 889 netif_stop_queue(dev);
890 890
891 mutex_lock(&ks->lock); 891 mutex_lock(&ks->lock);
892 /* turn off the IRQs and ack any outstanding */
893 ks8851_wrreg16(ks, KS_IER, 0x0000);
894 ks8851_wrreg16(ks, KS_ISR, 0xffff);
895 mutex_unlock(&ks->lock);
892 896
893 /* stop any outstanding work */ 897 /* stop any outstanding work */
894 flush_work(&ks->irq_work); 898 flush_work(&ks->irq_work);
895 flush_work(&ks->tx_work); 899 flush_work(&ks->tx_work);
896 flush_work(&ks->rxctrl_work); 900 flush_work(&ks->rxctrl_work);
897 901
898 /* turn off the IRQs and ack any outstanding */ 902 mutex_lock(&ks->lock);
899 ks8851_wrreg16(ks, KS_IER, 0x0000);
900 ks8851_wrreg16(ks, KS_ISR, 0xffff);
901
902 /* shutdown RX process */ 903 /* shutdown RX process */
903 ks8851_wrreg16(ks, KS_RXCR1, 0x0000); 904 ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
904 905
@@ -907,6 +908,7 @@ static int ks8851_net_stop(struct net_device *dev)
907 908
908 /* set powermode to soft power down to save power */ 909 /* set powermode to soft power down to save power */
909 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); 910 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
911 mutex_unlock(&ks->lock);
910 912
911 /* ensure any queued tx buffers are dumped */ 913 /* ensure any queued tx buffers are dumped */
912 while (!skb_queue_empty(&ks->txq)) { 914 while (!skb_queue_empty(&ks->txq)) {
@@ -918,7 +920,6 @@ static int ks8851_net_stop(struct net_device *dev)
918 dev_kfree_skb(txb); 920 dev_kfree_skb(txb);
919 } 921 }
920 922
921 mutex_unlock(&ks->lock);
922 return 0; 923 return 0;
923} 924}
924 925
@@ -1418,6 +1419,7 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1418 struct net_device *ndev; 1419 struct net_device *ndev;
1419 struct ks8851_net *ks; 1420 struct ks8851_net *ks;
1420 int ret; 1421 int ret;
1422 unsigned cider;
1421 1423
1422 ndev = alloc_etherdev(sizeof(struct ks8851_net)); 1424 ndev = alloc_etherdev(sizeof(struct ks8851_net));
1423 if (!ndev) 1425 if (!ndev)
@@ -1484,8 +1486,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1484 ks8851_soft_reset(ks, GRR_GSR); 1486 ks8851_soft_reset(ks, GRR_GSR);
1485 1487
1486 /* simple check for a valid chip being connected to the bus */ 1488 /* simple check for a valid chip being connected to the bus */
1487 1489 cider = ks8851_rdreg16(ks, KS_CIDER);
1488 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1490 if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
1489 dev_err(&spi->dev, "failed to read device ID\n"); 1491 dev_err(&spi->dev, "failed to read device ID\n");
1490 ret = -ENODEV; 1492 ret = -ENODEV;
1491 goto err_id; 1493 goto err_id;
@@ -1516,15 +1518,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1516 } 1518 }
1517 1519
1518 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", 1520 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
1519 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), 1521 CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
1520 ndev->dev_addr, ndev->irq,
1521 ks->rc_ccr & CCR_EEPROM ? "has" : "no"); 1522 ks->rc_ccr & CCR_EEPROM ? "has" : "no");
1522 1523
1523 return 0; 1524 return 0;
1524 1525
1525 1526
1526err_netdev: 1527err_netdev:
1527 free_irq(ndev->irq, ndev); 1528 free_irq(ndev->irq, ks);
1528 1529
1529err_id: 1530err_id:
1530err_irq: 1531err_irq:
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index b8104d9f4081..5ffde23ac8fb 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,7 +40,7 @@
40#define DRV_NAME "ks8851_mll" 40#define DRV_NAME "ks8851_mll"
41 41
42static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; 42static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
43#define MAX_RECV_FRAMES 32 43#define MAX_RECV_FRAMES 255
44#define MAX_BUF_SIZE 2048 44#define MAX_BUF_SIZE 2048
45#define TX_BUF_SIZE 2000 45#define TX_BUF_SIZE 2000
46#define RX_BUF_SIZE 2000 46#define RX_BUF_SIZE 2000
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ef723b185d85..eaf9ff0262a9 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5675,7 +5675,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); 5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
5676 } 5676 }
5677 5677
5678 memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); 5678 memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
5679 5679
5680 interrupt = hw_block_intr(hw); 5680 interrupt = hw_block_intr(hw);
5681 5681
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 27273ae1a6e6..90153fc983cb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,7 +4033,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4033 4033
4034 netdev->netdev_ops = &myri10ge_netdev_ops; 4034 netdev->netdev_ops = &myri10ge_netdev_ops;
4035 netdev->mtu = myri10ge_initial_mtu; 4035 netdev->mtu = myri10ge_initial_mtu;
4036 netdev->base_addr = mgp->iomem_base;
4037 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM; 4036 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
4038 netdev->features = netdev->hw_features; 4037 netdev->features = netdev->hw_features;
4039 4038
@@ -4047,12 +4046,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4047 netdev->vlan_features &= ~NETIF_F_TSO; 4046 netdev->vlan_features &= ~NETIF_F_TSO;
4048 4047
4049 /* make sure we can get an irq, and that MSI can be 4048 /* make sure we can get an irq, and that MSI can be
4050 * setup (if available). Also ensure netdev->irq 4049 * setup (if available). */
4051 * is set to correct value if MSI is enabled */
4052 status = myri10ge_request_irq(mgp); 4050 status = myri10ge_request_irq(mgp);
4053 if (status != 0) 4051 if (status != 0)
4054 goto abort_with_firmware; 4052 goto abort_with_firmware;
4055 netdev->irq = pdev->irq;
4056 myri10ge_free_irq(mgp); 4053 myri10ge_free_irq(mgp);
4057 4054
4058 /* Save configuration space to be restored if the 4055 /* Save configuration space to be restored if the
@@ -4077,7 +4074,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4077 else 4074 else
4078 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 4075 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
4079 mgp->msi_enabled ? "MSI" : "xPIC", 4076 mgp->msi_enabled ? "MSI" : "xPIC",
4080 netdev->irq, mgp->tx_boundary, mgp->fw_name, 4077 pdev->irq, mgp->tx_boundary, mgp->fw_name,
4081 (mgp->wc_enabled ? "Enabled" : "Disabled")); 4078 (mgp->wc_enabled ? "Enabled" : "Disabled"));
4082 4079
4083 board_number++; 4080 board_number++;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index d38e48d4f430..5b61d12f8b91 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -547,6 +547,7 @@ struct netdev_private {
547 struct sk_buff *tx_skbuff[TX_RING_SIZE]; 547 struct sk_buff *tx_skbuff[TX_RING_SIZE];
548 dma_addr_t tx_dma[TX_RING_SIZE]; 548 dma_addr_t tx_dma[TX_RING_SIZE];
549 struct net_device *dev; 549 struct net_device *dev;
550 void __iomem *ioaddr;
550 struct napi_struct napi; 551 struct napi_struct napi;
551 /* Media monitoring timer */ 552 /* Media monitoring timer */
552 struct timer_list timer; 553 struct timer_list timer;
@@ -699,7 +700,9 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
699 700
700static inline void __iomem *ns_ioaddr(struct net_device *dev) 701static inline void __iomem *ns_ioaddr(struct net_device *dev)
701{ 702{
702 return (void __iomem *) dev->base_addr; 703 struct netdev_private *np = netdev_priv(dev);
704
705 return np->ioaddr;
703} 706}
704 707
705static inline void natsemi_irq_enable(struct net_device *dev) 708static inline void natsemi_irq_enable(struct net_device *dev)
@@ -863,10 +866,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
863 /* Store MAC Address in perm_addr */ 866 /* Store MAC Address in perm_addr */
864 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); 867 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
865 868
866 dev->base_addr = (unsigned long __force) ioaddr;
867 dev->irq = irq;
868
869 np = netdev_priv(dev); 869 np = netdev_priv(dev);
870 np->ioaddr = ioaddr;
871
870 netif_napi_add(dev, &np->napi, natsemi_poll, 64); 872 netif_napi_add(dev, &np->napi, natsemi_poll, 64);
871 np->dev = dev; 873 np->dev = dev;
872 874
@@ -914,9 +916,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
914 } 916 }
915 917
916 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; 918 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
917 if (dev->mem_start)
918 option = dev->mem_start;
919
920 /* The lower four bits are the media type. */ 919 /* The lower four bits are the media type. */
921 if (option) { 920 if (option) {
922 if (option & 0x200) 921 if (option & 0x200)
@@ -1532,20 +1531,21 @@ static int netdev_open(struct net_device *dev)
1532{ 1531{
1533 struct netdev_private *np = netdev_priv(dev); 1532 struct netdev_private *np = netdev_priv(dev);
1534 void __iomem * ioaddr = ns_ioaddr(dev); 1533 void __iomem * ioaddr = ns_ioaddr(dev);
1534 const int irq = np->pci_dev->irq;
1535 int i; 1535 int i;
1536 1536
1537 /* Reset the chip, just in case. */ 1537 /* Reset the chip, just in case. */
1538 natsemi_reset(dev); 1538 natsemi_reset(dev);
1539 1539
1540 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 1540 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
1541 if (i) return i; 1541 if (i) return i;
1542 1542
1543 if (netif_msg_ifup(np)) 1543 if (netif_msg_ifup(np))
1544 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 1544 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
1545 dev->name, dev->irq); 1545 dev->name, irq);
1546 i = alloc_ring(dev); 1546 i = alloc_ring(dev);
1547 if (i < 0) { 1547 if (i < 0) {
1548 free_irq(dev->irq, dev); 1548 free_irq(irq, dev);
1549 return i; 1549 return i;
1550 } 1550 }
1551 napi_enable(&np->napi); 1551 napi_enable(&np->napi);
@@ -1794,6 +1794,7 @@ static void netdev_timer(unsigned long data)
1794 struct netdev_private *np = netdev_priv(dev); 1794 struct netdev_private *np = netdev_priv(dev);
1795 void __iomem * ioaddr = ns_ioaddr(dev); 1795 void __iomem * ioaddr = ns_ioaddr(dev);
1796 int next_tick = NATSEMI_TIMER_FREQ; 1796 int next_tick = NATSEMI_TIMER_FREQ;
1797 const int irq = np->pci_dev->irq;
1797 1798
1798 if (netif_msg_timer(np)) { 1799 if (netif_msg_timer(np)) {
1799 /* DO NOT read the IntrStatus register, 1800 /* DO NOT read the IntrStatus register,
@@ -1817,14 +1818,14 @@ static void netdev_timer(unsigned long data)
1817 if (netif_msg_drv(np)) 1818 if (netif_msg_drv(np))
1818 printk(KERN_NOTICE "%s: possible phy reset: " 1819 printk(KERN_NOTICE "%s: possible phy reset: "
1819 "re-initializing\n", dev->name); 1820 "re-initializing\n", dev->name);
1820 disable_irq(dev->irq); 1821 disable_irq(irq);
1821 spin_lock_irq(&np->lock); 1822 spin_lock_irq(&np->lock);
1822 natsemi_stop_rxtx(dev); 1823 natsemi_stop_rxtx(dev);
1823 dump_ring(dev); 1824 dump_ring(dev);
1824 reinit_ring(dev); 1825 reinit_ring(dev);
1825 init_registers(dev); 1826 init_registers(dev);
1826 spin_unlock_irq(&np->lock); 1827 spin_unlock_irq(&np->lock);
1827 enable_irq(dev->irq); 1828 enable_irq(irq);
1828 } else { 1829 } else {
1829 /* hurry back */ 1830 /* hurry back */
1830 next_tick = HZ; 1831 next_tick = HZ;
@@ -1841,10 +1842,10 @@ static void netdev_timer(unsigned long data)
1841 spin_unlock_irq(&np->lock); 1842 spin_unlock_irq(&np->lock);
1842 } 1843 }
1843 if (np->oom) { 1844 if (np->oom) {
1844 disable_irq(dev->irq); 1845 disable_irq(irq);
1845 np->oom = 0; 1846 np->oom = 0;
1846 refill_rx(dev); 1847 refill_rx(dev);
1847 enable_irq(dev->irq); 1848 enable_irq(irq);
1848 if (!np->oom) { 1849 if (!np->oom) {
1849 writel(RxOn, ioaddr + ChipCmd); 1850 writel(RxOn, ioaddr + ChipCmd);
1850 } else { 1851 } else {
@@ -1885,8 +1886,9 @@ static void ns_tx_timeout(struct net_device *dev)
1885{ 1886{
1886 struct netdev_private *np = netdev_priv(dev); 1887 struct netdev_private *np = netdev_priv(dev);
1887 void __iomem * ioaddr = ns_ioaddr(dev); 1888 void __iomem * ioaddr = ns_ioaddr(dev);
1889 const int irq = np->pci_dev->irq;
1888 1890
1889 disable_irq(dev->irq); 1891 disable_irq(irq);
1890 spin_lock_irq(&np->lock); 1892 spin_lock_irq(&np->lock);
1891 if (!np->hands_off) { 1893 if (!np->hands_off) {
1892 if (netif_msg_tx_err(np)) 1894 if (netif_msg_tx_err(np))
@@ -1905,7 +1907,7 @@ static void ns_tx_timeout(struct net_device *dev)
1905 dev->name); 1907 dev->name);
1906 } 1908 }
1907 spin_unlock_irq(&np->lock); 1909 spin_unlock_irq(&np->lock);
1908 enable_irq(dev->irq); 1910 enable_irq(irq);
1909 1911
1910 dev->trans_start = jiffies; /* prevent tx timeout */ 1912 dev->trans_start = jiffies; /* prevent tx timeout */
1911 dev->stats.tx_errors++; 1913 dev->stats.tx_errors++;
@@ -2470,9 +2472,12 @@ static struct net_device_stats *get_stats(struct net_device *dev)
2470#ifdef CONFIG_NET_POLL_CONTROLLER 2472#ifdef CONFIG_NET_POLL_CONTROLLER
2471static void natsemi_poll_controller(struct net_device *dev) 2473static void natsemi_poll_controller(struct net_device *dev)
2472{ 2474{
2473 disable_irq(dev->irq); 2475 struct netdev_private *np = netdev_priv(dev);
2474 intr_handler(dev->irq, dev); 2476 const int irq = np->pci_dev->irq;
2475 enable_irq(dev->irq); 2477
2478 disable_irq(irq);
2479 intr_handler(irq, dev);
2480 enable_irq(irq);
2476} 2481}
2477#endif 2482#endif
2478 2483
@@ -2523,8 +2528,9 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2523 if (netif_running(dev)) { 2528 if (netif_running(dev)) {
2524 struct netdev_private *np = netdev_priv(dev); 2529 struct netdev_private *np = netdev_priv(dev);
2525 void __iomem * ioaddr = ns_ioaddr(dev); 2530 void __iomem * ioaddr = ns_ioaddr(dev);
2531 const int irq = np->pci_dev->irq;
2526 2532
2527 disable_irq(dev->irq); 2533 disable_irq(irq);
2528 spin_lock(&np->lock); 2534 spin_lock(&np->lock);
2529 /* stop engines */ 2535 /* stop engines */
2530 natsemi_stop_rxtx(dev); 2536 natsemi_stop_rxtx(dev);
@@ -2537,7 +2543,7 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2537 /* restart engines */ 2543 /* restart engines */
2538 writel(RxOn | TxOn, ioaddr + ChipCmd); 2544 writel(RxOn | TxOn, ioaddr + ChipCmd);
2539 spin_unlock(&np->lock); 2545 spin_unlock(&np->lock);
2540 enable_irq(dev->irq); 2546 enable_irq(irq);
2541 } 2547 }
2542 return 0; 2548 return 0;
2543} 2549}
@@ -3135,6 +3141,7 @@ static int netdev_close(struct net_device *dev)
3135{ 3141{
3136 void __iomem * ioaddr = ns_ioaddr(dev); 3142 void __iomem * ioaddr = ns_ioaddr(dev);
3137 struct netdev_private *np = netdev_priv(dev); 3143 struct netdev_private *np = netdev_priv(dev);
3144 const int irq = np->pci_dev->irq;
3138 3145
3139 if (netif_msg_ifdown(np)) 3146 if (netif_msg_ifdown(np))
3140 printk(KERN_DEBUG 3147 printk(KERN_DEBUG
@@ -3156,14 +3163,14 @@ static int netdev_close(struct net_device *dev)
3156 */ 3163 */
3157 3164
3158 del_timer_sync(&np->timer); 3165 del_timer_sync(&np->timer);
3159 disable_irq(dev->irq); 3166 disable_irq(irq);
3160 spin_lock_irq(&np->lock); 3167 spin_lock_irq(&np->lock);
3161 natsemi_irq_disable(dev); 3168 natsemi_irq_disable(dev);
3162 np->hands_off = 1; 3169 np->hands_off = 1;
3163 spin_unlock_irq(&np->lock); 3170 spin_unlock_irq(&np->lock);
3164 enable_irq(dev->irq); 3171 enable_irq(irq);
3165 3172
3166 free_irq(dev->irq, dev); 3173 free_irq(irq, dev);
3167 3174
3168 /* Interrupt disabled, interrupt handler released, 3175 /* Interrupt disabled, interrupt handler released,
3169 * queue stopped, timer deleted, rtnl_lock held 3176 * queue stopped, timer deleted, rtnl_lock held
@@ -3256,9 +3263,11 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3256 3263
3257 rtnl_lock(); 3264 rtnl_lock();
3258 if (netif_running (dev)) { 3265 if (netif_running (dev)) {
3266 const int irq = np->pci_dev->irq;
3267
3259 del_timer_sync(&np->timer); 3268 del_timer_sync(&np->timer);
3260 3269
3261 disable_irq(dev->irq); 3270 disable_irq(irq);
3262 spin_lock_irq(&np->lock); 3271 spin_lock_irq(&np->lock);
3263 3272
3264 natsemi_irq_disable(dev); 3273 natsemi_irq_disable(dev);
@@ -3267,7 +3276,7 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3267 netif_stop_queue(dev); 3276 netif_stop_queue(dev);
3268 3277
3269 spin_unlock_irq(&np->lock); 3278 spin_unlock_irq(&np->lock);
3270 enable_irq(dev->irq); 3279 enable_irq(irq);
3271 3280
3272 napi_disable(&np->napi); 3281 napi_disable(&np->napi);
3273 3282
@@ -3307,6 +3316,8 @@ static int natsemi_resume (struct pci_dev *pdev)
3307 if (netif_device_present(dev)) 3316 if (netif_device_present(dev))
3308 goto out; 3317 goto out;
3309 if (netif_running(dev)) { 3318 if (netif_running(dev)) {
3319 const int irq = np->pci_dev->irq;
3320
3310 BUG_ON(!np->hands_off); 3321 BUG_ON(!np->hands_off);
3311 ret = pci_enable_device(pdev); 3322 ret = pci_enable_device(pdev);
3312 if (ret < 0) { 3323 if (ret < 0) {
@@ -3320,13 +3331,13 @@ static int natsemi_resume (struct pci_dev *pdev)
3320 3331
3321 natsemi_reset(dev); 3332 natsemi_reset(dev);
3322 init_ring(dev); 3333 init_ring(dev);
3323 disable_irq(dev->irq); 3334 disable_irq(irq);
3324 spin_lock_irq(&np->lock); 3335 spin_lock_irq(&np->lock);
3325 np->hands_off = 0; 3336 np->hands_off = 0;
3326 init_registers(dev); 3337 init_registers(dev);
3327 netif_device_attach(dev); 3338 netif_device_attach(dev);
3328 spin_unlock_irq(&np->lock); 3339 spin_unlock_irq(&np->lock);
3329 enable_irq(dev->irq); 3340 enable_irq(irq);
3330 3341
3331 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); 3342 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
3332 } 3343 }
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6338ef8606ae..bb367582c1e8 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2846,6 +2846,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
2846static void s2io_netpoll(struct net_device *dev) 2846static void s2io_netpoll(struct net_device *dev)
2847{ 2847{
2848 struct s2io_nic *nic = netdev_priv(dev); 2848 struct s2io_nic *nic = netdev_priv(dev);
2849 const int irq = nic->pdev->irq;
2849 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2850 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2850 u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2851 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2851 int i; 2852 int i;
@@ -2855,7 +2856,7 @@ static void s2io_netpoll(struct net_device *dev)
2855 if (pci_channel_offline(nic->pdev)) 2856 if (pci_channel_offline(nic->pdev))
2856 return; 2857 return;
2857 2858
2858 disable_irq(dev->irq); 2859 disable_irq(irq);
2859 2860
2860 writeq(val64, &bar0->rx_traffic_int); 2861 writeq(val64, &bar0->rx_traffic_int);
2861 writeq(val64, &bar0->tx_traffic_int); 2862 writeq(val64, &bar0->tx_traffic_int);
@@ -2884,7 +2885,7 @@ static void s2io_netpoll(struct net_device *dev)
2884 break; 2885 break;
2885 } 2886 }
2886 } 2887 }
2887 enable_irq(dev->irq); 2888 enable_irq(irq);
2888} 2889}
2889#endif 2890#endif
2890 2891
@@ -3897,9 +3898,7 @@ static void remove_msix_isr(struct s2io_nic *sp)
3897 3898
3898static void remove_inta_isr(struct s2io_nic *sp) 3899static void remove_inta_isr(struct s2io_nic *sp)
3899{ 3900{
3900 struct net_device *dev = sp->dev; 3901 free_irq(sp->pdev->irq, sp->dev);
3901
3902 free_irq(sp->pdev->irq, dev);
3903} 3902}
3904 3903
3905/* ********************************************************* * 3904/* ********************************************************* *
@@ -7046,7 +7045,7 @@ static int s2io_add_isr(struct s2io_nic *sp)
7046 } 7045 }
7047 } 7046 }
7048 if (sp->config.intr_type == INTA) { 7047 if (sp->config.intr_type == INTA) {
7049 err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED, 7048 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
7050 sp->name, dev); 7049 sp->name, dev);
7051 if (err) { 7050 if (err) {
7052 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", 7051 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
@@ -7908,9 +7907,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7908 goto bar1_remap_failed; 7907 goto bar1_remap_failed;
7909 } 7908 }
7910 7909
7911 dev->irq = pdev->irq;
7912 dev->base_addr = (unsigned long)sp->bar0;
7913
7914 /* Initializing the BAR1 address as the start of the FIFO pointer. */ 7910 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7915 for (j = 0; j < MAX_TX_FIFOS; j++) { 7911 for (j = 0; j < MAX_TX_FIFOS; j++) {
7916 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000); 7912 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index ef76725454d2..51387c31914b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1882,25 +1882,24 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1882 */ 1882 */
1883static void vxge_netpoll(struct net_device *dev) 1883static void vxge_netpoll(struct net_device *dev)
1884{ 1884{
1885 struct __vxge_hw_device *hldev; 1885 struct vxgedev *vdev = netdev_priv(dev);
1886 struct vxgedev *vdev; 1886 struct pci_dev *pdev = vdev->pdev;
1887 1887 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
1888 vdev = netdev_priv(dev); 1888 const int irq = pdev->irq;
1889 hldev = pci_get_drvdata(vdev->pdev);
1890 1889
1891 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1890 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1892 1891
1893 if (pci_channel_offline(vdev->pdev)) 1892 if (pci_channel_offline(pdev))
1894 return; 1893 return;
1895 1894
1896 disable_irq(dev->irq); 1895 disable_irq(irq);
1897 vxge_hw_device_clear_tx_rx(hldev); 1896 vxge_hw_device_clear_tx_rx(hldev);
1898 1897
1899 vxge_hw_device_clear_tx_rx(hldev); 1898 vxge_hw_device_clear_tx_rx(hldev);
1900 VXGE_COMPLETE_ALL_RX(vdev); 1899 VXGE_COMPLETE_ALL_RX(vdev);
1901 VXGE_COMPLETE_ALL_TX(vdev); 1900 VXGE_COMPLETE_ALL_TX(vdev);
1902 1901
1903 enable_irq(dev->irq); 1902 enable_irq(irq);
1904 1903
1905 vxge_debug_entryexit(VXGE_TRACE, 1904 vxge_debug_entryexit(VXGE_TRACE,
1906 "%s:%d Exiting...", __func__, __LINE__); 1905 "%s:%d Exiting...", __func__, __LINE__);
@@ -2860,12 +2859,12 @@ static int vxge_open(struct net_device *dev)
2860 vdev->config.rx_pause_enable); 2859 vdev->config.rx_pause_enable);
2861 2860
2862 if (vdev->vp_reset_timer.function == NULL) 2861 if (vdev->vp_reset_timer.function == NULL)
2863 vxge_os_timer(vdev->vp_reset_timer, 2862 vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev,
2864 vxge_poll_vp_reset, vdev, (HZ/2)); 2863 HZ / 2);
2865 2864
2866 /* There is no need to check for RxD leak and RxD lookup on Titan1A */ 2865 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2867 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) 2866 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2868 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, 2867 vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2869 HZ / 2); 2868 HZ / 2);
2870 2869
2871 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2870 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -3424,9 +3423,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3424 ndev->features |= ndev->hw_features | 3423 ndev->features |= ndev->hw_features |
3425 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 3424 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3426 3425
3427 /* Driver entry points */
3428 ndev->irq = vdev->pdev->irq;
3429 ndev->base_addr = (unsigned long) hldev->bar0;
3430 3426
3431 ndev->netdev_ops = &vxge_netdev_ops; 3427 ndev->netdev_ops = &vxge_netdev_ops;
3432 3428
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index f52a42d1dbb7..35f3e7552ec2 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -416,12 +416,15 @@ struct vxge_tx_priv {
416 static int p = val; \ 416 static int p = val; \
417 module_param(p, int, 0) 417 module_param(p, int, 0)
418 418
419#define vxge_os_timer(timer, handle, arg, exp) do { \ 419static inline
420 init_timer(&timer); \ 420void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
421 timer.function = handle; \ 421 struct vxgedev *vdev, unsigned long timeout)
422 timer.data = (unsigned long) arg; \ 422{
423 mod_timer(&timer, (jiffies + exp)); \ 423 init_timer(timer);
424 } while (0); 424 timer->function = func;
425 timer->data = (unsigned long)vdev;
426 mod_timer(timer, jiffies + timeout);
427}
425 428
426void vxge_initialize_ethtool_ops(struct net_device *ndev); 429void vxge_initialize_ethtool_ops(struct net_device *ndev);
427enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); 430enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index aca13046e432..d93a088debc3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3942,13 +3942,11 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3942 ret = pci_enable_msi(np->pci_dev); 3942 ret = pci_enable_msi(np->pci_dev);
3943 if (ret == 0) { 3943 if (ret == 0) {
3944 np->msi_flags |= NV_MSI_ENABLED; 3944 np->msi_flags |= NV_MSI_ENABLED;
3945 dev->irq = np->pci_dev->irq;
3946 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3945 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3947 netdev_info(dev, "request_irq failed %d\n", 3946 netdev_info(dev, "request_irq failed %d\n",
3948 ret); 3947 ret);
3949 pci_disable_msi(np->pci_dev); 3948 pci_disable_msi(np->pci_dev);
3950 np->msi_flags &= ~NV_MSI_ENABLED; 3949 np->msi_flags &= ~NV_MSI_ENABLED;
3951 dev->irq = np->pci_dev->irq;
3952 goto out_err; 3950 goto out_err;
3953 } 3951 }
3954 3952
@@ -5649,9 +5647,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5649 np->base = ioremap(addr, np->register_size); 5647 np->base = ioremap(addr, np->register_size);
5650 if (!np->base) 5648 if (!np->base)
5651 goto out_relreg; 5649 goto out_relreg;
5652 dev->base_addr = (unsigned long)np->base;
5653
5654 dev->irq = pci_dev->irq;
5655 5650
5656 np->rx_ring_size = RX_RING_DEFAULT; 5651 np->rx_ring_size = RX_RING_DEFAULT;
5657 np->tx_ring_size = TX_RING_DEFAULT; 5652 np->tx_ring_size = TX_RING_DEFAULT;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 6dfc26d85e47..d3469d8e3f0d 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -990,10 +990,10 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget)
990 ndev->stats.rx_errors++; 990 ndev->stats.rx_errors++;
991 } else { 991 } else {
992 /* Packet is good */ 992 /* Packet is good */
993 skb = dev_alloc_skb(len + 8); 993 skb = dev_alloc_skb(len);
994 if (!skb) 994 if (!skb) {
995 ndev->stats.rx_dropped++; 995 ndev->stats.rx_dropped++;
996 else { 996 } else {
997 prdbuf = skb_put(skb, len); 997 prdbuf = skb_put(skb, len);
998 998
999 /* Copy packet from buffer */ 999 /* Copy packet from buffer */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index dd14915f54bb..9f3dbc4feadc 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -660,6 +660,7 @@ extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
660extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev); 660extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
661extern u64 pch_rx_snap_read(struct pci_dev *pdev); 661extern u64 pch_rx_snap_read(struct pci_dev *pdev);
662extern u64 pch_tx_snap_read(struct pci_dev *pdev); 662extern u64 pch_tx_snap_read(struct pci_dev *pdev);
663extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
663#endif 664#endif
664 665
665/* pch_gbe_param.c */ 666/* pch_gbe_param.c */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8035e5ff6e06..89c6bcf4bca2 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -101,18 +101,19 @@ const char pch_driver_version[] = DRV_VERSION;
101 101
102#ifdef CONFIG_PCH_PTP 102#ifdef CONFIG_PCH_PTP
103/* Macros for ieee1588 */ 103/* Macros for ieee1588 */
104#define TICKS_NS_SHIFT 5
105
106/* 0x40 Time Synchronization Channel Control Register Bits */ 104/* 0x40 Time Synchronization Channel Control Register Bits */
107#define MASTER_MODE (1<<0) 105#define MASTER_MODE (1<<0)
108#define SLAVE_MODE (0<<0) 106#define SLAVE_MODE (0)
109#define V2_MODE (1<<31) 107#define V2_MODE (1<<31)
110#define CAP_MODE0 (0<<16) 108#define CAP_MODE0 (0)
111#define CAP_MODE2 (1<<17) 109#define CAP_MODE2 (1<<17)
112 110
113/* 0x44 Time Synchronization Channel Event Register Bits */ 111/* 0x44 Time Synchronization Channel Event Register Bits */
114#define TX_SNAPSHOT_LOCKED (1<<0) 112#define TX_SNAPSHOT_LOCKED (1<<0)
115#define RX_SNAPSHOT_LOCKED (1<<1) 113#define RX_SNAPSHOT_LOCKED (1<<1)
114
115#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
116#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
116#endif 117#endif
117 118
118static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 119static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -133,10 +134,8 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
133 u16 *hi, *id; 134 u16 *hi, *id;
134 u32 lo; 135 u32 lo;
135 136
136 if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) && 137 if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
137 (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
138 return 0; 138 return 0;
139 }
140 139
141 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 140 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
142 141
@@ -153,8 +152,8 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
153 seqid == *id); 152 seqid == *id);
154} 153}
155 154
156static void pch_rx_timestamp( 155static void
157 struct pch_gbe_adapter *adapter, struct sk_buff *skb) 156pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
158{ 157{
159 struct skb_shared_hwtstamps *shhwtstamps; 158 struct skb_shared_hwtstamps *shhwtstamps;
160 struct pci_dev *pdev; 159 struct pci_dev *pdev;
@@ -183,7 +182,6 @@ static void pch_rx_timestamp(
183 goto out; 182 goto out;
184 183
185 ns = pch_rx_snap_read(pdev); 184 ns = pch_rx_snap_read(pdev);
186 ns <<= TICKS_NS_SHIFT;
187 185
188 shhwtstamps = skb_hwtstamps(skb); 186 shhwtstamps = skb_hwtstamps(skb);
189 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 187 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
@@ -192,8 +190,8 @@ out:
192 pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED); 190 pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
193} 191}
194 192
195static void pch_tx_timestamp( 193static void
196 struct pch_gbe_adapter *adapter, struct sk_buff *skb) 194pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
197{ 195{
198 struct skb_shared_hwtstamps shhwtstamps; 196 struct skb_shared_hwtstamps shhwtstamps;
199 struct pci_dev *pdev; 197 struct pci_dev *pdev;
@@ -202,17 +200,16 @@ static void pch_tx_timestamp(
202 u32 cnt, val; 200 u32 cnt, val;
203 201
204 shtx = skb_shinfo(skb); 202 shtx = skb_shinfo(skb);
205 if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)) 203 if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
206 shtx->tx_flags |= SKBTX_IN_PROGRESS;
207 else
208 return; 204 return;
209 205
206 shtx->tx_flags |= SKBTX_IN_PROGRESS;
207
210 /* Get ieee1588's dev information */ 208 /* Get ieee1588's dev information */
211 pdev = adapter->ptp_pdev; 209 pdev = adapter->ptp_pdev;
212 210
213 /* 211 /*
214 * This really stinks, but we have to poll for the Tx time stamp. 212 * This really stinks, but we have to poll for the Tx time stamp.
215 * Usually, the time stamp is ready after 4 to 6 microseconds.
216 */ 213 */
217 for (cnt = 0; cnt < 100; cnt++) { 214 for (cnt = 0; cnt < 100; cnt++) {
218 val = pch_ch_event_read(pdev); 215 val = pch_ch_event_read(pdev);
@@ -226,7 +223,6 @@ static void pch_tx_timestamp(
226 } 223 }
227 224
228 ns = pch_tx_snap_read(pdev); 225 ns = pch_tx_snap_read(pdev);
229 ns <<= TICKS_NS_SHIFT;
230 226
231 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 227 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
232 shhwtstamps.hwtstamp = ns_to_ktime(ns); 228 shhwtstamps.hwtstamp = ns_to_ktime(ns);
@@ -240,6 +236,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
240 struct hwtstamp_config cfg; 236 struct hwtstamp_config cfg;
241 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 237 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
242 struct pci_dev *pdev; 238 struct pci_dev *pdev;
239 u8 station[20];
243 240
244 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 241 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
245 return -EFAULT; 242 return -EFAULT;
@@ -267,15 +264,23 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
267 break; 264 break;
268 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 265 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
269 adapter->hwts_rx_en = 0; 266 adapter->hwts_rx_en = 0;
270 pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0)); 267 pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
271 break; 268 break;
272 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 269 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
273 adapter->hwts_rx_en = 1; 270 adapter->hwts_rx_en = 1;
274 pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0)); 271 pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
275 break; 272 break;
276 case HWTSTAMP_FILTER_PTP_V2_EVENT: 273 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
277 adapter->hwts_rx_en = 1; 274 adapter->hwts_rx_en = 1;
278 pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2)); 275 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
276 strcpy(station, PTP_L4_MULTICAST_SA);
277 pch_set_station_address(station, pdev);
278 break;
279 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
280 adapter->hwts_rx_en = 1;
281 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
282 strcpy(station, PTP_L2_MULTICAST_SA);
283 pch_set_station_address(station, pdev);
279 break; 284 break;
280 default: 285 default:
281 return -ERANGE; 286 return -ERANGE;
@@ -387,31 +392,85 @@ static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
387} 392}
388 393
389/** 394/**
395 * pch_gbe_mac_save_mac_addr_regs - Save MAC addresse registers
396 * @hw: Pointer to the HW structure
397 * @addr: Pointer to the MAC address
398 * @index: MAC address array register
399 */
400static void
401pch_gbe_mac_save_mac_addr_regs(struct pch_gbe_hw *hw,
402 struct pch_gbe_regs_mac_adr *mac_adr, u32 index)
403{
404 mac_adr->high = ioread32(&hw->reg->mac_adr[index].high);
405 mac_adr->low = ioread32(&hw->reg->mac_adr[index].low);
406}
407
408/**
409 * pch_gbe_mac_store_mac_addr_regs - Store MAC addresse registers
410 * @hw: Pointer to the HW structure
411 * @addr: Pointer to the MAC address
412 * @index: MAC address array register
413 */
414static void
415pch_gbe_mac_store_mac_addr_regs(struct pch_gbe_hw *hw,
416 struct pch_gbe_regs_mac_adr *mac_adr, u32 index)
417{
418 u32 adrmask;
419
420 adrmask = ioread32(&hw->reg->ADDR_MASK);
421 iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
422 /* wait busy */
423 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
424 /* Set the MAC address to the MAC address xA/xB register */
425 iowrite32(mac_adr->high, &hw->reg->mac_adr[index].high);
426 iowrite32(mac_adr->low, &hw->reg->mac_adr[index].low);
427 iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
428 /* wait busy */
429 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
430}
431
432#define MAC_ADDR_LIST_NUM 16
433/**
390 * pch_gbe_mac_reset_hw - Reset hardware 434 * pch_gbe_mac_reset_hw - Reset hardware
391 * @hw: Pointer to the HW structure 435 * @hw: Pointer to the HW structure
392 */ 436 */
393static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) 437static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
394{ 438{
439 struct pch_gbe_regs_mac_adr mac_addr_list[MAC_ADDR_LIST_NUM];
440 int i;
441
395 /* Read the MAC address. and store to the private data */ 442 /* Read the MAC address. and store to the private data */
396 pch_gbe_mac_read_mac_addr(hw); 443 pch_gbe_mac_read_mac_addr(hw);
444 /* Read other MAC addresses */
445 for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
446 pch_gbe_mac_save_mac_addr_regs(hw, &mac_addr_list[i], i);
397 iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET); 447 iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
398#ifdef PCH_GBE_MAC_IFOP_RGMII 448#ifdef PCH_GBE_MAC_IFOP_RGMII
399 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); 449 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
400#endif 450#endif
401 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); 451 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
402 /* Setup the receive address */ 452 /* Setup the receive addresses */
403 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 453 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
454 for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
455 pch_gbe_mac_store_mac_addr_regs(hw, &mac_addr_list[i], i);
404 return; 456 return;
405} 457}
406 458
407static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) 459static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
408{ 460{
409 /* Read the MAC address. and store to the private data */ 461 struct pch_gbe_regs_mac_adr mac_addr_list[MAC_ADDR_LIST_NUM];
462 int i;
463
464 /* Read the MAC addresses. and store to the private data */
410 pch_gbe_mac_read_mac_addr(hw); 465 pch_gbe_mac_read_mac_addr(hw);
466 for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
467 pch_gbe_mac_save_mac_addr_regs(hw, &mac_addr_list[i], i);
411 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); 468 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
412 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); 469 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
413 /* Setup the MAC address */ 470 /* Setup the MAC addresses */
414 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 471 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
472 for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
473 pch_gbe_mac_store_mac_addr_regs(hw, &mac_addr_list[i], i);
415 return; 474 return;
416} 475}
417 476
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 0d29f5f4b8e4..c2367158350e 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -683,8 +683,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
683 } 683 }
684 684
685 hmp->base = ioaddr; 685 hmp->base = ioaddr;
686 dev->base_addr = (unsigned long)ioaddr;
687 dev->irq = irq;
688 pci_set_drvdata(pdev, dev); 686 pci_set_drvdata(pdev, dev);
689 687
690 hmp->chip_id = chip_id; 688 hmp->chip_id = chip_id;
@@ -859,14 +857,11 @@ static int hamachi_open(struct net_device *dev)
859 u32 rx_int_var, tx_int_var; 857 u32 rx_int_var, tx_int_var;
860 u16 fifo_info; 858 u16 fifo_info;
861 859
862 i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev); 860 i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
861 dev->name, dev);
863 if (i) 862 if (i)
864 return i; 863 return i;
865 864
866 if (hamachi_debug > 1)
867 printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
868 dev->name, dev->irq);
869
870 hamachi_init_ring(dev); 865 hamachi_init_ring(dev);
871 866
872#if ADDRLEN == 64 867#if ADDRLEN == 64
@@ -1705,7 +1700,7 @@ static int hamachi_close(struct net_device *dev)
1705 } 1700 }
1706#endif /* __i386__ debugging only */ 1701#endif /* __i386__ debugging only */
1707 1702
1708 free_irq(dev->irq, dev); 1703 free_irq(hmp->pci_dev->irq, dev);
1709 1704
1710 del_timer_sync(&hmp->timer); 1705 del_timer_sync(&hmp->timer);
1711 1706
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 7757b80ef924..04e622fd468d 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -427,9 +427,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
427 /* Reset the chip. */ 427 /* Reset the chip. */
428 iowrite32(0x80000000, ioaddr + DMACtrl); 428 iowrite32(0x80000000, ioaddr + DMACtrl);
429 429
430 dev->base_addr = (unsigned long)ioaddr;
431 dev->irq = irq;
432
433 pci_set_drvdata(pdev, dev); 430 pci_set_drvdata(pdev, dev);
434 spin_lock_init(&np->lock); 431 spin_lock_init(&np->lock);
435 432
@@ -569,25 +566,20 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
569static int yellowfin_open(struct net_device *dev) 566static int yellowfin_open(struct net_device *dev)
570{ 567{
571 struct yellowfin_private *yp = netdev_priv(dev); 568 struct yellowfin_private *yp = netdev_priv(dev);
569 const int irq = yp->pci_dev->irq;
572 void __iomem *ioaddr = yp->base; 570 void __iomem *ioaddr = yp->base;
573 int i, ret; 571 int i, rc;
574 572
575 /* Reset the chip. */ 573 /* Reset the chip. */
576 iowrite32(0x80000000, ioaddr + DMACtrl); 574 iowrite32(0x80000000, ioaddr + DMACtrl);
577 575
578 ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev); 576 rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
579 if (ret) 577 if (rc)
580 return ret; 578 return rc;
581
582 if (yellowfin_debug > 1)
583 netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
584 __func__, dev->irq);
585 579
586 ret = yellowfin_init_ring(dev); 580 rc = yellowfin_init_ring(dev);
587 if (ret) { 581 if (rc < 0)
588 free_irq(dev->irq, dev); 582 goto err_free_irq;
589 return ret;
590 }
591 583
592 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); 584 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
593 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); 585 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -647,8 +639,12 @@ static int yellowfin_open(struct net_device *dev)
647 yp->timer.data = (unsigned long)dev; 639 yp->timer.data = (unsigned long)dev;
648 yp->timer.function = yellowfin_timer; /* timer handler */ 640 yp->timer.function = yellowfin_timer; /* timer handler */
649 add_timer(&yp->timer); 641 add_timer(&yp->timer);
642out:
643 return rc;
650 644
651 return 0; 645err_free_irq:
646 free_irq(irq, dev);
647 goto out;
652} 648}
653 649
654static void yellowfin_timer(unsigned long data) 650static void yellowfin_timer(unsigned long data)
@@ -1251,7 +1247,7 @@ static int yellowfin_close(struct net_device *dev)
1251 } 1247 }
1252#endif /* __i386__ debugging only */ 1248#endif /* __i386__ debugging only */
1253 1249
1254 free_irq(dev->irq, dev); 1250 free_irq(yp->pci_dev->irq, dev);
1255 1251
1256 /* Free all the skbuffs in the Rx queue. */ 1252 /* Free all the skbuffs in the Rx queue. */
1257 for (i = 0; i < RX_RING_SIZE; i++) { 1253 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 49343ec21c82..09d8d33171df 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3845,7 +3845,7 @@ static int ql_wol(struct ql_adapter *qdev)
3845 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | 3845 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3846 WAKE_MCAST | WAKE_BCAST)) { 3846 WAKE_MCAST | WAKE_BCAST)) {
3847 netif_err(qdev, ifdown, qdev->ndev, 3847 netif_err(qdev, ifdown, qdev->ndev,
3848 "Unsupported WOL paramter. qdev->wol = 0x%x.\n", 3848 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3849 qdev->wol); 3849 qdev->wol);
3850 return -EINVAL; 3850 return -EINVAL;
3851 } 3851 }
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index b96e1920e045..4de73643fec6 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw> 4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
5 * Copyright (C) 2007 5 * Copyright (C) 2007
6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us> 6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
7 * Florian Fainelli <florian@openwrt.org> 7 * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -74,9 +74,13 @@
74#define MT_ICR 0x0C /* TX interrupt control */ 74#define MT_ICR 0x0C /* TX interrupt control */
75#define MR_ICR 0x10 /* RX interrupt control */ 75#define MR_ICR 0x10 /* RX interrupt control */
76#define MTPR 0x14 /* TX poll command register */ 76#define MTPR 0x14 /* TX poll command register */
77#define TM2TX 0x0001 /* Trigger MAC to transmit */
77#define MR_BSR 0x18 /* RX buffer size */ 78#define MR_BSR 0x18 /* RX buffer size */
78#define MR_DCR 0x1A /* RX descriptor control */ 79#define MR_DCR 0x1A /* RX descriptor control */
79#define MLSR 0x1C /* Last status */ 80#define MLSR 0x1C /* Last status */
81#define TX_FIFO_UNDR 0x0200 /* TX FIFO under-run */
82#define TX_EXCEEDC 0x2000 /* Transmit exceed collision */
83#define TX_LATEC 0x4000 /* Transmit late collision */
80#define MMDIO 0x20 /* MDIO control register */ 84#define MMDIO 0x20 /* MDIO control register */
81#define MDIO_WRITE 0x4000 /* MDIO write */ 85#define MDIO_WRITE 0x4000 /* MDIO write */
82#define MDIO_READ 0x2000 /* MDIO read */ 86#define MDIO_READ 0x2000 /* MDIO read */
@@ -124,6 +128,9 @@
124#define MID_3M 0x82 /* MID3 Medium */ 128#define MID_3M 0x82 /* MID3 Medium */
125#define MID_3H 0x84 /* MID3 High */ 129#define MID_3H 0x84 /* MID3 High */
126#define PHY_CC 0x88 /* PHY status change configuration register */ 130#define PHY_CC 0x88 /* PHY status change configuration register */
131#define SCEN 0x8000 /* PHY status change enable */
132#define PHYAD_SHIFT 8 /* PHY address shift */
133#define TMRDIV_SHIFT 0 /* Timer divider shift */
127#define PHY_ST 0x8A /* PHY status register */ 134#define PHY_ST 0x8A /* PHY status register */
128#define MAC_SM 0xAC /* MAC status machine */ 135#define MAC_SM 0xAC /* MAC status machine */
129#define MAC_SM_RST 0x0002 /* MAC status machine reset */ 136#define MAC_SM_RST 0x0002 /* MAC status machine reset */
@@ -137,6 +144,8 @@
137#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ 144#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
138#define MCAST_MAX 3 /* Max number multicast addresses to filter */ 145#define MCAST_MAX 3 /* Max number multicast addresses to filter */
139 146
147#define MAC_DEF_TIMEOUT 2048 /* Default MAC read/write operation timeout */
148
140/* Descriptor status */ 149/* Descriptor status */
141#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ 150#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
142#define DSC_RX_OK 0x4000 /* RX was successful */ 151#define DSC_RX_OK 0x4000 /* RX was successful */
@@ -187,7 +196,7 @@ struct r6040_private {
187 dma_addr_t rx_ring_dma; 196 dma_addr_t rx_ring_dma;
188 dma_addr_t tx_ring_dma; 197 dma_addr_t tx_ring_dma;
189 u16 tx_free_desc; 198 u16 tx_free_desc;
190 u16 mcr0, mcr1; 199 u16 mcr0;
191 struct net_device *dev; 200 struct net_device *dev;
192 struct mii_bus *mii_bus; 201 struct mii_bus *mii_bus;
193 struct napi_struct napi; 202 struct napi_struct napi;
@@ -204,7 +213,7 @@ static char version[] __devinitdata = DRV_NAME
204/* Read a word data from PHY Chip */ 213/* Read a word data from PHY Chip */
205static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) 214static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
206{ 215{
207 int limit = 2048; 216 int limit = MAC_DEF_TIMEOUT;
208 u16 cmd; 217 u16 cmd;
209 218
210 iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO); 219 iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
@@ -222,7 +231,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
222static void r6040_phy_write(void __iomem *ioaddr, 231static void r6040_phy_write(void __iomem *ioaddr,
223 int phy_addr, int reg, u16 val) 232 int phy_addr, int reg, u16 val)
224{ 233{
225 int limit = 2048; 234 int limit = MAC_DEF_TIMEOUT;
226 u16 cmd; 235 u16 cmd;
227 236
228 iowrite16(val, ioaddr + MMWD); 237 iowrite16(val, ioaddr + MMWD);
@@ -358,27 +367,35 @@ err_exit:
358 return rc; 367 return rc;
359} 368}
360 369
361static void r6040_init_mac_regs(struct net_device *dev) 370static void r6040_reset_mac(struct r6040_private *lp)
362{ 371{
363 struct r6040_private *lp = netdev_priv(dev);
364 void __iomem *ioaddr = lp->base; 372 void __iomem *ioaddr = lp->base;
365 int limit = 2048; 373 int limit = MAC_DEF_TIMEOUT;
366 u16 cmd; 374 u16 cmd;
367 375
368 /* Mask Off Interrupt */
369 iowrite16(MSK_INT, ioaddr + MIER);
370
371 /* Reset RDC MAC */
372 iowrite16(MAC_RST, ioaddr + MCR1); 376 iowrite16(MAC_RST, ioaddr + MCR1);
373 while (limit--) { 377 while (limit--) {
374 cmd = ioread16(ioaddr + MCR1); 378 cmd = ioread16(ioaddr + MCR1);
375 if (cmd & MAC_RST) 379 if (cmd & MAC_RST)
376 break; 380 break;
377 } 381 }
382
378 /* Reset internal state machine */ 383 /* Reset internal state machine */
379 iowrite16(MAC_SM_RST, ioaddr + MAC_SM); 384 iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
380 iowrite16(0, ioaddr + MAC_SM); 385 iowrite16(0, ioaddr + MAC_SM);
381 mdelay(5); 386 mdelay(5);
387}
388
389static void r6040_init_mac_regs(struct net_device *dev)
390{
391 struct r6040_private *lp = netdev_priv(dev);
392 void __iomem *ioaddr = lp->base;
393
394 /* Mask Off Interrupt */
395 iowrite16(MSK_INT, ioaddr + MIER);
396
397 /* Reset RDC MAC */
398 r6040_reset_mac(lp);
382 399
383 /* MAC Bus Control Register */ 400 /* MAC Bus Control Register */
384 iowrite16(MBCR_DEFAULT, ioaddr + MBCR); 401 iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -407,7 +424,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
407 /* Let TX poll the descriptors 424 /* Let TX poll the descriptors
408 * we may got called by r6040_tx_timeout which has left 425 * we may got called by r6040_tx_timeout which has left
409 * some unsent tx buffers */ 426 * some unsent tx buffers */
410 iowrite16(0x01, ioaddr + MTPR); 427 iowrite16(TM2TX, ioaddr + MTPR);
411} 428}
412 429
413static void r6040_tx_timeout(struct net_device *dev) 430static void r6040_tx_timeout(struct net_device *dev)
@@ -445,18 +462,13 @@ static void r6040_down(struct net_device *dev)
445{ 462{
446 struct r6040_private *lp = netdev_priv(dev); 463 struct r6040_private *lp = netdev_priv(dev);
447 void __iomem *ioaddr = lp->base; 464 void __iomem *ioaddr = lp->base;
448 int limit = 2048;
449 u16 *adrp; 465 u16 *adrp;
450 u16 cmd;
451 466
452 /* Stop MAC */ 467 /* Stop MAC */
453 iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */ 468 iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
454 iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */ 469
455 while (limit--) { 470 /* Reset RDC MAC */
456 cmd = ioread16(ioaddr + MCR1); 471 r6040_reset_mac(lp);
457 if (cmd & MAC_RST)
458 break;
459 }
460 472
461 /* Restore MAC Address to MIDx */ 473 /* Restore MAC Address to MIDx */
462 adrp = (u16 *) dev->dev_addr; 474 adrp = (u16 *) dev->dev_addr;
@@ -599,9 +611,9 @@ static void r6040_tx(struct net_device *dev)
599 /* Check for errors */ 611 /* Check for errors */
600 err = ioread16(ioaddr + MLSR); 612 err = ioread16(ioaddr + MLSR);
601 613
602 if (err & 0x0200) 614 if (err & TX_FIFO_UNDR)
603 dev->stats.rx_fifo_errors++; 615 dev->stats.tx_fifo_errors++;
604 if (err & (0x2000 | 0x4000)) 616 if (err & (TX_EXCEEDC | TX_LATEC))
605 dev->stats.tx_carrier_errors++; 617 dev->stats.tx_carrier_errors++;
606 618
607 if (descptr->status & DSC_OWNER_MAC) 619 if (descptr->status & DSC_OWNER_MAC)
@@ -736,11 +748,7 @@ static void r6040_mac_address(struct net_device *dev)
736 u16 *adrp; 748 u16 *adrp;
737 749
738 /* Reset MAC */ 750 /* Reset MAC */
739 iowrite16(MAC_RST, ioaddr + MCR1); 751 r6040_reset_mac(lp);
740 /* Reset internal state machine */
741 iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
742 iowrite16(0, ioaddr + MAC_SM);
743 mdelay(5);
744 752
745 /* Restore MAC Address */ 753 /* Restore MAC Address */
746 adrp = (u16 *) dev->dev_addr; 754 adrp = (u16 *) dev->dev_addr;
@@ -840,7 +848,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
840 skb_tx_timestamp(skb); 848 skb_tx_timestamp(skb);
841 849
842 /* Trigger the MAC to check the TX descriptor */ 850 /* Trigger the MAC to check the TX descriptor */
843 iowrite16(0x01, ioaddr + MTPR); 851 iowrite16(TM2TX, ioaddr + MTPR);
844 lp->tx_insert_ptr = descptr->vndescp; 852 lp->tx_insert_ptr = descptr->vndescp;
845 853
846 /* If no tx resource, stop */ 854 /* If no tx resource, stop */
@@ -973,6 +981,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
973 .get_settings = netdev_get_settings, 981 .get_settings = netdev_get_settings,
974 .set_settings = netdev_set_settings, 982 .set_settings = netdev_set_settings,
975 .get_link = ethtool_op_get_link, 983 .get_link = ethtool_op_get_link,
984 .get_ts_info = ethtool_op_get_ts_info,
976}; 985};
977 986
978static const struct net_device_ops r6040_netdev_ops = { 987static const struct net_device_ops r6040_netdev_ops = {
@@ -1126,10 +1135,15 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1126 err = -EIO; 1135 err = -EIO;
1127 goto err_out_free_res; 1136 goto err_out_free_res;
1128 } 1137 }
1138
1129 /* If PHY status change register is still set to zero it means the 1139 /* If PHY status change register is still set to zero it means the
1130 * bootloader didn't initialize it */ 1140 * bootloader didn't initialize it, so we set it to:
1141 * - enable phy status change
1142 * - enable all phy addresses
1143 * - set to lowest timer divider */
1131 if (ioread16(ioaddr + PHY_CC) == 0) 1144 if (ioread16(ioaddr + PHY_CC) == 0)
1132 iowrite16(0x9f07, ioaddr + PHY_CC); 1145 iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
1146 7 << TMRDIV_SHIFT, ioaddr + PHY_CC);
1133 1147
1134 /* Init system & device */ 1148 /* Init system & device */
1135 lp->base = ioaddr; 1149 lp->base = ioaddr;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index abc79076f867..5eef290997f9 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -635,9 +635,12 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
635 */ 635 */
636static void cp_poll_controller(struct net_device *dev) 636static void cp_poll_controller(struct net_device *dev)
637{ 637{
638 disable_irq(dev->irq); 638 struct cp_private *cp = netdev_priv(dev);
639 cp_interrupt(dev->irq, dev); 639 const int irq = cp->pdev->irq;
640 enable_irq(dev->irq); 640
641 disable_irq(irq);
642 cp_interrupt(irq, dev);
643 enable_irq(irq);
641} 644}
642#endif 645#endif
643 646
@@ -958,6 +961,11 @@ static inline void cp_start_hw (struct cp_private *cp)
958 cpw8(Cmd, RxOn | TxOn); 961 cpw8(Cmd, RxOn | TxOn);
959} 962}
960 963
964static void cp_enable_irq(struct cp_private *cp)
965{
966 cpw16_f(IntrMask, cp_intr_mask);
967}
968
961static void cp_init_hw (struct cp_private *cp) 969static void cp_init_hw (struct cp_private *cp)
962{ 970{
963 struct net_device *dev = cp->dev; 971 struct net_device *dev = cp->dev;
@@ -997,8 +1005,6 @@ static void cp_init_hw (struct cp_private *cp)
997 1005
998 cpw16(MultiIntr, 0); 1006 cpw16(MultiIntr, 0);
999 1007
1000 cpw16_f(IntrMask, cp_intr_mask);
1001
1002 cpw8_f(Cfg9346, Cfg9346_Lock); 1008 cpw8_f(Cfg9346, Cfg9346_Lock);
1003} 1009}
1004 1010
@@ -1114,6 +1120,7 @@ static void cp_free_rings (struct cp_private *cp)
1114static int cp_open (struct net_device *dev) 1120static int cp_open (struct net_device *dev)
1115{ 1121{
1116 struct cp_private *cp = netdev_priv(dev); 1122 struct cp_private *cp = netdev_priv(dev);
1123 const int irq = cp->pdev->irq;
1117 int rc; 1124 int rc;
1118 1125
1119 netif_dbg(cp, ifup, dev, "enabling interface\n"); 1126 netif_dbg(cp, ifup, dev, "enabling interface\n");
@@ -1126,10 +1133,12 @@ static int cp_open (struct net_device *dev)
1126 1133
1127 cp_init_hw(cp); 1134 cp_init_hw(cp);
1128 1135
1129 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); 1136 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1130 if (rc) 1137 if (rc)
1131 goto err_out_hw; 1138 goto err_out_hw;
1132 1139
1140 cp_enable_irq(cp);
1141
1133 netif_carrier_off(dev); 1142 netif_carrier_off(dev);
1134 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); 1143 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1135 netif_start_queue(dev); 1144 netif_start_queue(dev);
@@ -1161,7 +1170,7 @@ static int cp_close (struct net_device *dev)
1161 1170
1162 spin_unlock_irqrestore(&cp->lock, flags); 1171 spin_unlock_irqrestore(&cp->lock, flags);
1163 1172
1164 free_irq(dev->irq, dev); 1173 free_irq(cp->pdev->irq, dev);
1165 1174
1166 cp_free_rings(cp); 1175 cp_free_rings(cp);
1167 return 0; 1176 return 0;
@@ -1909,7 +1918,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1909 (unsigned long long)pciaddr); 1918 (unsigned long long)pciaddr);
1910 goto err_out_res; 1919 goto err_out_res;
1911 } 1920 }
1912 dev->base_addr = (unsigned long) regs;
1913 cp->regs = regs; 1921 cp->regs = regs;
1914 1922
1915 cp_stop_hw(cp); 1923 cp_stop_hw(cp);
@@ -1937,14 +1945,12 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1937 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1945 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1938 NETIF_F_HIGHDMA; 1946 NETIF_F_HIGHDMA;
1939 1947
1940 dev->irq = pdev->irq;
1941
1942 rc = register_netdev(dev); 1948 rc = register_netdev(dev);
1943 if (rc) 1949 if (rc)
1944 goto err_out_iomap; 1950 goto err_out_iomap;
1945 1951
1946 netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n", 1952 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
1947 dev->base_addr, dev->dev_addr, dev->irq); 1953 regs, dev->dev_addr, pdev->irq);
1948 1954
1949 pci_set_drvdata(pdev, dev); 1955 pci_set_drvdata(pdev, dev);
1950 1956
@@ -2031,6 +2037,7 @@ static int cp_resume (struct pci_dev *pdev)
2031 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ 2037 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2032 cp_init_rings_index (cp); 2038 cp_init_rings_index (cp);
2033 cp_init_hw (cp); 2039 cp_init_hw (cp);
2040 cp_enable_irq(cp);
2034 netif_start_queue (dev); 2041 netif_start_queue (dev);
2035 2042
2036 spin_lock_irqsave (&cp->lock, flags); 2043 spin_lock_irqsave (&cp->lock, flags);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index df7fd8d083dc..03df076ed596 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -148,9 +148,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
148 148
149/* Whether to use MMIO or PIO. Default to MMIO. */ 149/* Whether to use MMIO or PIO. Default to MMIO. */
150#ifdef CONFIG_8139TOO_PIO 150#ifdef CONFIG_8139TOO_PIO
151static int use_io = 1; 151static bool use_io = true;
152#else 152#else
153static int use_io = 0; 153static bool use_io = false;
154#endif 154#endif
155 155
156/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 156/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -620,7 +620,7 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
620MODULE_LICENSE("GPL"); 620MODULE_LICENSE("GPL");
621MODULE_VERSION(DRV_VERSION); 621MODULE_VERSION(DRV_VERSION);
622 622
623module_param(use_io, int, 0); 623module_param(use_io, bool, 0);
624MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO"); 624MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
625module_param(multicast_filter_limit, int, 0); 625module_param(multicast_filter_limit, int, 0);
626module_param_array(media, int, NULL, 0); 626module_param_array(media, int, NULL, 0);
@@ -750,15 +750,22 @@ static void rtl8139_chip_reset (void __iomem *ioaddr)
750 750
751static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) 751static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
752{ 752{
753 struct device *d = &pdev->dev;
753 void __iomem *ioaddr; 754 void __iomem *ioaddr;
754 struct net_device *dev; 755 struct net_device *dev;
755 struct rtl8139_private *tp; 756 struct rtl8139_private *tp;
756 u8 tmp8; 757 u8 tmp8;
757 int rc, disable_dev_on_err = 0; 758 int rc, disable_dev_on_err = 0;
758 unsigned int i; 759 unsigned int i, bar;
759 unsigned long pio_start, pio_end, pio_flags, pio_len; 760 unsigned long io_len;
760 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
761 u32 version; 761 u32 version;
762 static const struct {
763 unsigned long mask;
764 char *type;
765 } res[] = {
766 { IORESOURCE_IO, "PIO" },
767 { IORESOURCE_MEM, "MMIO" }
768 };
762 769
763 assert (pdev != NULL); 770 assert (pdev != NULL);
764 771
@@ -777,78 +784,45 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
777 if (rc) 784 if (rc)
778 goto err_out; 785 goto err_out;
779 786
780 pio_start = pci_resource_start (pdev, 0);
781 pio_end = pci_resource_end (pdev, 0);
782 pio_flags = pci_resource_flags (pdev, 0);
783 pio_len = pci_resource_len (pdev, 0);
784
785 mmio_start = pci_resource_start (pdev, 1);
786 mmio_end = pci_resource_end (pdev, 1);
787 mmio_flags = pci_resource_flags (pdev, 1);
788 mmio_len = pci_resource_len (pdev, 1);
789
790 /* set this immediately, we need to know before
791 * we talk to the chip directly */
792 pr_debug("PIO region size == 0x%02lX\n", pio_len);
793 pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
794
795retry:
796 if (use_io) {
797 /* make sure PCI base addr 0 is PIO */
798 if (!(pio_flags & IORESOURCE_IO)) {
799 dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
800 rc = -ENODEV;
801 goto err_out;
802 }
803 /* check for weird/broken PCI region reporting */
804 if (pio_len < RTL_MIN_IO_SIZE) {
805 dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
806 rc = -ENODEV;
807 goto err_out;
808 }
809 } else {
810 /* make sure PCI base addr 1 is MMIO */
811 if (!(mmio_flags & IORESOURCE_MEM)) {
812 dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
813 rc = -ENODEV;
814 goto err_out;
815 }
816 if (mmio_len < RTL_MIN_IO_SIZE) {
817 dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
818 rc = -ENODEV;
819 goto err_out;
820 }
821 }
822
823 rc = pci_request_regions (pdev, DRV_NAME); 787 rc = pci_request_regions (pdev, DRV_NAME);
824 if (rc) 788 if (rc)
825 goto err_out; 789 goto err_out;
826 disable_dev_on_err = 1; 790 disable_dev_on_err = 1;
827 791
828 /* enable PCI bus-mastering */
829 pci_set_master (pdev); 792 pci_set_master (pdev);
830 793
831 if (use_io) { 794retry:
832 ioaddr = pci_iomap(pdev, 0, 0); 795 /* PIO bar register comes first. */
833 if (!ioaddr) { 796 bar = !use_io;
834 dev_err(&pdev->dev, "cannot map PIO, aborting\n"); 797
835 rc = -EIO; 798 io_len = pci_resource_len(pdev, bar);
836 goto err_out; 799
837 } 800 dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len);
838 dev->base_addr = pio_start; 801
839 tp->regs_len = pio_len; 802 if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) {
840 } else { 803 dev_err(d, "region #%d not a %s resource, aborting\n", bar,
841 /* ioremap MMIO region */ 804 res[bar].type);
842 ioaddr = pci_iomap(pdev, 1, 0); 805 rc = -ENODEV;
843 if (ioaddr == NULL) { 806 goto err_out;
844 dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n"); 807 }
845 pci_release_regions(pdev); 808 if (io_len < RTL_MIN_IO_SIZE) {
846 use_io = 1; 809 dev_err(d, "Invalid PCI %s region size(s), aborting\n",
810 res[bar].type);
811 rc = -ENODEV;
812 goto err_out;
813 }
814
815 ioaddr = pci_iomap(pdev, bar, 0);
816 if (!ioaddr) {
817 dev_err(d, "cannot map %s\n", res[bar].type);
818 if (!use_io) {
819 use_io = true;
847 goto retry; 820 goto retry;
848 } 821 }
849 dev->base_addr = (long) ioaddr; 822 rc = -ENODEV;
850 tp->regs_len = mmio_len; 823 goto err_out;
851 } 824 }
825 tp->regs_len = io_len;
852 tp->mmio_addr = ioaddr; 826 tp->mmio_addr = ioaddr;
853 827
854 /* Bring old chips out of low-power mode. */ 828 /* Bring old chips out of low-power mode. */
@@ -1035,8 +1009,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1035 dev->hw_features |= NETIF_F_RXALL; 1009 dev->hw_features |= NETIF_F_RXALL;
1036 dev->hw_features |= NETIF_F_RXFCS; 1010 dev->hw_features |= NETIF_F_RXFCS;
1037 1011
1038 dev->irq = pdev->irq;
1039
1040 /* tp zeroed and aligned in alloc_etherdev */ 1012 /* tp zeroed and aligned in alloc_etherdev */
1041 tp = netdev_priv(dev); 1013 tp = netdev_priv(dev);
1042 1014
@@ -1062,9 +1034,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1062 1034
1063 pci_set_drvdata (pdev, dev); 1035 pci_set_drvdata (pdev, dev);
1064 1036
1065 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", 1037 netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n",
1066 board_info[ent->driver_data].name, 1038 board_info[ent->driver_data].name,
1067 dev->base_addr, dev->dev_addr, dev->irq); 1039 ioaddr, dev->dev_addr, pdev->irq);
1068 1040
1069 netdev_dbg(dev, "Identified 8139 chip type '%s'\n", 1041 netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
1070 rtl_chip_info[tp->chipset].name); 1042 rtl_chip_info[tp->chipset].name);
@@ -1339,10 +1311,11 @@ static void mdio_write (struct net_device *dev, int phy_id, int location,
1339static int rtl8139_open (struct net_device *dev) 1311static int rtl8139_open (struct net_device *dev)
1340{ 1312{
1341 struct rtl8139_private *tp = netdev_priv(dev); 1313 struct rtl8139_private *tp = netdev_priv(dev);
1342 int retval;
1343 void __iomem *ioaddr = tp->mmio_addr; 1314 void __iomem *ioaddr = tp->mmio_addr;
1315 const int irq = tp->pci_dev->irq;
1316 int retval;
1344 1317
1345 retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); 1318 retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
1346 if (retval) 1319 if (retval)
1347 return retval; 1320 return retval;
1348 1321
@@ -1351,7 +1324,7 @@ static int rtl8139_open (struct net_device *dev)
1351 tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, 1324 tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
1352 &tp->rx_ring_dma, GFP_KERNEL); 1325 &tp->rx_ring_dma, GFP_KERNEL);
1353 if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { 1326 if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
1354 free_irq(dev->irq, dev); 1327 free_irq(irq, dev);
1355 1328
1356 if (tp->tx_bufs) 1329 if (tp->tx_bufs)
1357 dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, 1330 dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
@@ -1377,7 +1350,7 @@ static int rtl8139_open (struct net_device *dev)
1377 "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n", 1350 "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
1378 __func__, 1351 __func__,
1379 (unsigned long long)pci_resource_start (tp->pci_dev, 1), 1352 (unsigned long long)pci_resource_start (tp->pci_dev, 1),
1380 dev->irq, RTL_R8 (MediaStatus), 1353 irq, RTL_R8 (MediaStatus),
1381 tp->mii.full_duplex ? "full" : "half"); 1354 tp->mii.full_duplex ? "full" : "half");
1382 1355
1383 rtl8139_start_thread(tp); 1356 rtl8139_start_thread(tp);
@@ -2240,9 +2213,12 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
2240 */ 2213 */
2241static void rtl8139_poll_controller(struct net_device *dev) 2214static void rtl8139_poll_controller(struct net_device *dev)
2242{ 2215{
2243 disable_irq(dev->irq); 2216 struct rtl8139_private *tp = netdev_priv(dev);
2244 rtl8139_interrupt(dev->irq, dev); 2217 const int irq = tp->pci_dev->irq;
2245 enable_irq(dev->irq); 2218
2219 disable_irq(irq);
2220 rtl8139_interrupt(irq, dev);
2221 enable_irq(irq);
2246} 2222}
2247#endif 2223#endif
2248 2224
@@ -2295,7 +2271,7 @@ static int rtl8139_close (struct net_device *dev)
2295 2271
2296 spin_unlock_irqrestore (&tp->lock, flags); 2272 spin_unlock_irqrestore (&tp->lock, flags);
2297 2273
2298 free_irq (dev->irq, dev); 2274 free_irq(tp->pci_dev->irq, dev);
2299 2275
2300 rtl8139_tx_clear (tp); 2276 rtl8139_tx_clear (tp);
2301 2277
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f54509377efa..00628d84342f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -44,6 +44,8 @@
44#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" 44#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" 45#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" 46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
47 49
48#ifdef RTL8169_DEBUG 50#ifdef RTL8169_DEBUG
49#define assert(expr) \ 51#define assert(expr) \
@@ -133,6 +135,8 @@ enum mac_version {
133 RTL_GIGA_MAC_VER_34, 135 RTL_GIGA_MAC_VER_34,
134 RTL_GIGA_MAC_VER_35, 136 RTL_GIGA_MAC_VER_35,
135 RTL_GIGA_MAC_VER_36, 137 RTL_GIGA_MAC_VER_36,
138 RTL_GIGA_MAC_VER_37,
139 RTL_GIGA_MAC_VER_38,
136 RTL_GIGA_MAC_NONE = 0xff, 140 RTL_GIGA_MAC_NONE = 0xff,
137}; 141};
138 142
@@ -245,6 +249,12 @@ static const struct {
245 [RTL_GIGA_MAC_VER_36] = 249 [RTL_GIGA_MAC_VER_36] =
246 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, 250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
247 JUMBO_9K, false), 251 JUMBO_9K, false),
252 [RTL_GIGA_MAC_VER_37] =
253 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
254 JUMBO_1K, true),
255 [RTL_GIGA_MAC_VER_38] =
256 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
257 JUMBO_9K, false),
248}; 258};
249#undef _R 259#undef _R
250 260
@@ -315,6 +325,8 @@ enum rtl_registers {
315 Config0 = 0x51, 325 Config0 = 0x51,
316 Config1 = 0x52, 326 Config1 = 0x52,
317 Config2 = 0x53, 327 Config2 = 0x53,
328#define PME_SIGNAL (1 << 5) /* 8168c and later */
329
318 Config3 = 0x54, 330 Config3 = 0x54,
319 Config4 = 0x55, 331 Config4 = 0x55,
320 Config5 = 0x56, 332 Config5 = 0x56,
@@ -355,6 +367,9 @@ enum rtl8168_8101_registers {
355#define CSIAR_BYTE_ENABLE 0x0f 367#define CSIAR_BYTE_ENABLE 0x0f
356#define CSIAR_BYTE_ENABLE_SHIFT 12 368#define CSIAR_BYTE_ENABLE_SHIFT 12
357#define CSIAR_ADDR_MASK 0x0fff 369#define CSIAR_ADDR_MASK 0x0fff
370#define CSIAR_FUNC_CARD 0x00000000
371#define CSIAR_FUNC_SDIO 0x00010000
372#define CSIAR_FUNC_NIC 0x00020000
358 PMCH = 0x6f, 373 PMCH = 0x6f,
359 EPHYAR = 0x80, 374 EPHYAR = 0x80,
360#define EPHYAR_FLAG 0x80000000 375#define EPHYAR_FLAG 0x80000000
@@ -716,6 +731,11 @@ struct rtl8169_private {
716 void (*disable)(struct rtl8169_private *); 731 void (*disable)(struct rtl8169_private *);
717 } jumbo_ops; 732 } jumbo_ops;
718 733
734 struct csi_ops {
735 void (*write)(void __iomem *, int, int);
736 u32 (*read)(void __iomem *, int);
737 } csi_ops;
738
719 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); 739 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
720 int (*get_settings)(struct net_device *, struct ethtool_cmd *); 740 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
721 void (*phy_reset_enable)(struct rtl8169_private *tp); 741 void (*phy_reset_enable)(struct rtl8169_private *tp);
@@ -768,6 +788,8 @@ MODULE_FIRMWARE(FIRMWARE_8168E_3);
768MODULE_FIRMWARE(FIRMWARE_8105E_1); 788MODULE_FIRMWARE(FIRMWARE_8105E_1);
769MODULE_FIRMWARE(FIRMWARE_8168F_1); 789MODULE_FIRMWARE(FIRMWARE_8168F_1);
770MODULE_FIRMWARE(FIRMWARE_8168F_2); 790MODULE_FIRMWARE(FIRMWARE_8168F_2);
791MODULE_FIRMWARE(FIRMWARE_8402_1);
792MODULE_FIRMWARE(FIRMWARE_8411_1);
771 793
772static void rtl_lock_work(struct rtl8169_private *tp) 794static void rtl_lock_work(struct rtl8169_private *tp)
773{ 795{
@@ -1078,40 +1100,6 @@ static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
1078 return value; 1100 return value;
1079} 1101}
1080 1102
1081static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
1082{
1083 unsigned int i;
1084
1085 RTL_W32(CSIDR, value);
1086 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
1087 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
1088
1089 for (i = 0; i < 100; i++) {
1090 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
1091 break;
1092 udelay(10);
1093 }
1094}
1095
1096static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
1097{
1098 u32 value = ~0x00;
1099 unsigned int i;
1100
1101 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
1102 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
1103
1104 for (i = 0; i < 100; i++) {
1105 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
1106 value = RTL_R32(CSIDR);
1107 break;
1108 }
1109 udelay(10);
1110 }
1111
1112 return value;
1113}
1114
1115static 1103static
1116void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) 1104void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
1117{ 1105{
@@ -1281,7 +1269,8 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1281 if (!netif_running(dev)) 1269 if (!netif_running(dev))
1282 return; 1270 return;
1283 1271
1284 if (tp->mac_version == RTL_GIGA_MAC_VER_34) { 1272 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1273 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1285 if (RTL_R8(PHYstatus) & _1000bpsF) { 1274 if (RTL_R8(PHYstatus) & _1000bpsF) {
1286 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1275 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
1287 0x00000011, ERIAR_EXGMAC); 1276 0x00000011, ERIAR_EXGMAC);
@@ -1316,6 +1305,16 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1316 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1305 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
1317 0x0000003f, ERIAR_EXGMAC); 1306 0x0000003f, ERIAR_EXGMAC);
1318 } 1307 }
1308 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1309 if (RTL_R8(PHYstatus) & _10bps) {
1310 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
1311 0x4d02, ERIAR_EXGMAC);
1312 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
1313 0x0060, ERIAR_EXGMAC);
1314 } else {
1315 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
1316 0x0000, ERIAR_EXGMAC);
1317 }
1319 } 1318 }
1320} 1319}
1321 1320
@@ -1396,7 +1395,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1396 u16 reg; 1395 u16 reg;
1397 u8 mask; 1396 u8 mask;
1398 } cfg[] = { 1397 } cfg[] = {
1399 { WAKE_ANY, Config1, PMEnable },
1400 { WAKE_PHY, Config3, LinkUp }, 1398 { WAKE_PHY, Config3, LinkUp },
1401 { WAKE_MAGIC, Config3, MagicPacket }, 1399 { WAKE_MAGIC, Config3, MagicPacket },
1402 { WAKE_UCAST, Config5, UWF }, 1400 { WAKE_UCAST, Config5, UWF },
@@ -1404,16 +1402,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1404 { WAKE_MCAST, Config5, MWF }, 1402 { WAKE_MCAST, Config5, MWF },
1405 { WAKE_ANY, Config5, LanWake } 1403 { WAKE_ANY, Config5, LanWake }
1406 }; 1404 };
1405 u8 options;
1407 1406
1408 RTL_W8(Cfg9346, Cfg9346_Unlock); 1407 RTL_W8(Cfg9346, Cfg9346_Unlock);
1409 1408
1410 for (i = 0; i < ARRAY_SIZE(cfg); i++) { 1409 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1411 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; 1410 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1412 if (wolopts & cfg[i].opt) 1411 if (wolopts & cfg[i].opt)
1413 options |= cfg[i].mask; 1412 options |= cfg[i].mask;
1414 RTL_W8(cfg[i].reg, options); 1413 RTL_W8(cfg[i].reg, options);
1415 } 1414 }
1416 1415
1416 switch (tp->mac_version) {
1417 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1418 options = RTL_R8(Config1) & ~PMEnable;
1419 if (wolopts)
1420 options |= PMEnable;
1421 RTL_W8(Config1, options);
1422 break;
1423 default:
1424 options = RTL_R8(Config2) & ~PME_SIGNAL;
1425 if (wolopts)
1426 options |= PME_SIGNAL;
1427 RTL_W8(Config2, options);
1428 break;
1429 }
1430
1417 RTL_W8(Cfg9346, Cfg9346_Lock); 1431 RTL_W8(Cfg9346, Cfg9346_Lock);
1418} 1432}
1419 1433
@@ -1853,6 +1867,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1853 .get_strings = rtl8169_get_strings, 1867 .get_strings = rtl8169_get_strings,
1854 .get_sset_count = rtl8169_get_sset_count, 1868 .get_sset_count = rtl8169_get_sset_count,
1855 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1869 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1870 .get_ts_info = ethtool_op_get_ts_info,
1856}; 1871};
1857 1872
1858static void rtl8169_get_mac_version(struct rtl8169_private *tp, 1873static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1876,6 +1891,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1876 int mac_version; 1891 int mac_version;
1877 } mac_info[] = { 1892 } mac_info[] = {
1878 /* 8168F family. */ 1893 /* 8168F family. */
1894 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
1879 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, 1895 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
1880 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, 1896 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
1881 1897
@@ -1913,6 +1929,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1913 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1929 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1914 1930
1915 /* 8101 family. */ 1931 /* 8101 family. */
1932 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
1916 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, 1933 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
1917 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, 1934 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1918 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, 1935 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
@@ -3013,6 +3030,28 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3013 rtl_writephy(tp, 0x1f, 0x0000); 3030 rtl_writephy(tp, 0x1f, 0x0000);
3014} 3031}
3015 3032
3033static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3034{
3035 /* For 4-corner performance improve */
3036 rtl_writephy(tp, 0x1f, 0x0005);
3037 rtl_writephy(tp, 0x05, 0x8b80);
3038 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3039 rtl_writephy(tp, 0x1f, 0x0000);
3040
3041 /* PHY auto speed down */
3042 rtl_writephy(tp, 0x1f, 0x0007);
3043 rtl_writephy(tp, 0x1e, 0x002d);
3044 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3045 rtl_writephy(tp, 0x1f, 0x0000);
3046 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3047
3048 /* Improve 10M EEE waveform */
3049 rtl_writephy(tp, 0x1f, 0x0005);
3050 rtl_writephy(tp, 0x05, 0x8b86);
3051 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3052 rtl_writephy(tp, 0x1f, 0x0000);
3053}
3054
3016static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) 3055static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3017{ 3056{
3018 static const struct phy_reg phy_reg_init[] = { 3057 static const struct phy_reg phy_reg_init[] = {
@@ -3054,24 +3093,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3054 3093
3055 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3094 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3056 3095
3057 /* For 4-corner performance improve */ 3096 rtl8168f_hw_phy_config(tp);
3058 rtl_writephy(tp, 0x1f, 0x0005);
3059 rtl_writephy(tp, 0x05, 0x8b80);
3060 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3061 rtl_writephy(tp, 0x1f, 0x0000);
3062
3063 /* PHY auto speed down */
3064 rtl_writephy(tp, 0x1f, 0x0007);
3065 rtl_writephy(tp, 0x1e, 0x002d);
3066 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3067 rtl_writephy(tp, 0x1f, 0x0000);
3068 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3069
3070 /* Improve 10M EEE waveform */
3071 rtl_writephy(tp, 0x1f, 0x0005);
3072 rtl_writephy(tp, 0x05, 0x8b86);
3073 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3074 rtl_writephy(tp, 0x1f, 0x0000);
3075 3097
3076 /* Improve 2-pair detection performance */ 3098 /* Improve 2-pair detection performance */
3077 rtl_writephy(tp, 0x1f, 0x0005); 3099 rtl_writephy(tp, 0x1f, 0x0005);
@@ -3084,23 +3106,104 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3084{ 3106{
3085 rtl_apply_firmware(tp); 3107 rtl_apply_firmware(tp);
3086 3108
3087 /* For 4-corner performance improve */ 3109 rtl8168f_hw_phy_config(tp);
3110}
3111
3112static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3113{
3114 void __iomem *ioaddr = tp->mmio_addr;
3115 static const struct phy_reg phy_reg_init[] = {
3116 /* Channel estimation fine tune */
3117 { 0x1f, 0x0003 },
3118 { 0x09, 0xa20f },
3119 { 0x1f, 0x0000 },
3120
3121 /* Modify green table for giga & fnet */
3122 { 0x1f, 0x0005 },
3123 { 0x05, 0x8b55 },
3124 { 0x06, 0x0000 },
3125 { 0x05, 0x8b5e },
3126 { 0x06, 0x0000 },
3127 { 0x05, 0x8b67 },
3128 { 0x06, 0x0000 },
3129 { 0x05, 0x8b70 },
3130 { 0x06, 0x0000 },
3131 { 0x1f, 0x0000 },
3132 { 0x1f, 0x0007 },
3133 { 0x1e, 0x0078 },
3134 { 0x17, 0x0000 },
3135 { 0x19, 0x00aa },
3136 { 0x1f, 0x0000 },
3137
3138 /* Modify green table for 10M */
3139 { 0x1f, 0x0005 },
3140 { 0x05, 0x8b79 },
3141 { 0x06, 0xaa00 },
3142 { 0x1f, 0x0000 },
3143
3144 /* Disable hiimpedance detection (RTCT) */
3145 { 0x1f, 0x0003 },
3146 { 0x01, 0x328a },
3147 { 0x1f, 0x0000 }
3148 };
3149
3150
3151 rtl_apply_firmware(tp);
3152
3153 rtl8168f_hw_phy_config(tp);
3154
3155 /* Improve 2-pair detection performance */
3088 rtl_writephy(tp, 0x1f, 0x0005); 3156 rtl_writephy(tp, 0x1f, 0x0005);
3089 rtl_writephy(tp, 0x05, 0x8b80); 3157 rtl_writephy(tp, 0x05, 0x8b85);
3090 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); 3158 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3091 rtl_writephy(tp, 0x1f, 0x0000); 3159 rtl_writephy(tp, 0x1f, 0x0000);
3092 3160
3093 /* PHY auto speed down */ 3161 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3094 rtl_writephy(tp, 0x1f, 0x0007); 3162
3095 rtl_writephy(tp, 0x1e, 0x002d); 3163 /* Modify green table for giga */
3096 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); 3164 rtl_writephy(tp, 0x1f, 0x0005);
3165 rtl_writephy(tp, 0x05, 0x8b54);
3166 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3167 rtl_writephy(tp, 0x05, 0x8b5d);
3168 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3169 rtl_writephy(tp, 0x05, 0x8a7c);
3170 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3171 rtl_writephy(tp, 0x05, 0x8a7f);
3172 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3173 rtl_writephy(tp, 0x05, 0x8a82);
3174 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3175 rtl_writephy(tp, 0x05, 0x8a85);
3176 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3177 rtl_writephy(tp, 0x05, 0x8a88);
3178 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3097 rtl_writephy(tp, 0x1f, 0x0000); 3179 rtl_writephy(tp, 0x1f, 0x0000);
3098 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3099 3180
3100 /* Improve 10M EEE waveform */ 3181 /* uc same-seed solution */
3101 rtl_writephy(tp, 0x1f, 0x0005); 3182 rtl_writephy(tp, 0x1f, 0x0005);
3102 rtl_writephy(tp, 0x05, 0x8b86); 3183 rtl_writephy(tp, 0x05, 0x8b85);
3103 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); 3184 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3185 rtl_writephy(tp, 0x1f, 0x0000);
3186
3187 /* eee setting */
3188 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3189 rtl_writephy(tp, 0x1f, 0x0005);
3190 rtl_writephy(tp, 0x05, 0x8b85);
3191 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3192 rtl_writephy(tp, 0x1f, 0x0004);
3193 rtl_writephy(tp, 0x1f, 0x0007);
3194 rtl_writephy(tp, 0x1e, 0x0020);
3195 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3196 rtl_writephy(tp, 0x1f, 0x0000);
3197 rtl_writephy(tp, 0x0d, 0x0007);
3198 rtl_writephy(tp, 0x0e, 0x003c);
3199 rtl_writephy(tp, 0x0d, 0x4007);
3200 rtl_writephy(tp, 0x0e, 0x0000);
3201 rtl_writephy(tp, 0x0d, 0x0000);
3202
3203 /* Green feature */
3204 rtl_writephy(tp, 0x1f, 0x0003);
3205 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3206 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3104 rtl_writephy(tp, 0x1f, 0x0000); 3207 rtl_writephy(tp, 0x1f, 0x0000);
3105} 3208}
3106 3209
@@ -3147,6 +3250,25 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3147 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3250 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3148} 3251}
3149 3252
3253static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3254{
3255 void __iomem *ioaddr = tp->mmio_addr;
3256
3257 /* Disable ALDPS before setting firmware */
3258 rtl_writephy(tp, 0x1f, 0x0000);
3259 rtl_writephy(tp, 0x18, 0x0310);
3260 msleep(20);
3261
3262 rtl_apply_firmware(tp);
3263
3264 /* EEE setting */
3265 rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3266 rtl_writephy(tp, 0x1f, 0x0004);
3267 rtl_writephy(tp, 0x10, 0x401f);
3268 rtl_writephy(tp, 0x19, 0x7030);
3269 rtl_writephy(tp, 0x1f, 0x0000);
3270}
3271
3150static void rtl_hw_phy_config(struct net_device *dev) 3272static void rtl_hw_phy_config(struct net_device *dev)
3151{ 3273{
3152 struct rtl8169_private *tp = netdev_priv(dev); 3274 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3235,6 +3357,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
3235 rtl8168f_2_hw_phy_config(tp); 3357 rtl8168f_2_hw_phy_config(tp);
3236 break; 3358 break;
3237 3359
3360 case RTL_GIGA_MAC_VER_37:
3361 rtl8402_hw_phy_config(tp);
3362 break;
3363
3364 case RTL_GIGA_MAC_VER_38:
3365 rtl8411_hw_phy_config(tp);
3366 break;
3367
3238 default: 3368 default:
3239 break; 3369 break;
3240 } 3370 }
@@ -3472,6 +3602,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3472 case RTL_GIGA_MAC_VER_32: 3602 case RTL_GIGA_MAC_VER_32:
3473 case RTL_GIGA_MAC_VER_33: 3603 case RTL_GIGA_MAC_VER_33:
3474 case RTL_GIGA_MAC_VER_34: 3604 case RTL_GIGA_MAC_VER_34:
3605 case RTL_GIGA_MAC_VER_37:
3606 case RTL_GIGA_MAC_VER_38:
3475 RTL_W32(RxConfig, RTL_R32(RxConfig) | 3607 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3476 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 3608 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3477 break; 3609 break;
@@ -3507,15 +3639,45 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
3507 3639
3508static void r810x_pll_power_down(struct rtl8169_private *tp) 3640static void r810x_pll_power_down(struct rtl8169_private *tp)
3509{ 3641{
3642 void __iomem *ioaddr = tp->mmio_addr;
3643
3510 if (rtl_wol_pll_power_down(tp)) 3644 if (rtl_wol_pll_power_down(tp))
3511 return; 3645 return;
3512 3646
3513 r810x_phy_power_down(tp); 3647 r810x_phy_power_down(tp);
3648
3649 switch (tp->mac_version) {
3650 case RTL_GIGA_MAC_VER_07:
3651 case RTL_GIGA_MAC_VER_08:
3652 case RTL_GIGA_MAC_VER_09:
3653 case RTL_GIGA_MAC_VER_10:
3654 case RTL_GIGA_MAC_VER_13:
3655 case RTL_GIGA_MAC_VER_16:
3656 break;
3657 default:
3658 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3659 break;
3660 }
3514} 3661}
3515 3662
3516static void r810x_pll_power_up(struct rtl8169_private *tp) 3663static void r810x_pll_power_up(struct rtl8169_private *tp)
3517{ 3664{
3665 void __iomem *ioaddr = tp->mmio_addr;
3666
3518 r810x_phy_power_up(tp); 3667 r810x_phy_power_up(tp);
3668
3669 switch (tp->mac_version) {
3670 case RTL_GIGA_MAC_VER_07:
3671 case RTL_GIGA_MAC_VER_08:
3672 case RTL_GIGA_MAC_VER_09:
3673 case RTL_GIGA_MAC_VER_10:
3674 case RTL_GIGA_MAC_VER_13:
3675 case RTL_GIGA_MAC_VER_16:
3676 break;
3677 default:
3678 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3679 break;
3680 }
3519} 3681}
3520 3682
3521static void r8168_phy_power_up(struct rtl8169_private *tp) 3683static void r8168_phy_power_up(struct rtl8169_private *tp)
@@ -3619,13 +3781,6 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
3619{ 3781{
3620 void __iomem *ioaddr = tp->mmio_addr; 3782 void __iomem *ioaddr = tp->mmio_addr;
3621 3783
3622 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3623 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3624 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3625 r8168dp_check_dash(tp)) {
3626 return;
3627 }
3628
3629 switch (tp->mac_version) { 3784 switch (tp->mac_version) {
3630 case RTL_GIGA_MAC_VER_25: 3785 case RTL_GIGA_MAC_VER_25:
3631 case RTL_GIGA_MAC_VER_26: 3786 case RTL_GIGA_MAC_VER_26:
@@ -3670,6 +3825,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3670 case RTL_GIGA_MAC_VER_16: 3825 case RTL_GIGA_MAC_VER_16:
3671 case RTL_GIGA_MAC_VER_29: 3826 case RTL_GIGA_MAC_VER_29:
3672 case RTL_GIGA_MAC_VER_30: 3827 case RTL_GIGA_MAC_VER_30:
3828 case RTL_GIGA_MAC_VER_37:
3673 ops->down = r810x_pll_power_down; 3829 ops->down = r810x_pll_power_down;
3674 ops->up = r810x_pll_power_up; 3830 ops->up = r810x_pll_power_up;
3675 break; 3831 break;
@@ -3694,6 +3850,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3694 case RTL_GIGA_MAC_VER_34: 3850 case RTL_GIGA_MAC_VER_34:
3695 case RTL_GIGA_MAC_VER_35: 3851 case RTL_GIGA_MAC_VER_35:
3696 case RTL_GIGA_MAC_VER_36: 3852 case RTL_GIGA_MAC_VER_36:
3853 case RTL_GIGA_MAC_VER_38:
3697 ops->down = r8168_pll_power_down; 3854 ops->down = r8168_pll_power_down;
3698 ops->up = r8168_pll_power_up; 3855 ops->up = r8168_pll_power_up;
3699 break; 3856 break;
@@ -3979,7 +4136,9 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3979 udelay(20); 4136 udelay(20);
3980 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 4137 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
3981 tp->mac_version == RTL_GIGA_MAC_VER_35 || 4138 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
3982 tp->mac_version == RTL_GIGA_MAC_VER_36) { 4139 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4140 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4141 tp->mac_version == RTL_GIGA_MAC_VER_38) {
3983 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4142 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
3984 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) 4143 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
3985 udelay(100); 4144 udelay(100);
@@ -4185,22 +4344,141 @@ static void rtl_hw_start_8169(struct net_device *dev)
4185 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 4344 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
4186} 4345}
4187 4346
4188static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) 4347static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4348{
4349 if (tp->csi_ops.write)
4350 tp->csi_ops.write(tp->mmio_addr, addr, value);
4351}
4352
4353static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4354{
4355 if (tp->csi_ops.read)
4356 return tp->csi_ops.read(tp->mmio_addr, addr);
4357 else
4358 return ~0;
4359}
4360
4361static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4189{ 4362{
4190 u32 csi; 4363 u32 csi;
4191 4364
4192 csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff; 4365 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4193 rtl_csi_write(ioaddr, 0x070c, csi | bits); 4366 rtl_csi_write(tp, 0x070c, csi | bits);
4367}
4368
4369static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4370{
4371 rtl_csi_access_enable(tp, 0x17000000);
4372}
4373
4374static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4375{
4376 rtl_csi_access_enable(tp, 0x27000000);
4377}
4378
4379static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
4380{
4381 unsigned int i;
4382
4383 RTL_W32(CSIDR, value);
4384 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4385 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4386
4387 for (i = 0; i < 100; i++) {
4388 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4389 break;
4390 udelay(10);
4391 }
4392}
4393
4394static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
4395{
4396 u32 value = ~0x00;
4397 unsigned int i;
4398
4399 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4400 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4401
4402 for (i = 0; i < 100; i++) {
4403 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4404 value = RTL_R32(CSIDR);
4405 break;
4406 }
4407 udelay(10);
4408 }
4409
4410 return value;
4411}
4412
4413static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
4414{
4415 unsigned int i;
4416
4417 RTL_W32(CSIDR, value);
4418 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4419 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4420 CSIAR_FUNC_NIC);
4421
4422 for (i = 0; i < 100; i++) {
4423 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4424 break;
4425 udelay(10);
4426 }
4194} 4427}
4195 4428
4196static void rtl_csi_access_enable_1(void __iomem *ioaddr) 4429static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
4197{ 4430{
4198 rtl_csi_access_enable(ioaddr, 0x17000000); 4431 u32 value = ~0x00;
4432 unsigned int i;
4433
4434 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4435 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4436
4437 for (i = 0; i < 100; i++) {
4438 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4439 value = RTL_R32(CSIDR);
4440 break;
4441 }
4442 udelay(10);
4443 }
4444
4445 return value;
4199} 4446}
4200 4447
4201static void rtl_csi_access_enable_2(void __iomem *ioaddr) 4448static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
4202{ 4449{
4203 rtl_csi_access_enable(ioaddr, 0x27000000); 4450 struct csi_ops *ops = &tp->csi_ops;
4451
4452 switch (tp->mac_version) {
4453 case RTL_GIGA_MAC_VER_01:
4454 case RTL_GIGA_MAC_VER_02:
4455 case RTL_GIGA_MAC_VER_03:
4456 case RTL_GIGA_MAC_VER_04:
4457 case RTL_GIGA_MAC_VER_05:
4458 case RTL_GIGA_MAC_VER_06:
4459 case RTL_GIGA_MAC_VER_10:
4460 case RTL_GIGA_MAC_VER_11:
4461 case RTL_GIGA_MAC_VER_12:
4462 case RTL_GIGA_MAC_VER_13:
4463 case RTL_GIGA_MAC_VER_14:
4464 case RTL_GIGA_MAC_VER_15:
4465 case RTL_GIGA_MAC_VER_16:
4466 case RTL_GIGA_MAC_VER_17:
4467 ops->write = NULL;
4468 ops->read = NULL;
4469 break;
4470
4471 case RTL_GIGA_MAC_VER_37:
4472 case RTL_GIGA_MAC_VER_38:
4473 ops->write = r8402_csi_write;
4474 ops->read = r8402_csi_read;
4475 break;
4476
4477 default:
4478 ops->write = r8169_csi_write;
4479 ops->read = r8169_csi_read;
4480 break;
4481 }
4204} 4482}
4205 4483
4206struct ephy_info { 4484struct ephy_info {
@@ -4257,8 +4535,11 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
4257 PktCntrDisable | \ 4535 PktCntrDisable | \
4258 Mac_dbgo_sel) 4536 Mac_dbgo_sel)
4259 4537
4260static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev) 4538static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4261{ 4539{
4540 void __iomem *ioaddr = tp->mmio_addr;
4541 struct pci_dev *pdev = tp->pci_dev;
4542
4262 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4543 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4263 4544
4264 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4545 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
@@ -4267,17 +4548,22 @@ static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
4267 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); 4548 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4268} 4549}
4269 4550
4270static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev) 4551static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4271{ 4552{
4272 rtl_hw_start_8168bb(ioaddr, pdev); 4553 void __iomem *ioaddr = tp->mmio_addr;
4554
4555 rtl_hw_start_8168bb(tp);
4273 4556
4274 RTL_W8(MaxTxPacketSize, TxPacketMax); 4557 RTL_W8(MaxTxPacketSize, TxPacketMax);
4275 4558
4276 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); 4559 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
4277} 4560}
4278 4561
4279static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev) 4562static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4280{ 4563{
4564 void __iomem *ioaddr = tp->mmio_addr;
4565 struct pci_dev *pdev = tp->pci_dev;
4566
4281 RTL_W8(Config1, RTL_R8(Config1) | Speed_down); 4567 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4282 4568
4283 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4569 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
@@ -4289,8 +4575,9 @@ static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
4289 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4575 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4290} 4576}
4291 4577
4292static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) 4578static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4293{ 4579{
4580 void __iomem *ioaddr = tp->mmio_addr;
4294 static const struct ephy_info e_info_8168cp[] = { 4581 static const struct ephy_info e_info_8168cp[] = {
4295 { 0x01, 0, 0x0001 }, 4582 { 0x01, 0, 0x0001 },
4296 { 0x02, 0x0800, 0x1000 }, 4583 { 0x02, 0x0800, 0x1000 },
@@ -4299,16 +4586,19 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
4299 { 0x07, 0, 0x2000 } 4586 { 0x07, 0, 0x2000 }
4300 }; 4587 };
4301 4588
4302 rtl_csi_access_enable_2(ioaddr); 4589 rtl_csi_access_enable_2(tp);
4303 4590
4304 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); 4591 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4305 4592
4306 __rtl_hw_start_8168cp(ioaddr, pdev); 4593 __rtl_hw_start_8168cp(tp);
4307} 4594}
4308 4595
4309static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) 4596static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4310{ 4597{
4311 rtl_csi_access_enable_2(ioaddr); 4598 void __iomem *ioaddr = tp->mmio_addr;
4599 struct pci_dev *pdev = tp->pci_dev;
4600
4601 rtl_csi_access_enable_2(tp);
4312 4602
4313 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4603 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4314 4604
@@ -4317,9 +4607,12 @@ static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
4317 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4607 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4318} 4608}
4319 4609
4320static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) 4610static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4321{ 4611{
4322 rtl_csi_access_enable_2(ioaddr); 4612 void __iomem *ioaddr = tp->mmio_addr;
4613 struct pci_dev *pdev = tp->pci_dev;
4614
4615 rtl_csi_access_enable_2(tp);
4323 4616
4324 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4617 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4325 4618
@@ -4333,52 +4626,57 @@ static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
4333 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4626 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4334} 4627}
4335 4628
4336static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev) 4629static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4337{ 4630{
4631 void __iomem *ioaddr = tp->mmio_addr;
4338 static const struct ephy_info e_info_8168c_1[] = { 4632 static const struct ephy_info e_info_8168c_1[] = {
4339 { 0x02, 0x0800, 0x1000 }, 4633 { 0x02, 0x0800, 0x1000 },
4340 { 0x03, 0, 0x0002 }, 4634 { 0x03, 0, 0x0002 },
4341 { 0x06, 0x0080, 0x0000 } 4635 { 0x06, 0x0080, 0x0000 }
4342 }; 4636 };
4343 4637
4344 rtl_csi_access_enable_2(ioaddr); 4638 rtl_csi_access_enable_2(tp);
4345 4639
4346 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); 4640 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4347 4641
4348 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); 4642 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4349 4643
4350 __rtl_hw_start_8168cp(ioaddr, pdev); 4644 __rtl_hw_start_8168cp(tp);
4351} 4645}
4352 4646
4353static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev) 4647static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4354{ 4648{
4649 void __iomem *ioaddr = tp->mmio_addr;
4355 static const struct ephy_info e_info_8168c_2[] = { 4650 static const struct ephy_info e_info_8168c_2[] = {
4356 { 0x01, 0, 0x0001 }, 4651 { 0x01, 0, 0x0001 },
4357 { 0x03, 0x0400, 0x0220 } 4652 { 0x03, 0x0400, 0x0220 }
4358 }; 4653 };
4359 4654
4360 rtl_csi_access_enable_2(ioaddr); 4655 rtl_csi_access_enable_2(tp);
4361 4656
4362 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); 4657 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4363 4658
4364 __rtl_hw_start_8168cp(ioaddr, pdev); 4659 __rtl_hw_start_8168cp(tp);
4365} 4660}
4366 4661
4367static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev) 4662static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4368{ 4663{
4369 rtl_hw_start_8168c_2(ioaddr, pdev); 4664 rtl_hw_start_8168c_2(tp);
4370} 4665}
4371 4666
4372static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev) 4667static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4373{ 4668{
4374 rtl_csi_access_enable_2(ioaddr); 4669 rtl_csi_access_enable_2(tp);
4375 4670
4376 __rtl_hw_start_8168cp(ioaddr, pdev); 4671 __rtl_hw_start_8168cp(tp);
4377} 4672}
4378 4673
4379static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) 4674static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4380{ 4675{
4381 rtl_csi_access_enable_2(ioaddr); 4676 void __iomem *ioaddr = tp->mmio_addr;
4677 struct pci_dev *pdev = tp->pci_dev;
4678
4679 rtl_csi_access_enable_2(tp);
4382 4680
4383 rtl_disable_clock_request(pdev); 4681 rtl_disable_clock_request(pdev);
4384 4682
@@ -4389,9 +4687,12 @@ static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
4389 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4687 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4390} 4688}
4391 4689
4392static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev) 4690static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4393{ 4691{
4394 rtl_csi_access_enable_1(ioaddr); 4692 void __iomem *ioaddr = tp->mmio_addr;
4693 struct pci_dev *pdev = tp->pci_dev;
4694
4695 rtl_csi_access_enable_1(tp);
4395 4696
4396 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4697 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4397 4698
@@ -4400,8 +4701,10 @@ static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
4400 rtl_disable_clock_request(pdev); 4701 rtl_disable_clock_request(pdev);
4401} 4702}
4402 4703
4403static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) 4704static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4404{ 4705{
4706 void __iomem *ioaddr = tp->mmio_addr;
4707 struct pci_dev *pdev = tp->pci_dev;
4405 static const struct ephy_info e_info_8168d_4[] = { 4708 static const struct ephy_info e_info_8168d_4[] = {
4406 { 0x0b, ~0, 0x48 }, 4709 { 0x0b, ~0, 0x48 },
4407 { 0x19, 0x20, 0x50 }, 4710 { 0x19, 0x20, 0x50 },
@@ -4409,7 +4712,7 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4409 }; 4712 };
4410 int i; 4713 int i;
4411 4714
4412 rtl_csi_access_enable_1(ioaddr); 4715 rtl_csi_access_enable_1(tp);
4413 4716
4414 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4717 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4415 4718
@@ -4426,8 +4729,10 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4426 rtl_enable_clock_request(pdev); 4729 rtl_enable_clock_request(pdev);
4427} 4730}
4428 4731
4429static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) 4732static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4430{ 4733{
4734 void __iomem *ioaddr = tp->mmio_addr;
4735 struct pci_dev *pdev = tp->pci_dev;
4431 static const struct ephy_info e_info_8168e_1[] = { 4736 static const struct ephy_info e_info_8168e_1[] = {
4432 { 0x00, 0x0200, 0x0100 }, 4737 { 0x00, 0x0200, 0x0100 },
4433 { 0x00, 0x0000, 0x0004 }, 4738 { 0x00, 0x0000, 0x0004 },
@@ -4444,7 +4749,7 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4444 { 0x0a, 0x0000, 0x0040 } 4749 { 0x0a, 0x0000, 0x0040 }
4445 }; 4750 };
4446 4751
4447 rtl_csi_access_enable_2(ioaddr); 4752 rtl_csi_access_enable_2(tp);
4448 4753
4449 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); 4754 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4450 4755
@@ -4461,14 +4766,16 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4461 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4766 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4462} 4767}
4463 4768
4464static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) 4769static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4465{ 4770{
4771 void __iomem *ioaddr = tp->mmio_addr;
4772 struct pci_dev *pdev = tp->pci_dev;
4466 static const struct ephy_info e_info_8168e_2[] = { 4773 static const struct ephy_info e_info_8168e_2[] = {
4467 { 0x09, 0x0000, 0x0080 }, 4774 { 0x09, 0x0000, 0x0080 },
4468 { 0x19, 0x0000, 0x0224 } 4775 { 0x19, 0x0000, 0x0224 }
4469 }; 4776 };
4470 4777
4471 rtl_csi_access_enable_1(ioaddr); 4778 rtl_csi_access_enable_1(tp);
4472 4779
4473 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); 4780 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
4474 4781
@@ -4499,18 +4806,12 @@ static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
4499 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4806 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4500} 4807}
4501 4808
4502static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) 4809static void rtl_hw_start_8168f(struct rtl8169_private *tp)
4503{ 4810{
4504 static const struct ephy_info e_info_8168f_1[] = { 4811 void __iomem *ioaddr = tp->mmio_addr;
4505 { 0x06, 0x00c0, 0x0020 }, 4812 struct pci_dev *pdev = tp->pci_dev;
4506 { 0x08, 0x0001, 0x0002 },
4507 { 0x09, 0x0000, 0x0080 },
4508 { 0x19, 0x0000, 0x0224 }
4509 };
4510
4511 rtl_csi_access_enable_1(ioaddr);
4512 4813
4513 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 4814 rtl_csi_access_enable_2(tp);
4514 4815
4515 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4816 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4516 4817
@@ -4524,8 +4825,6 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
4524 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 4825 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4525 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 4826 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4526 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); 4827 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
4527 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4528 ERIAR_EXGMAC);
4529 4828
4530 RTL_W8(MaxTxPacketSize, EarlySize); 4829 RTL_W8(MaxTxPacketSize, EarlySize);
4531 4830
@@ -4533,20 +4832,54 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
4533 4832
4534 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 4833 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4535 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 4834 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4835 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4836 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4837 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4838}
4839
4840static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
4841{
4842 void __iomem *ioaddr = tp->mmio_addr;
4843 static const struct ephy_info e_info_8168f_1[] = {
4844 { 0x06, 0x00c0, 0x0020 },
4845 { 0x08, 0x0001, 0x0002 },
4846 { 0x09, 0x0000, 0x0080 },
4847 { 0x19, 0x0000, 0x0224 }
4848 };
4849
4850 rtl_hw_start_8168f(tp);
4851
4852 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4853
4854 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4855 ERIAR_EXGMAC);
4536 4856
4537 /* Adjust EEE LED frequency */ 4857 /* Adjust EEE LED frequency */
4538 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); 4858 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
4859}
4539 4860
4540 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 4861static void rtl_hw_start_8411(struct rtl8169_private *tp)
4541 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); 4862{
4542 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4863 void __iomem *ioaddr = tp->mmio_addr;
4864 static const struct ephy_info e_info_8168f_1[] = {
4865 { 0x06, 0x00c0, 0x0020 },
4866 { 0x0f, 0xffff, 0x5200 },
4867 { 0x1e, 0x0000, 0x4000 },
4868 { 0x19, 0x0000, 0x0224 }
4869 };
4870
4871 rtl_hw_start_8168f(tp);
4872
4873 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4874
4875 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
4876 ERIAR_EXGMAC);
4543} 4877}
4544 4878
4545static void rtl_hw_start_8168(struct net_device *dev) 4879static void rtl_hw_start_8168(struct net_device *dev)
4546{ 4880{
4547 struct rtl8169_private *tp = netdev_priv(dev); 4881 struct rtl8169_private *tp = netdev_priv(dev);
4548 void __iomem *ioaddr = tp->mmio_addr; 4882 void __iomem *ioaddr = tp->mmio_addr;
4549 struct pci_dev *pdev = tp->pci_dev;
4550 4883
4551 RTL_W8(Cfg9346, Cfg9346_Unlock); 4884 RTL_W8(Cfg9346, Cfg9346_Unlock);
4552 4885
@@ -4577,67 +4910,71 @@ static void rtl_hw_start_8168(struct net_device *dev)
4577 4910
4578 switch (tp->mac_version) { 4911 switch (tp->mac_version) {
4579 case RTL_GIGA_MAC_VER_11: 4912 case RTL_GIGA_MAC_VER_11:
4580 rtl_hw_start_8168bb(ioaddr, pdev); 4913 rtl_hw_start_8168bb(tp);
4581 break; 4914 break;
4582 4915
4583 case RTL_GIGA_MAC_VER_12: 4916 case RTL_GIGA_MAC_VER_12:
4584 case RTL_GIGA_MAC_VER_17: 4917 case RTL_GIGA_MAC_VER_17:
4585 rtl_hw_start_8168bef(ioaddr, pdev); 4918 rtl_hw_start_8168bef(tp);
4586 break; 4919 break;
4587 4920
4588 case RTL_GIGA_MAC_VER_18: 4921 case RTL_GIGA_MAC_VER_18:
4589 rtl_hw_start_8168cp_1(ioaddr, pdev); 4922 rtl_hw_start_8168cp_1(tp);
4590 break; 4923 break;
4591 4924
4592 case RTL_GIGA_MAC_VER_19: 4925 case RTL_GIGA_MAC_VER_19:
4593 rtl_hw_start_8168c_1(ioaddr, pdev); 4926 rtl_hw_start_8168c_1(tp);
4594 break; 4927 break;
4595 4928
4596 case RTL_GIGA_MAC_VER_20: 4929 case RTL_GIGA_MAC_VER_20:
4597 rtl_hw_start_8168c_2(ioaddr, pdev); 4930 rtl_hw_start_8168c_2(tp);
4598 break; 4931 break;
4599 4932
4600 case RTL_GIGA_MAC_VER_21: 4933 case RTL_GIGA_MAC_VER_21:
4601 rtl_hw_start_8168c_3(ioaddr, pdev); 4934 rtl_hw_start_8168c_3(tp);
4602 break; 4935 break;
4603 4936
4604 case RTL_GIGA_MAC_VER_22: 4937 case RTL_GIGA_MAC_VER_22:
4605 rtl_hw_start_8168c_4(ioaddr, pdev); 4938 rtl_hw_start_8168c_4(tp);
4606 break; 4939 break;
4607 4940
4608 case RTL_GIGA_MAC_VER_23: 4941 case RTL_GIGA_MAC_VER_23:
4609 rtl_hw_start_8168cp_2(ioaddr, pdev); 4942 rtl_hw_start_8168cp_2(tp);
4610 break; 4943 break;
4611 4944
4612 case RTL_GIGA_MAC_VER_24: 4945 case RTL_GIGA_MAC_VER_24:
4613 rtl_hw_start_8168cp_3(ioaddr, pdev); 4946 rtl_hw_start_8168cp_3(tp);
4614 break; 4947 break;
4615 4948
4616 case RTL_GIGA_MAC_VER_25: 4949 case RTL_GIGA_MAC_VER_25:
4617 case RTL_GIGA_MAC_VER_26: 4950 case RTL_GIGA_MAC_VER_26:
4618 case RTL_GIGA_MAC_VER_27: 4951 case RTL_GIGA_MAC_VER_27:
4619 rtl_hw_start_8168d(ioaddr, pdev); 4952 rtl_hw_start_8168d(tp);
4620 break; 4953 break;
4621 4954
4622 case RTL_GIGA_MAC_VER_28: 4955 case RTL_GIGA_MAC_VER_28:
4623 rtl_hw_start_8168d_4(ioaddr, pdev); 4956 rtl_hw_start_8168d_4(tp);
4624 break; 4957 break;
4625 4958
4626 case RTL_GIGA_MAC_VER_31: 4959 case RTL_GIGA_MAC_VER_31:
4627 rtl_hw_start_8168dp(ioaddr, pdev); 4960 rtl_hw_start_8168dp(tp);
4628 break; 4961 break;
4629 4962
4630 case RTL_GIGA_MAC_VER_32: 4963 case RTL_GIGA_MAC_VER_32:
4631 case RTL_GIGA_MAC_VER_33: 4964 case RTL_GIGA_MAC_VER_33:
4632 rtl_hw_start_8168e_1(ioaddr, pdev); 4965 rtl_hw_start_8168e_1(tp);
4633 break; 4966 break;
4634 case RTL_GIGA_MAC_VER_34: 4967 case RTL_GIGA_MAC_VER_34:
4635 rtl_hw_start_8168e_2(ioaddr, pdev); 4968 rtl_hw_start_8168e_2(tp);
4636 break; 4969 break;
4637 4970
4638 case RTL_GIGA_MAC_VER_35: 4971 case RTL_GIGA_MAC_VER_35:
4639 case RTL_GIGA_MAC_VER_36: 4972 case RTL_GIGA_MAC_VER_36:
4640 rtl_hw_start_8168f_1(ioaddr, pdev); 4973 rtl_hw_start_8168f_1(tp);
4974 break;
4975
4976 case RTL_GIGA_MAC_VER_38:
4977 rtl_hw_start_8411(tp);
4641 break; 4978 break;
4642 4979
4643 default: 4980 default:
@@ -4664,8 +5001,10 @@ static void rtl_hw_start_8168(struct net_device *dev)
4664 PktCntrDisable | \ 5001 PktCntrDisable | \
4665 Mac_dbgo_sel) 5002 Mac_dbgo_sel)
4666 5003
4667static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) 5004static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
4668{ 5005{
5006 void __iomem *ioaddr = tp->mmio_addr;
5007 struct pci_dev *pdev = tp->pci_dev;
4669 static const struct ephy_info e_info_8102e_1[] = { 5008 static const struct ephy_info e_info_8102e_1[] = {
4670 { 0x01, 0, 0x6e65 }, 5009 { 0x01, 0, 0x6e65 },
4671 { 0x02, 0, 0x091f }, 5010 { 0x02, 0, 0x091f },
@@ -4678,7 +5017,7 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4678 }; 5017 };
4679 u8 cfg1; 5018 u8 cfg1;
4680 5019
4681 rtl_csi_access_enable_2(ioaddr); 5020 rtl_csi_access_enable_2(tp);
4682 5021
4683 RTL_W8(DBG_REG, FIX_NAK_1); 5022 RTL_W8(DBG_REG, FIX_NAK_1);
4684 5023
@@ -4695,9 +5034,12 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4695 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); 5034 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
4696} 5035}
4697 5036
4698static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) 5037static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
4699{ 5038{
4700 rtl_csi_access_enable_2(ioaddr); 5039 void __iomem *ioaddr = tp->mmio_addr;
5040 struct pci_dev *pdev = tp->pci_dev;
5041
5042 rtl_csi_access_enable_2(tp);
4701 5043
4702 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5044 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4703 5045
@@ -4705,15 +5047,16 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
4705 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 5047 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4706} 5048}
4707 5049
4708static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) 5050static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
4709{ 5051{
4710 rtl_hw_start_8102e_2(ioaddr, pdev); 5052 rtl_hw_start_8102e_2(tp);
4711 5053
4712 rtl_ephy_write(ioaddr, 0x03, 0xc2f9); 5054 rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
4713} 5055}
4714 5056
4715static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) 5057static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
4716{ 5058{
5059 void __iomem *ioaddr = tp->mmio_addr;
4717 static const struct ephy_info e_info_8105e_1[] = { 5060 static const struct ephy_info e_info_8105e_1[] = {
4718 { 0x07, 0, 0x4000 }, 5061 { 0x07, 0, 0x4000 },
4719 { 0x19, 0, 0x0200 }, 5062 { 0x19, 0, 0x0200 },
@@ -4737,12 +5080,44 @@ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4737 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5080 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
4738} 5081}
4739 5082
4740static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev) 5083static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
4741{ 5084{
4742 rtl_hw_start_8105e_1(ioaddr, pdev); 5085 void __iomem *ioaddr = tp->mmio_addr;
5086
5087 rtl_hw_start_8105e_1(tp);
4743 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); 5088 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
4744} 5089}
4745 5090
5091static void rtl_hw_start_8402(struct rtl8169_private *tp)
5092{
5093 void __iomem *ioaddr = tp->mmio_addr;
5094 static const struct ephy_info e_info_8402[] = {
5095 { 0x19, 0xffff, 0xff64 },
5096 { 0x1e, 0, 0x4000 }
5097 };
5098
5099 rtl_csi_access_enable_2(tp);
5100
5101 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5102 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5103
5104 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5105 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5106
5107 rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
5108
5109 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5110
5111 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5112 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5113 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5114 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5115 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5116 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5117 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
5118 ERIAR_EXGMAC);
5119}
5120
4746static void rtl_hw_start_8101(struct net_device *dev) 5121static void rtl_hw_start_8101(struct net_device *dev)
4747{ 5122{
4748 struct rtl8169_private *tp = netdev_priv(dev); 5123 struct rtl8169_private *tp = netdev_priv(dev);
@@ -4766,22 +5141,26 @@ static void rtl_hw_start_8101(struct net_device *dev)
4766 5141
4767 switch (tp->mac_version) { 5142 switch (tp->mac_version) {
4768 case RTL_GIGA_MAC_VER_07: 5143 case RTL_GIGA_MAC_VER_07:
4769 rtl_hw_start_8102e_1(ioaddr, pdev); 5144 rtl_hw_start_8102e_1(tp);
4770 break; 5145 break;
4771 5146
4772 case RTL_GIGA_MAC_VER_08: 5147 case RTL_GIGA_MAC_VER_08:
4773 rtl_hw_start_8102e_3(ioaddr, pdev); 5148 rtl_hw_start_8102e_3(tp);
4774 break; 5149 break;
4775 5150
4776 case RTL_GIGA_MAC_VER_09: 5151 case RTL_GIGA_MAC_VER_09:
4777 rtl_hw_start_8102e_2(ioaddr, pdev); 5152 rtl_hw_start_8102e_2(tp);
4778 break; 5153 break;
4779 5154
4780 case RTL_GIGA_MAC_VER_29: 5155 case RTL_GIGA_MAC_VER_29:
4781 rtl_hw_start_8105e_1(ioaddr, pdev); 5156 rtl_hw_start_8105e_1(tp);
4782 break; 5157 break;
4783 case RTL_GIGA_MAC_VER_30: 5158 case RTL_GIGA_MAC_VER_30:
4784 rtl_hw_start_8105e_2(ioaddr, pdev); 5159 rtl_hw_start_8105e_2(tp);
5160 break;
5161
5162 case RTL_GIGA_MAC_VER_37:
5163 rtl_hw_start_8402(tp);
4785 break; 5164 break;
4786 } 5165 }
4787 5166
@@ -6178,6 +6557,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6178 rtl_init_mdio_ops(tp); 6557 rtl_init_mdio_ops(tp);
6179 rtl_init_pll_power_ops(tp); 6558 rtl_init_pll_power_ops(tp);
6180 rtl_init_jumbo_ops(tp); 6559 rtl_init_jumbo_ops(tp);
6560 rtl_init_csi_ops(tp);
6181 6561
6182 rtl8169_print_mac_version(tp); 6562 rtl8169_print_mac_version(tp);
6183 6563
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 3fb2355af37e..46df3a04030c 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -4,11 +4,11 @@
4 4
5config SH_ETH 5config SH_ETH
6 tristate "Renesas SuperH Ethernet support" 6 tristate "Renesas SuperH Ethernet support"
7 depends on SUPERH && \ 7 depends on (SUPERH || ARCH_SHMOBILE) && \
8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ 8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ 9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ 10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
11 CPU_SUBTYPE_SH7757) 11 CPU_SUBTYPE_SH7757 || ARCH_R8A7740)
12 select CRC32 12 select CRC32
13 select NET_CORE 13 select NET_CORE
14 select MII 14 select MII
@@ -17,4 +17,5 @@ config SH_ETH
17 ---help--- 17 ---help---
18 Renesas SuperH Ethernet device driver. 18 Renesas SuperH Ethernet device driver.
19 This driver supporting CPUs are: 19 This driver supporting CPUs are:
20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757. 20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
21 and R8A7740.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d63e09b29a96..be3c22179161 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -386,6 +386,114 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
386 sh_eth_write(ndev, 0x0, CSMR); 386 sh_eth_write(ndev, 0x0, CSMR);
387} 387}
388 388
389#elif defined(CONFIG_ARCH_R8A7740)
390#define SH_ETH_HAS_TSU 1
391static void sh_eth_chip_reset(struct net_device *ndev)
392{
393 struct sh_eth_private *mdp = netdev_priv(ndev);
394 unsigned long mii;
395
396 /* reset device */
397 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
398 mdelay(1);
399
400 switch (mdp->phy_interface) {
401 case PHY_INTERFACE_MODE_GMII:
402 mii = 2;
403 break;
404 case PHY_INTERFACE_MODE_MII:
405 mii = 1;
406 break;
407 case PHY_INTERFACE_MODE_RMII:
408 default:
409 mii = 0;
410 break;
411 }
412 sh_eth_write(ndev, mii, RMII_MII);
413}
414
415static void sh_eth_reset(struct net_device *ndev)
416{
417 int cnt = 100;
418
419 sh_eth_write(ndev, EDSR_ENALL, EDSR);
420 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
421 while (cnt > 0) {
422 if (!(sh_eth_read(ndev, EDMR) & 0x3))
423 break;
424 mdelay(1);
425 cnt--;
426 }
427 if (cnt == 0)
428 printk(KERN_ERR "Device reset fail\n");
429
430 /* Table Init */
431 sh_eth_write(ndev, 0x0, TDLAR);
432 sh_eth_write(ndev, 0x0, TDFAR);
433 sh_eth_write(ndev, 0x0, TDFXR);
434 sh_eth_write(ndev, 0x0, TDFFR);
435 sh_eth_write(ndev, 0x0, RDLAR);
436 sh_eth_write(ndev, 0x0, RDFAR);
437 sh_eth_write(ndev, 0x0, RDFXR);
438 sh_eth_write(ndev, 0x0, RDFFR);
439}
440
441static void sh_eth_set_duplex(struct net_device *ndev)
442{
443 struct sh_eth_private *mdp = netdev_priv(ndev);
444
445 if (mdp->duplex) /* Full */
446 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
447 else /* Half */
448 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
449}
450
451static void sh_eth_set_rate(struct net_device *ndev)
452{
453 struct sh_eth_private *mdp = netdev_priv(ndev);
454
455 switch (mdp->speed) {
456 case 10: /* 10BASE */
457 sh_eth_write(ndev, GECMR_10, GECMR);
458 break;
459 case 100:/* 100BASE */
460 sh_eth_write(ndev, GECMR_100, GECMR);
461 break;
462 case 1000: /* 1000BASE */
463 sh_eth_write(ndev, GECMR_1000, GECMR);
464 break;
465 default:
466 break;
467 }
468}
469
470/* R8A7740 */
471static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
472 .chip_reset = sh_eth_chip_reset,
473 .set_duplex = sh_eth_set_duplex,
474 .set_rate = sh_eth_set_rate,
475
476 .ecsr_value = ECSR_ICD | ECSR_MPD,
477 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
478 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
479
480 .tx_check = EESR_TC1 | EESR_FTC,
481 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
482 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
483 EESR_ECI,
484 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
485 EESR_TFE,
486
487 .apr = 1,
488 .mpr = 1,
489 .tpauser = 1,
490 .bculr = 1,
491 .hw_swap = 1,
492 .no_trimd = 1,
493 .no_ade = 1,
494 .tsu = 1,
495};
496
389#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 497#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
390#define SH_ETH_RESET_DEFAULT 1 498#define SH_ETH_RESET_DEFAULT 1
391static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 499static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -443,7 +551,7 @@ static void sh_eth_reset(struct net_device *ndev)
443} 551}
444#endif 552#endif
445 553
446#if defined(CONFIG_CPU_SH4) 554#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
447static void sh_eth_set_receive_align(struct sk_buff *skb) 555static void sh_eth_set_receive_align(struct sk_buff *skb)
448{ 556{
449 int reserve; 557 int reserve;
@@ -919,6 +1027,10 @@ static int sh_eth_rx(struct net_device *ndev)
919 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1027 desc_status = edmac_to_cpu(mdp, rxdesc->status);
920 pkt_len = rxdesc->frame_length; 1028 pkt_len = rxdesc->frame_length;
921 1029
1030#if defined(CONFIG_ARCH_R8A7740)
1031 desc_status >>= 16;
1032#endif
1033
922 if (--boguscnt < 0) 1034 if (--boguscnt < 0)
923 break; 1035 break;
924 1036
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 0fa14afce23d..57b8e1fc5d15 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -372,7 +372,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
372}; 372};
373 373
374/* Driver's parameters */ 374/* Driver's parameters */
375#if defined(CONFIG_CPU_SH4) 375#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
376#define SH4_SKB_RX_ALIGN 32 376#define SH4_SKB_RX_ALIGN 32
377#else 377#else
378#define SH2_SH3_SKB_RX_ALIGN 2 378#define SH2_SH3_SKB_RX_ALIGN 2
@@ -381,7 +381,8 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
381/* 381/*
382 * Register's bits 382 * Register's bits
383 */ 383 */
384#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 384#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\
385 defined(CONFIG_ARCH_R8A7740)
385/* EDSR */ 386/* EDSR */
386enum EDSR_BIT { 387enum EDSR_BIT {
387 EDSR_ENT = 0x01, EDSR_ENR = 0x02, 388 EDSR_ENT = 0x01, EDSR_ENR = 0x02,
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 1895605abb35..8e9fda0c7aeb 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -937,7 +937,7 @@ static struct net_device_stats *s6gmac_stats(struct net_device *dev)
937 do { 937 do {
938 unsigned long flags; 938 unsigned long flags;
939 spin_lock_irqsave(&pd->lock, flags); 939 spin_lock_irqsave(&pd->lock, flags);
940 for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++) 940 for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
941 pd->stats[i] = 941 pd->stats[i] =
942 pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1); 942 pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
943 s6gmac_stats_collect(pd, &statinf[0][0]); 943 s6gmac_stats_collect(pd, &statinf[0][0]);
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index a284d6440538..32e55664df6e 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -39,9 +39,7 @@
39#define SC92031_NAME "sc92031" 39#define SC92031_NAME "sc92031"
40 40
41/* BAR 0 is MMIO, BAR 1 is PIO */ 41/* BAR 0 is MMIO, BAR 1 is PIO */
42#ifndef SC92031_USE_BAR 42#define SC92031_USE_PIO 0
43#define SC92031_USE_BAR 0
44#endif
45 43
46/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */ 44/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
47static int multicast_filter_limit = 64; 45static int multicast_filter_limit = 64;
@@ -366,7 +364,7 @@ static void sc92031_disable_interrupts(struct net_device *dev)
366 mmiowb(); 364 mmiowb();
367 365
368 /* wait for any concurrent interrupt/tasklet to finish */ 366 /* wait for any concurrent interrupt/tasklet to finish */
369 synchronize_irq(dev->irq); 367 synchronize_irq(priv->pdev->irq);
370 tasklet_disable(&priv->tasklet); 368 tasklet_disable(&priv->tasklet);
371} 369}
372 370
@@ -1114,10 +1112,13 @@ static void sc92031_tx_timeout(struct net_device *dev)
1114#ifdef CONFIG_NET_POLL_CONTROLLER 1112#ifdef CONFIG_NET_POLL_CONTROLLER
1115static void sc92031_poll_controller(struct net_device *dev) 1113static void sc92031_poll_controller(struct net_device *dev)
1116{ 1114{
1117 disable_irq(dev->irq); 1115 struct sc92031_priv *priv = netdev_priv(dev);
1118 if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE) 1116 const int irq = priv->pdev->irq;
1117
1118 disable_irq(irq);
1119 if (sc92031_interrupt(irq, dev) != IRQ_NONE)
1119 sc92031_tasklet((unsigned long)dev); 1120 sc92031_tasklet((unsigned long)dev);
1120 enable_irq(dev->irq); 1121 enable_irq(irq);
1121} 1122}
1122#endif 1123#endif
1123 1124
@@ -1402,7 +1403,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1402 struct net_device *dev; 1403 struct net_device *dev;
1403 struct sc92031_priv *priv; 1404 struct sc92031_priv *priv;
1404 u32 mac0, mac1; 1405 u32 mac0, mac1;
1405 unsigned long base_addr;
1406 1406
1407 err = pci_enable_device(pdev); 1407 err = pci_enable_device(pdev);
1408 if (unlikely(err < 0)) 1408 if (unlikely(err < 0))
@@ -1422,7 +1422,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1422 if (unlikely(err < 0)) 1422 if (unlikely(err < 0))
1423 goto out_request_regions; 1423 goto out_request_regions;
1424 1424
1425 port_base = pci_iomap(pdev, SC92031_USE_BAR, 0); 1425 port_base = pci_iomap(pdev, SC92031_USE_PIO, 0);
1426 if (unlikely(!port_base)) { 1426 if (unlikely(!port_base)) {
1427 err = -EIO; 1427 err = -EIO;
1428 goto out_iomap; 1428 goto out_iomap;
@@ -1437,14 +1437,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1437 pci_set_drvdata(pdev, dev); 1437 pci_set_drvdata(pdev, dev);
1438 SET_NETDEV_DEV(dev, &pdev->dev); 1438 SET_NETDEV_DEV(dev, &pdev->dev);
1439 1439
1440#if SC92031_USE_BAR == 0
1441 dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
1442 dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
1443#elif SC92031_USE_BAR == 1
1444 dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
1445#endif
1446 dev->irq = pdev->irq;
1447
1448 /* faked with skb_copy_and_csum_dev */ 1440 /* faked with skb_copy_and_csum_dev */
1449 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA | 1441 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
1450 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1442 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -1478,13 +1470,9 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1478 if (err < 0) 1470 if (err < 0)
1479 goto out_register_netdev; 1471 goto out_register_netdev;
1480 1472
1481#if SC92031_USE_BAR == 0
1482 base_addr = dev->mem_start;
1483#elif SC92031_USE_BAR == 1
1484 base_addr = dev->base_addr;
1485#endif
1486 printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name, 1473 printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
1487 base_addr, dev->dev_addr, dev->irq); 1474 (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr,
1475 pdev->irq);
1488 1476
1489 return 0; 1477 return 0;
1490 1478
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index a9deda8eaf63..4613591b43e7 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -729,7 +729,7 @@ static void sis190_tx_interrupt(struct net_device *dev,
729 * The interrupt handler does all of the Rx thread work and cleans up after 729 * The interrupt handler does all of the Rx thread work and cleans up after
730 * the Tx thread. 730 * the Tx thread.
731 */ 731 */
732static irqreturn_t sis190_interrupt(int irq, void *__dev) 732static irqreturn_t sis190_irq(int irq, void *__dev)
733{ 733{
734 struct net_device *dev = __dev; 734 struct net_device *dev = __dev;
735 struct sis190_private *tp = netdev_priv(dev); 735 struct sis190_private *tp = netdev_priv(dev);
@@ -772,11 +772,11 @@ out:
772static void sis190_netpoll(struct net_device *dev) 772static void sis190_netpoll(struct net_device *dev)
773{ 773{
774 struct sis190_private *tp = netdev_priv(dev); 774 struct sis190_private *tp = netdev_priv(dev);
775 struct pci_dev *pdev = tp->pci_dev; 775 const int irq = tp->pci_dev->irq;
776 776
777 disable_irq(pdev->irq); 777 disable_irq(irq);
778 sis190_interrupt(pdev->irq, dev); 778 sis190_irq(irq, dev);
779 enable_irq(pdev->irq); 779 enable_irq(irq);
780} 780}
781#endif 781#endif
782 782
@@ -1085,7 +1085,7 @@ static int sis190_open(struct net_device *dev)
1085 1085
1086 sis190_request_timer(dev); 1086 sis190_request_timer(dev);
1087 1087
1088 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev); 1088 rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
1089 if (rc < 0) 1089 if (rc < 0)
1090 goto err_release_timer_2; 1090 goto err_release_timer_2;
1091 1091
@@ -1097,11 +1097,9 @@ err_release_timer_2:
1097 sis190_delete_timer(dev); 1097 sis190_delete_timer(dev);
1098 sis190_rx_clear(tp); 1098 sis190_rx_clear(tp);
1099err_free_rx_1: 1099err_free_rx_1:
1100 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing, 1100 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1101 tp->rx_dma);
1102err_free_tx_0: 1101err_free_tx_0:
1103 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing, 1102 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1104 tp->tx_dma);
1105 goto out; 1103 goto out;
1106} 1104}
1107 1105
@@ -1141,7 +1139,7 @@ static void sis190_down(struct net_device *dev)
1141 1139
1142 spin_unlock_irq(&tp->lock); 1140 spin_unlock_irq(&tp->lock);
1143 1141
1144 synchronize_irq(dev->irq); 1142 synchronize_irq(tp->pci_dev->irq);
1145 1143
1146 if (!poll_locked) 1144 if (!poll_locked)
1147 poll_locked++; 1145 poll_locked++;
@@ -1161,7 +1159,7 @@ static int sis190_close(struct net_device *dev)
1161 1159
1162 sis190_down(dev); 1160 sis190_down(dev);
1163 1161
1164 free_irq(dev->irq, dev); 1162 free_irq(pdev->irq, dev);
1165 1163
1166 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); 1164 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1167 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); 1165 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
@@ -1884,8 +1882,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1884 dev->netdev_ops = &sis190_netdev_ops; 1882 dev->netdev_ops = &sis190_netdev_ops;
1885 1883
1886 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); 1884 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1887 dev->irq = pdev->irq;
1888 dev->base_addr = (unsigned long) 0xdead;
1889 dev->watchdog_timeo = SIS190_TX_TIMEOUT; 1885 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1890 1886
1891 spin_lock_init(&tp->lock); 1887 spin_lock_init(&tp->lock);
@@ -1902,7 +1898,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1902 netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n", 1898 netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1903 pci_name(pdev), 1899 pci_name(pdev),
1904 sis_chip_info[ent->driver_data].name, 1900 sis_chip_info[ent->driver_data].name,
1905 ioaddr, dev->irq, dev->dev_addr); 1901 ioaddr, pdev->irq, dev->dev_addr);
1906 netdev_info(dev, "%s mode.\n", 1902 netdev_info(dev, "%s mode.\n",
1907 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); 1903 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1908 } 1904 }
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 5ccf02e7e3ad..203d9c6ec23a 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -168,6 +168,8 @@ struct sis900_private {
168 unsigned int cur_phy; 168 unsigned int cur_phy;
169 struct mii_if_info mii_info; 169 struct mii_if_info mii_info;
170 170
171 void __iomem *ioaddr;
172
171 struct timer_list timer; /* Link status detection timer. */ 173 struct timer_list timer; /* Link status detection timer. */
172 u8 autong_complete; /* 1: auto-negotiate complete */ 174 u8 autong_complete; /* 1: auto-negotiate complete */
173 175
@@ -201,13 +203,18 @@ MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtere
201MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt"); 203MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
202MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level"); 204MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
203 205
206#define sw32(reg, val) iowrite32(val, ioaddr + (reg))
207#define sw8(reg, val) iowrite8(val, ioaddr + (reg))
208#define sr32(reg) ioread32(ioaddr + (reg))
209#define sr16(reg) ioread16(ioaddr + (reg))
210
204#ifdef CONFIG_NET_POLL_CONTROLLER 211#ifdef CONFIG_NET_POLL_CONTROLLER
205static void sis900_poll(struct net_device *dev); 212static void sis900_poll(struct net_device *dev);
206#endif 213#endif
207static int sis900_open(struct net_device *net_dev); 214static int sis900_open(struct net_device *net_dev);
208static int sis900_mii_probe (struct net_device * net_dev); 215static int sis900_mii_probe (struct net_device * net_dev);
209static void sis900_init_rxfilter (struct net_device * net_dev); 216static void sis900_init_rxfilter (struct net_device * net_dev);
210static u16 read_eeprom(long ioaddr, int location); 217static u16 read_eeprom(void __iomem *ioaddr, int location);
211static int mdio_read(struct net_device *net_dev, int phy_id, int location); 218static int mdio_read(struct net_device *net_dev, int phy_id, int location);
212static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val); 219static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
213static void sis900_timer(unsigned long data); 220static void sis900_timer(unsigned long data);
@@ -231,7 +238,7 @@ static u16 sis900_default_phy(struct net_device * net_dev);
231static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy); 238static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
232static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr); 239static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
233static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr); 240static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
234static void sis900_set_mode (long ioaddr, int speed, int duplex); 241static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
235static const struct ethtool_ops sis900_ethtool_ops; 242static const struct ethtool_ops sis900_ethtool_ops;
236 243
237/** 244/**
@@ -246,7 +253,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
246 253
247static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) 254static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
248{ 255{
249 long ioaddr = pci_resource_start(pci_dev, 0); 256 struct sis900_private *sis_priv = netdev_priv(net_dev);
257 void __iomem *ioaddr = sis_priv->ioaddr;
250 u16 signature; 258 u16 signature;
251 int i; 259 int i;
252 260
@@ -325,29 +333,30 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
325static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, 333static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
326 struct net_device *net_dev) 334 struct net_device *net_dev)
327{ 335{
328 long ioaddr = net_dev->base_addr; 336 struct sis900_private *sis_priv = netdev_priv(net_dev);
337 void __iomem *ioaddr = sis_priv->ioaddr;
329 u32 rfcrSave; 338 u32 rfcrSave;
330 u32 i; 339 u32 i;
331 340
332 rfcrSave = inl(rfcr + ioaddr); 341 rfcrSave = sr32(rfcr);
333 342
334 outl(rfcrSave | RELOAD, ioaddr + cr); 343 sw32(cr, rfcrSave | RELOAD);
335 outl(0, ioaddr + cr); 344 sw32(cr, 0);
336 345
337 /* disable packet filtering before setting filter */ 346 /* disable packet filtering before setting filter */
338 outl(rfcrSave & ~RFEN, rfcr + ioaddr); 347 sw32(rfcr, rfcrSave & ~RFEN);
339 348
340 /* load MAC addr to filter data register */ 349 /* load MAC addr to filter data register */
341 for (i = 0 ; i < 3 ; i++) { 350 for (i = 0 ; i < 3 ; i++) {
342 outl((i << RFADDR_shift), ioaddr + rfcr); 351 sw32(rfcr, (i << RFADDR_shift));
343 *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); 352 *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
344 } 353 }
345 354
346 /* Store MAC Address in perm_addr */ 355 /* Store MAC Address in perm_addr */
347 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 356 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
348 357
349 /* enable packet filtering */ 358 /* enable packet filtering */
350 outl(rfcrSave | RFEN, rfcr + ioaddr); 359 sw32(rfcr, rfcrSave | RFEN);
351 360
352 return 1; 361 return 1;
353} 362}
@@ -371,31 +380,30 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
371static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, 380static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
372 struct net_device *net_dev) 381 struct net_device *net_dev)
373{ 382{
374 long ioaddr = net_dev->base_addr; 383 struct sis900_private *sis_priv = netdev_priv(net_dev);
375 long ee_addr = ioaddr + mear; 384 void __iomem *ioaddr = sis_priv->ioaddr;
376 u32 waittime = 0; 385 int wait, rc = 0;
377 int i;
378 386
379 outl(EEREQ, ee_addr); 387 sw32(mear, EEREQ);
380 while(waittime < 2000) { 388 for (wait = 0; wait < 2000; wait++) {
381 if(inl(ee_addr) & EEGNT) { 389 if (sr32(mear) & EEGNT) {
390 u16 *mac = (u16 *)net_dev->dev_addr;
391 int i;
382 392
383 /* get MAC address from EEPROM */ 393 /* get MAC address from EEPROM */
384 for (i = 0; i < 3; i++) 394 for (i = 0; i < 3; i++)
385 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 395 mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
386 396
387 /* Store MAC Address in perm_addr */ 397 /* Store MAC Address in perm_addr */
388 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 398 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
389 399
390 outl(EEDONE, ee_addr); 400 rc = 1;
391 return 1; 401 break;
392 } else {
393 udelay(1);
394 waittime ++;
395 } 402 }
403 udelay(1);
396 } 404 }
397 outl(EEDONE, ee_addr); 405 sw32(mear, EEDONE);
398 return 0; 406 return rc;
399} 407}
400 408
401static const struct net_device_ops sis900_netdev_ops = { 409static const struct net_device_ops sis900_netdev_ops = {
@@ -433,7 +441,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
433 struct pci_dev *dev; 441 struct pci_dev *dev;
434 dma_addr_t ring_dma; 442 dma_addr_t ring_dma;
435 void *ring_space; 443 void *ring_space;
436 long ioaddr; 444 void __iomem *ioaddr;
437 int i, ret; 445 int i, ret;
438 const char *card_name = card_names[pci_id->driver_data]; 446 const char *card_name = card_names[pci_id->driver_data];
439 const char *dev_name = pci_name(pci_dev); 447 const char *dev_name = pci_name(pci_dev);
@@ -464,14 +472,17 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
464 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 472 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
465 473
466 /* We do a request_region() to register /proc/ioports info. */ 474 /* We do a request_region() to register /proc/ioports info. */
467 ioaddr = pci_resource_start(pci_dev, 0);
468 ret = pci_request_regions(pci_dev, "sis900"); 475 ret = pci_request_regions(pci_dev, "sis900");
469 if (ret) 476 if (ret)
470 goto err_out; 477 goto err_out;
471 478
479 /* IO region. */
480 ioaddr = pci_iomap(pci_dev, 0, 0);
481 if (!ioaddr)
482 goto err_out_cleardev;
483
472 sis_priv = netdev_priv(net_dev); 484 sis_priv = netdev_priv(net_dev);
473 net_dev->base_addr = ioaddr; 485 sis_priv->ioaddr = ioaddr;
474 net_dev->irq = pci_dev->irq;
475 sis_priv->pci_dev = pci_dev; 486 sis_priv->pci_dev = pci_dev;
476 spin_lock_init(&sis_priv->lock); 487 spin_lock_init(&sis_priv->lock);
477 488
@@ -480,7 +491,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
480 ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma); 491 ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
481 if (!ring_space) { 492 if (!ring_space) {
482 ret = -ENOMEM; 493 ret = -ENOMEM;
483 goto err_out_cleardev; 494 goto err_out_unmap;
484 } 495 }
485 sis_priv->tx_ring = ring_space; 496 sis_priv->tx_ring = ring_space;
486 sis_priv->tx_ring_dma = ring_dma; 497 sis_priv->tx_ring_dma = ring_dma;
@@ -534,7 +545,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
534 545
535 /* 630ET : set the mii access mode as software-mode */ 546 /* 630ET : set the mii access mode as software-mode */
536 if (sis_priv->chipset_rev == SIS630ET_900_REV) 547 if (sis_priv->chipset_rev == SIS630ET_900_REV)
537 outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr); 548 sw32(cr, ACCESSMODE | sr32(cr));
538 549
539 /* probe for mii transceiver */ 550 /* probe for mii transceiver */
540 if (sis900_mii_probe(net_dev) == 0) { 551 if (sis900_mii_probe(net_dev) == 0) {
@@ -556,25 +567,27 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
556 goto err_unmap_rx; 567 goto err_unmap_rx;
557 568
558 /* print some information about our NIC */ 569 /* print some information about our NIC */
559 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", 570 printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
560 net_dev->name, card_name, ioaddr, net_dev->irq, 571 net_dev->name, card_name, ioaddr, pci_dev->irq,
561 net_dev->dev_addr); 572 net_dev->dev_addr);
562 573
563 /* Detect Wake on Lan support */ 574 /* Detect Wake on Lan support */
564 ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27; 575 ret = (sr32(CFGPMC) & PMESP) >> 27;
565 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) 576 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
566 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); 577 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
567 578
568 return 0; 579 return 0;
569 580
570 err_unmap_rx: 581err_unmap_rx:
571 pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, 582 pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
572 sis_priv->rx_ring_dma); 583 sis_priv->rx_ring_dma);
573 err_unmap_tx: 584err_unmap_tx:
574 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, 585 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
575 sis_priv->tx_ring_dma); 586 sis_priv->tx_ring_dma);
576 err_out_cleardev: 587err_out_unmap:
577 pci_set_drvdata(pci_dev, NULL); 588 pci_iounmap(pci_dev, ioaddr);
589err_out_cleardev:
590 pci_set_drvdata(pci_dev, NULL);
578 pci_release_regions(pci_dev); 591 pci_release_regions(pci_dev);
579 err_out: 592 err_out:
580 free_netdev(net_dev); 593 free_netdev(net_dev);
@@ -798,7 +811,7 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
798 811
799 812
800/* Delay between EEPROM clock transitions. */ 813/* Delay between EEPROM clock transitions. */
801#define eeprom_delay() inl(ee_addr) 814#define eeprom_delay() sr32(mear)
802 815
803/** 816/**
804 * read_eeprom - Read Serial EEPROM 817 * read_eeprom - Read Serial EEPROM
@@ -809,41 +822,41 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
809 * Note that location is in word (16 bits) unit 822 * Note that location is in word (16 bits) unit
810 */ 823 */
811 824
812static u16 __devinit read_eeprom(long ioaddr, int location) 825static u16 __devinit read_eeprom(void __iomem *ioaddr, int location)
813{ 826{
827 u32 read_cmd = location | EEread;
814 int i; 828 int i;
815 u16 retval = 0; 829 u16 retval = 0;
816 long ee_addr = ioaddr + mear;
817 u32 read_cmd = location | EEread;
818 830
819 outl(0, ee_addr); 831 sw32(mear, 0);
820 eeprom_delay(); 832 eeprom_delay();
821 outl(EECS, ee_addr); 833 sw32(mear, EECS);
822 eeprom_delay(); 834 eeprom_delay();
823 835
824 /* Shift the read command (9) bits out. */ 836 /* Shift the read command (9) bits out. */
825 for (i = 8; i >= 0; i--) { 837 for (i = 8; i >= 0; i--) {
826 u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS; 838 u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
827 outl(dataval, ee_addr); 839
840 sw32(mear, dataval);
828 eeprom_delay(); 841 eeprom_delay();
829 outl(dataval | EECLK, ee_addr); 842 sw32(mear, dataval | EECLK);
830 eeprom_delay(); 843 eeprom_delay();
831 } 844 }
832 outl(EECS, ee_addr); 845 sw32(mear, EECS);
833 eeprom_delay(); 846 eeprom_delay();
834 847
835 /* read the 16-bits data in */ 848 /* read the 16-bits data in */
836 for (i = 16; i > 0; i--) { 849 for (i = 16; i > 0; i--) {
837 outl(EECS, ee_addr); 850 sw32(mear, EECS);
838 eeprom_delay(); 851 eeprom_delay();
839 outl(EECS | EECLK, ee_addr); 852 sw32(mear, EECS | EECLK);
840 eeprom_delay(); 853 eeprom_delay();
841 retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0); 854 retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
842 eeprom_delay(); 855 eeprom_delay();
843 } 856 }
844 857
845 /* Terminate the EEPROM access. */ 858 /* Terminate the EEPROM access. */
846 outl(0, ee_addr); 859 sw32(mear, 0);
847 eeprom_delay(); 860 eeprom_delay();
848 861
849 return retval; 862 return retval;
@@ -852,24 +865,27 @@ static u16 __devinit read_eeprom(long ioaddr, int location)
852/* Read and write the MII management registers using software-generated 865/* Read and write the MII management registers using software-generated
853 serial MDIO protocol. Note that the command bits and data bits are 866 serial MDIO protocol. Note that the command bits and data bits are
854 send out separately */ 867 send out separately */
855#define mdio_delay() inl(mdio_addr) 868#define mdio_delay() sr32(mear)
856 869
857static void mdio_idle(long mdio_addr) 870static void mdio_idle(struct sis900_private *sp)
858{ 871{
859 outl(MDIO | MDDIR, mdio_addr); 872 void __iomem *ioaddr = sp->ioaddr;
873
874 sw32(mear, MDIO | MDDIR);
860 mdio_delay(); 875 mdio_delay();
861 outl(MDIO | MDDIR | MDC, mdio_addr); 876 sw32(mear, MDIO | MDDIR | MDC);
862} 877}
863 878
864/* Syncronize the MII management interface by shifting 32 one bits out. */ 879/* Synchronize the MII management interface by shifting 32 one bits out. */
865static void mdio_reset(long mdio_addr) 880static void mdio_reset(struct sis900_private *sp)
866{ 881{
882 void __iomem *ioaddr = sp->ioaddr;
867 int i; 883 int i;
868 884
869 for (i = 31; i >= 0; i--) { 885 for (i = 31; i >= 0; i--) {
870 outl(MDDIR | MDIO, mdio_addr); 886 sw32(mear, MDDIR | MDIO);
871 mdio_delay(); 887 mdio_delay();
872 outl(MDDIR | MDIO | MDC, mdio_addr); 888 sw32(mear, MDDIR | MDIO | MDC);
873 mdio_delay(); 889 mdio_delay();
874 } 890 }
875} 891}
@@ -887,31 +903,33 @@ static void mdio_reset(long mdio_addr)
887 903
888static int mdio_read(struct net_device *net_dev, int phy_id, int location) 904static int mdio_read(struct net_device *net_dev, int phy_id, int location)
889{ 905{
890 long mdio_addr = net_dev->base_addr + mear;
891 int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift); 906 int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
907 struct sis900_private *sp = netdev_priv(net_dev);
908 void __iomem *ioaddr = sp->ioaddr;
892 u16 retval = 0; 909 u16 retval = 0;
893 int i; 910 int i;
894 911
895 mdio_reset(mdio_addr); 912 mdio_reset(sp);
896 mdio_idle(mdio_addr); 913 mdio_idle(sp);
897 914
898 for (i = 15; i >= 0; i--) { 915 for (i = 15; i >= 0; i--) {
899 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR; 916 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
900 outl(dataval, mdio_addr); 917
918 sw32(mear, dataval);
901 mdio_delay(); 919 mdio_delay();
902 outl(dataval | MDC, mdio_addr); 920 sw32(mear, dataval | MDC);
903 mdio_delay(); 921 mdio_delay();
904 } 922 }
905 923
906 /* Read the 16 data bits. */ 924 /* Read the 16 data bits. */
907 for (i = 16; i > 0; i--) { 925 for (i = 16; i > 0; i--) {
908 outl(0, mdio_addr); 926 sw32(mear, 0);
909 mdio_delay(); 927 mdio_delay();
910 retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0); 928 retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
911 outl(MDC, mdio_addr); 929 sw32(mear, MDC);
912 mdio_delay(); 930 mdio_delay();
913 } 931 }
914 outl(0x00, mdio_addr); 932 sw32(mear, 0x00);
915 933
916 return retval; 934 return retval;
917} 935}
@@ -931,19 +949,21 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location)
931static void mdio_write(struct net_device *net_dev, int phy_id, int location, 949static void mdio_write(struct net_device *net_dev, int phy_id, int location,
932 int value) 950 int value)
933{ 951{
934 long mdio_addr = net_dev->base_addr + mear;
935 int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift); 952 int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
953 struct sis900_private *sp = netdev_priv(net_dev);
954 void __iomem *ioaddr = sp->ioaddr;
936 int i; 955 int i;
937 956
938 mdio_reset(mdio_addr); 957 mdio_reset(sp);
939 mdio_idle(mdio_addr); 958 mdio_idle(sp);
940 959
941 /* Shift the command bits out. */ 960 /* Shift the command bits out. */
942 for (i = 15; i >= 0; i--) { 961 for (i = 15; i >= 0; i--) {
943 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR; 962 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
944 outb(dataval, mdio_addr); 963
964 sw8(mear, dataval);
945 mdio_delay(); 965 mdio_delay();
946 outb(dataval | MDC, mdio_addr); 966 sw8(mear, dataval | MDC);
947 mdio_delay(); 967 mdio_delay();
948 } 968 }
949 mdio_delay(); 969 mdio_delay();
@@ -951,21 +971,22 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
951 /* Shift the value bits out. */ 971 /* Shift the value bits out. */
952 for (i = 15; i >= 0; i--) { 972 for (i = 15; i >= 0; i--) {
953 int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR; 973 int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
954 outl(dataval, mdio_addr); 974
975 sw32(mear, dataval);
955 mdio_delay(); 976 mdio_delay();
956 outl(dataval | MDC, mdio_addr); 977 sw32(mear, dataval | MDC);
957 mdio_delay(); 978 mdio_delay();
958 } 979 }
959 mdio_delay(); 980 mdio_delay();
960 981
961 /* Clear out extra bits. */ 982 /* Clear out extra bits. */
962 for (i = 2; i > 0; i--) { 983 for (i = 2; i > 0; i--) {
963 outb(0, mdio_addr); 984 sw8(mear, 0);
964 mdio_delay(); 985 mdio_delay();
965 outb(MDC, mdio_addr); 986 sw8(mear, MDC);
966 mdio_delay(); 987 mdio_delay();
967 } 988 }
968 outl(0x00, mdio_addr); 989 sw32(mear, 0x00);
969} 990}
970 991
971 992
@@ -1000,9 +1021,12 @@ static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
1000*/ 1021*/
1001static void sis900_poll(struct net_device *dev) 1022static void sis900_poll(struct net_device *dev)
1002{ 1023{
1003 disable_irq(dev->irq); 1024 struct sis900_private *sp = netdev_priv(dev);
1004 sis900_interrupt(dev->irq, dev); 1025 const int irq = sp->pci_dev->irq;
1005 enable_irq(dev->irq); 1026
1027 disable_irq(irq);
1028 sis900_interrupt(irq, dev);
1029 enable_irq(irq);
1006} 1030}
1007#endif 1031#endif
1008 1032
@@ -1018,7 +1042,7 @@ static int
1018sis900_open(struct net_device *net_dev) 1042sis900_open(struct net_device *net_dev)
1019{ 1043{
1020 struct sis900_private *sis_priv = netdev_priv(net_dev); 1044 struct sis900_private *sis_priv = netdev_priv(net_dev);
1021 long ioaddr = net_dev->base_addr; 1045 void __iomem *ioaddr = sis_priv->ioaddr;
1022 int ret; 1046 int ret;
1023 1047
1024 /* Soft reset the chip. */ 1048 /* Soft reset the chip. */
@@ -1027,8 +1051,8 @@ sis900_open(struct net_device *net_dev)
1027 /* Equalizer workaround Rule */ 1051 /* Equalizer workaround Rule */
1028 sis630_set_eq(net_dev, sis_priv->chipset_rev); 1052 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1029 1053
1030 ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED, 1054 ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
1031 net_dev->name, net_dev); 1055 net_dev->name, net_dev);
1032 if (ret) 1056 if (ret)
1033 return ret; 1057 return ret;
1034 1058
@@ -1042,12 +1066,12 @@ sis900_open(struct net_device *net_dev)
1042 netif_start_queue(net_dev); 1066 netif_start_queue(net_dev);
1043 1067
1044 /* Workaround for EDB */ 1068 /* Workaround for EDB */
1045 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 1069 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
1046 1070
1047 /* Enable all known interrupts by setting the interrupt mask. */ 1071 /* Enable all known interrupts by setting the interrupt mask. */
1048 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 1072 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
1049 outl(RxENA | inl(ioaddr + cr), ioaddr + cr); 1073 sw32(cr, RxENA | sr32(cr));
1050 outl(IE, ioaddr + ier); 1074 sw32(ier, IE);
1051 1075
1052 sis900_check_mode(net_dev, sis_priv->mii); 1076 sis900_check_mode(net_dev, sis_priv->mii);
1053 1077
@@ -1074,31 +1098,30 @@ static void
1074sis900_init_rxfilter (struct net_device * net_dev) 1098sis900_init_rxfilter (struct net_device * net_dev)
1075{ 1099{
1076 struct sis900_private *sis_priv = netdev_priv(net_dev); 1100 struct sis900_private *sis_priv = netdev_priv(net_dev);
1077 long ioaddr = net_dev->base_addr; 1101 void __iomem *ioaddr = sis_priv->ioaddr;
1078 u32 rfcrSave; 1102 u32 rfcrSave;
1079 u32 i; 1103 u32 i;
1080 1104
1081 rfcrSave = inl(rfcr + ioaddr); 1105 rfcrSave = sr32(rfcr);
1082 1106
1083 /* disable packet filtering before setting filter */ 1107 /* disable packet filtering before setting filter */
1084 outl(rfcrSave & ~RFEN, rfcr + ioaddr); 1108 sw32(rfcr, rfcrSave & ~RFEN);
1085 1109
1086 /* load MAC addr to filter data register */ 1110 /* load MAC addr to filter data register */
1087 for (i = 0 ; i < 3 ; i++) { 1111 for (i = 0 ; i < 3 ; i++) {
1088 u32 w; 1112 u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
1089 1113
1090 w = (u32) *((u16 *)(net_dev->dev_addr)+i); 1114 sw32(rfcr, i << RFADDR_shift);
1091 outl((i << RFADDR_shift), ioaddr + rfcr); 1115 sw32(rfdr, w);
1092 outl(w, ioaddr + rfdr);
1093 1116
1094 if (netif_msg_hw(sis_priv)) { 1117 if (netif_msg_hw(sis_priv)) {
1095 printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n", 1118 printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n",
1096 net_dev->name, i, inl(ioaddr + rfdr)); 1119 net_dev->name, i, sr32(rfdr));
1097 } 1120 }
1098 } 1121 }
1099 1122
1100 /* enable packet filtering */ 1123 /* enable packet filtering */
1101 outl(rfcrSave | RFEN, rfcr + ioaddr); 1124 sw32(rfcr, rfcrSave | RFEN);
1102} 1125}
1103 1126
1104/** 1127/**
@@ -1112,7 +1135,7 @@ static void
1112sis900_init_tx_ring(struct net_device *net_dev) 1135sis900_init_tx_ring(struct net_device *net_dev)
1113{ 1136{
1114 struct sis900_private *sis_priv = netdev_priv(net_dev); 1137 struct sis900_private *sis_priv = netdev_priv(net_dev);
1115 long ioaddr = net_dev->base_addr; 1138 void __iomem *ioaddr = sis_priv->ioaddr;
1116 int i; 1139 int i;
1117 1140
1118 sis_priv->tx_full = 0; 1141 sis_priv->tx_full = 0;
@@ -1128,10 +1151,10 @@ sis900_init_tx_ring(struct net_device *net_dev)
1128 } 1151 }
1129 1152
1130 /* load Transmit Descriptor Register */ 1153 /* load Transmit Descriptor Register */
1131 outl(sis_priv->tx_ring_dma, ioaddr + txdp); 1154 sw32(txdp, sis_priv->tx_ring_dma);
1132 if (netif_msg_hw(sis_priv)) 1155 if (netif_msg_hw(sis_priv))
1133 printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n", 1156 printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
1134 net_dev->name, inl(ioaddr + txdp)); 1157 net_dev->name, sr32(txdp));
1135} 1158}
1136 1159
1137/** 1160/**
@@ -1146,7 +1169,7 @@ static void
1146sis900_init_rx_ring(struct net_device *net_dev) 1169sis900_init_rx_ring(struct net_device *net_dev)
1147{ 1170{
1148 struct sis900_private *sis_priv = netdev_priv(net_dev); 1171 struct sis900_private *sis_priv = netdev_priv(net_dev);
1149 long ioaddr = net_dev->base_addr; 1172 void __iomem *ioaddr = sis_priv->ioaddr;
1150 int i; 1173 int i;
1151 1174
1152 sis_priv->cur_rx = 0; 1175 sis_priv->cur_rx = 0;
@@ -1181,10 +1204,10 @@ sis900_init_rx_ring(struct net_device *net_dev)
1181 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC); 1204 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
1182 1205
1183 /* load Receive Descriptor Register */ 1206 /* load Receive Descriptor Register */
1184 outl(sis_priv->rx_ring_dma, ioaddr + rxdp); 1207 sw32(rxdp, sis_priv->rx_ring_dma);
1185 if (netif_msg_hw(sis_priv)) 1208 if (netif_msg_hw(sis_priv))
1186 printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n", 1209 printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
1187 net_dev->name, inl(ioaddr + rxdp)); 1210 net_dev->name, sr32(rxdp));
1188} 1211}
1189 1212
1190/** 1213/**
@@ -1298,7 +1321,7 @@ static void sis900_timer(unsigned long data)
1298 1321
1299 sis900_read_mode(net_dev, &speed, &duplex); 1322 sis900_read_mode(net_dev, &speed, &duplex);
1300 if (duplex){ 1323 if (duplex){
1301 sis900_set_mode(net_dev->base_addr, speed, duplex); 1324 sis900_set_mode(sis_priv, speed, duplex);
1302 sis630_set_eq(net_dev, sis_priv->chipset_rev); 1325 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1303 netif_start_queue(net_dev); 1326 netif_start_queue(net_dev);
1304 } 1327 }
@@ -1359,25 +1382,25 @@ static void sis900_timer(unsigned long data)
1359static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy) 1382static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
1360{ 1383{
1361 struct sis900_private *sis_priv = netdev_priv(net_dev); 1384 struct sis900_private *sis_priv = netdev_priv(net_dev);
1362 long ioaddr = net_dev->base_addr; 1385 void __iomem *ioaddr = sis_priv->ioaddr;
1363 int speed, duplex; 1386 int speed, duplex;
1364 1387
1365 if (mii_phy->phy_types == LAN) { 1388 if (mii_phy->phy_types == LAN) {
1366 outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg); 1389 sw32(cfg, ~EXD & sr32(cfg));
1367 sis900_set_capability(net_dev , mii_phy); 1390 sis900_set_capability(net_dev , mii_phy);
1368 sis900_auto_negotiate(net_dev, sis_priv->cur_phy); 1391 sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
1369 } else { 1392 } else {
1370 outl(EXD | inl(ioaddr + cfg), ioaddr + cfg); 1393 sw32(cfg, EXD | sr32(cfg));
1371 speed = HW_SPEED_HOME; 1394 speed = HW_SPEED_HOME;
1372 duplex = FDX_CAPABLE_HALF_SELECTED; 1395 duplex = FDX_CAPABLE_HALF_SELECTED;
1373 sis900_set_mode(ioaddr, speed, duplex); 1396 sis900_set_mode(sis_priv, speed, duplex);
1374 sis_priv->autong_complete = 1; 1397 sis_priv->autong_complete = 1;
1375 } 1398 }
1376} 1399}
1377 1400
1378/** 1401/**
1379 * sis900_set_mode - Set the media mode of mac register. 1402 * sis900_set_mode - Set the media mode of mac register.
1380 * @ioaddr: the address of the device 1403 * @sp: the device private data
1381 * @speed : the transmit speed to be determined 1404 * @speed : the transmit speed to be determined
1382 * @duplex: the duplex mode to be determined 1405 * @duplex: the duplex mode to be determined
1383 * 1406 *
@@ -1388,11 +1411,12 @@ static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_ph
1388 * double words. 1411 * double words.
1389 */ 1412 */
1390 1413
1391static void sis900_set_mode (long ioaddr, int speed, int duplex) 1414static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
1392{ 1415{
1416 void __iomem *ioaddr = sp->ioaddr;
1393 u32 tx_flags = 0, rx_flags = 0; 1417 u32 tx_flags = 0, rx_flags = 0;
1394 1418
1395 if (inl(ioaddr + cfg) & EDB_MASTER_EN) { 1419 if (sr32( cfg) & EDB_MASTER_EN) {
1396 tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) | 1420 tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
1397 (TX_FILL_THRESH << TxFILLT_shift); 1421 (TX_FILL_THRESH << TxFILLT_shift);
1398 rx_flags = DMA_BURST_64 << RxMXDMA_shift; 1422 rx_flags = DMA_BURST_64 << RxMXDMA_shift;
@@ -1420,8 +1444,8 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex)
1420 rx_flags |= RxAJAB; 1444 rx_flags |= RxAJAB;
1421#endif 1445#endif
1422 1446
1423 outl (tx_flags, ioaddr + txcfg); 1447 sw32(txcfg, tx_flags);
1424 outl (rx_flags, ioaddr + rxcfg); 1448 sw32(rxcfg, rx_flags);
1425} 1449}
1426 1450
1427/** 1451/**
@@ -1528,16 +1552,17 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
1528static void sis900_tx_timeout(struct net_device *net_dev) 1552static void sis900_tx_timeout(struct net_device *net_dev)
1529{ 1553{
1530 struct sis900_private *sis_priv = netdev_priv(net_dev); 1554 struct sis900_private *sis_priv = netdev_priv(net_dev);
1531 long ioaddr = net_dev->base_addr; 1555 void __iomem *ioaddr = sis_priv->ioaddr;
1532 unsigned long flags; 1556 unsigned long flags;
1533 int i; 1557 int i;
1534 1558
1535 if(netif_msg_tx_err(sis_priv)) 1559 if (netif_msg_tx_err(sis_priv)) {
1536 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n", 1560 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
1537 net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr)); 1561 net_dev->name, sr32(cr), sr32(isr));
1562 }
1538 1563
1539 /* Disable interrupts by clearing the interrupt mask. */ 1564 /* Disable interrupts by clearing the interrupt mask. */
1540 outl(0x0000, ioaddr + imr); 1565 sw32(imr, 0x0000);
1541 1566
1542 /* use spinlock to prevent interrupt handler accessing buffer ring */ 1567 /* use spinlock to prevent interrupt handler accessing buffer ring */
1543 spin_lock_irqsave(&sis_priv->lock, flags); 1568 spin_lock_irqsave(&sis_priv->lock, flags);
@@ -1566,10 +1591,10 @@ static void sis900_tx_timeout(struct net_device *net_dev)
1566 net_dev->trans_start = jiffies; /* prevent tx timeout */ 1591 net_dev->trans_start = jiffies; /* prevent tx timeout */
1567 1592
1568 /* load Transmit Descriptor Register */ 1593 /* load Transmit Descriptor Register */
1569 outl(sis_priv->tx_ring_dma, ioaddr + txdp); 1594 sw32(txdp, sis_priv->tx_ring_dma);
1570 1595
1571 /* Enable all known interrupts by setting the interrupt mask. */ 1596 /* Enable all known interrupts by setting the interrupt mask. */
1572 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 1597 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
1573} 1598}
1574 1599
1575/** 1600/**
@@ -1586,7 +1611,7 @@ static netdev_tx_t
1586sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 1611sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1587{ 1612{
1588 struct sis900_private *sis_priv = netdev_priv(net_dev); 1613 struct sis900_private *sis_priv = netdev_priv(net_dev);
1589 long ioaddr = net_dev->base_addr; 1614 void __iomem *ioaddr = sis_priv->ioaddr;
1590 unsigned int entry; 1615 unsigned int entry;
1591 unsigned long flags; 1616 unsigned long flags;
1592 unsigned int index_cur_tx, index_dirty_tx; 1617 unsigned int index_cur_tx, index_dirty_tx;
@@ -1608,7 +1633,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1608 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev, 1633 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
1609 skb->data, skb->len, PCI_DMA_TODEVICE); 1634 skb->data, skb->len, PCI_DMA_TODEVICE);
1610 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); 1635 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
1611 outl(TxENA | inl(ioaddr + cr), ioaddr + cr); 1636 sw32(cr, TxENA | sr32(cr));
1612 1637
1613 sis_priv->cur_tx ++; 1638 sis_priv->cur_tx ++;
1614 index_cur_tx = sis_priv->cur_tx; 1639 index_cur_tx = sis_priv->cur_tx;
@@ -1654,14 +1679,14 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1654 struct net_device *net_dev = dev_instance; 1679 struct net_device *net_dev = dev_instance;
1655 struct sis900_private *sis_priv = netdev_priv(net_dev); 1680 struct sis900_private *sis_priv = netdev_priv(net_dev);
1656 int boguscnt = max_interrupt_work; 1681 int boguscnt = max_interrupt_work;
1657 long ioaddr = net_dev->base_addr; 1682 void __iomem *ioaddr = sis_priv->ioaddr;
1658 u32 status; 1683 u32 status;
1659 unsigned int handled = 0; 1684 unsigned int handled = 0;
1660 1685
1661 spin_lock (&sis_priv->lock); 1686 spin_lock (&sis_priv->lock);
1662 1687
1663 do { 1688 do {
1664 status = inl(ioaddr + isr); 1689 status = sr32(isr);
1665 1690
1666 if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) 1691 if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
1667 /* nothing intresting happened */ 1692 /* nothing intresting happened */
@@ -1696,7 +1721,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1696 if(netif_msg_intr(sis_priv)) 1721 if(netif_msg_intr(sis_priv))
1697 printk(KERN_DEBUG "%s: exiting interrupt, " 1722 printk(KERN_DEBUG "%s: exiting interrupt, "
1698 "interrupt status = 0x%#8.8x.\n", 1723 "interrupt status = 0x%#8.8x.\n",
1699 net_dev->name, inl(ioaddr + isr)); 1724 net_dev->name, sr32(isr));
1700 1725
1701 spin_unlock (&sis_priv->lock); 1726 spin_unlock (&sis_priv->lock);
1702 return IRQ_RETVAL(handled); 1727 return IRQ_RETVAL(handled);
@@ -1715,7 +1740,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1715static int sis900_rx(struct net_device *net_dev) 1740static int sis900_rx(struct net_device *net_dev)
1716{ 1741{
1717 struct sis900_private *sis_priv = netdev_priv(net_dev); 1742 struct sis900_private *sis_priv = netdev_priv(net_dev);
1718 long ioaddr = net_dev->base_addr; 1743 void __iomem *ioaddr = sis_priv->ioaddr;
1719 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; 1744 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
1720 u32 rx_status = sis_priv->rx_ring[entry].cmdsts; 1745 u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
1721 int rx_work_limit; 1746 int rx_work_limit;
@@ -1847,7 +1872,7 @@ refill_rx_ring:
1847 } 1872 }
1848 } 1873 }
1849 /* re-enable the potentially idle receive state matchine */ 1874 /* re-enable the potentially idle receive state matchine */
1850 outl(RxENA | inl(ioaddr + cr), ioaddr + cr ); 1875 sw32(cr , RxENA | sr32(cr));
1851 1876
1852 return 0; 1877 return 0;
1853} 1878}
@@ -1932,31 +1957,31 @@ static void sis900_finish_xmit (struct net_device *net_dev)
1932 1957
1933static int sis900_close(struct net_device *net_dev) 1958static int sis900_close(struct net_device *net_dev)
1934{ 1959{
1935 long ioaddr = net_dev->base_addr;
1936 struct sis900_private *sis_priv = netdev_priv(net_dev); 1960 struct sis900_private *sis_priv = netdev_priv(net_dev);
1961 struct pci_dev *pdev = sis_priv->pci_dev;
1962 void __iomem *ioaddr = sis_priv->ioaddr;
1937 struct sk_buff *skb; 1963 struct sk_buff *skb;
1938 int i; 1964 int i;
1939 1965
1940 netif_stop_queue(net_dev); 1966 netif_stop_queue(net_dev);
1941 1967
1942 /* Disable interrupts by clearing the interrupt mask. */ 1968 /* Disable interrupts by clearing the interrupt mask. */
1943 outl(0x0000, ioaddr + imr); 1969 sw32(imr, 0x0000);
1944 outl(0x0000, ioaddr + ier); 1970 sw32(ier, 0x0000);
1945 1971
1946 /* Stop the chip's Tx and Rx Status Machine */ 1972 /* Stop the chip's Tx and Rx Status Machine */
1947 outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr); 1973 sw32(cr, RxDIS | TxDIS | sr32(cr));
1948 1974
1949 del_timer(&sis_priv->timer); 1975 del_timer(&sis_priv->timer);
1950 1976
1951 free_irq(net_dev->irq, net_dev); 1977 free_irq(pdev->irq, net_dev);
1952 1978
1953 /* Free Tx and RX skbuff */ 1979 /* Free Tx and RX skbuff */
1954 for (i = 0; i < NUM_RX_DESC; i++) { 1980 for (i = 0; i < NUM_RX_DESC; i++) {
1955 skb = sis_priv->rx_skbuff[i]; 1981 skb = sis_priv->rx_skbuff[i];
1956 if (skb) { 1982 if (skb) {
1957 pci_unmap_single(sis_priv->pci_dev, 1983 pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
1958 sis_priv->rx_ring[i].bufptr, 1984 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1959 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1960 dev_kfree_skb(skb); 1985 dev_kfree_skb(skb);
1961 sis_priv->rx_skbuff[i] = NULL; 1986 sis_priv->rx_skbuff[i] = NULL;
1962 } 1987 }
@@ -1964,9 +1989,8 @@ static int sis900_close(struct net_device *net_dev)
1964 for (i = 0; i < NUM_TX_DESC; i++) { 1989 for (i = 0; i < NUM_TX_DESC; i++) {
1965 skb = sis_priv->tx_skbuff[i]; 1990 skb = sis_priv->tx_skbuff[i];
1966 if (skb) { 1991 if (skb) {
1967 pci_unmap_single(sis_priv->pci_dev, 1992 pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
1968 sis_priv->tx_ring[i].bufptr, skb->len, 1993 skb->len, PCI_DMA_TODEVICE);
1969 PCI_DMA_TODEVICE);
1970 dev_kfree_skb(skb); 1994 dev_kfree_skb(skb);
1971 sis_priv->tx_skbuff[i] = NULL; 1995 sis_priv->tx_skbuff[i] = NULL;
1972 } 1996 }
@@ -2055,14 +2079,14 @@ static int sis900_nway_reset(struct net_device *net_dev)
2055static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) 2079static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2056{ 2080{
2057 struct sis900_private *sis_priv = netdev_priv(net_dev); 2081 struct sis900_private *sis_priv = netdev_priv(net_dev);
2058 long pmctrl_addr = net_dev->base_addr + pmctrl; 2082 void __iomem *ioaddr = sis_priv->ioaddr;
2059 u32 cfgpmcsr = 0, pmctrl_bits = 0; 2083 u32 cfgpmcsr = 0, pmctrl_bits = 0;
2060 2084
2061 if (wol->wolopts == 0) { 2085 if (wol->wolopts == 0) {
2062 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); 2086 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2063 cfgpmcsr &= ~PME_EN; 2087 cfgpmcsr &= ~PME_EN;
2064 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); 2088 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2065 outl(pmctrl_bits, pmctrl_addr); 2089 sw32(pmctrl, pmctrl_bits);
2066 if (netif_msg_wol(sis_priv)) 2090 if (netif_msg_wol(sis_priv))
2067 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name); 2091 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
2068 return 0; 2092 return 0;
@@ -2077,7 +2101,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
2077 if (wol->wolopts & WAKE_PHY) 2101 if (wol->wolopts & WAKE_PHY)
2078 pmctrl_bits |= LINKON; 2102 pmctrl_bits |= LINKON;
2079 2103
2080 outl(pmctrl_bits, pmctrl_addr); 2104 sw32(pmctrl, pmctrl_bits);
2081 2105
2082 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); 2106 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2083 cfgpmcsr |= PME_EN; 2107 cfgpmcsr |= PME_EN;
@@ -2090,10 +2114,11 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
2090 2114
2091static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) 2115static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2092{ 2116{
2093 long pmctrl_addr = net_dev->base_addr + pmctrl; 2117 struct sis900_private *sp = netdev_priv(net_dev);
2118 void __iomem *ioaddr = sp->ioaddr;
2094 u32 pmctrl_bits; 2119 u32 pmctrl_bits;
2095 2120
2096 pmctrl_bits = inl(pmctrl_addr); 2121 pmctrl_bits = sr32(pmctrl);
2097 if (pmctrl_bits & MAGICPKT) 2122 if (pmctrl_bits & MAGICPKT)
2098 wol->wolopts |= WAKE_MAGIC; 2123 wol->wolopts |= WAKE_MAGIC;
2099 if (pmctrl_bits & LINKON) 2124 if (pmctrl_bits & LINKON)
@@ -2279,8 +2304,8 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
2279 2304
2280static void set_rx_mode(struct net_device *net_dev) 2305static void set_rx_mode(struct net_device *net_dev)
2281{ 2306{
2282 long ioaddr = net_dev->base_addr;
2283 struct sis900_private *sis_priv = netdev_priv(net_dev); 2307 struct sis900_private *sis_priv = netdev_priv(net_dev);
2308 void __iomem *ioaddr = sis_priv->ioaddr;
2284 u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */ 2309 u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */
2285 int i, table_entries; 2310 int i, table_entries;
2286 u32 rx_mode; 2311 u32 rx_mode;
@@ -2322,24 +2347,24 @@ static void set_rx_mode(struct net_device *net_dev)
2322 /* update Multicast Hash Table in Receive Filter */ 2347 /* update Multicast Hash Table in Receive Filter */
2323 for (i = 0; i < table_entries; i++) { 2348 for (i = 0; i < table_entries; i++) {
2324 /* why plus 0x04 ??, That makes the correct value for hash table. */ 2349 /* why plus 0x04 ??, That makes the correct value for hash table. */
2325 outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr); 2350 sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
2326 outl(mc_filter[i], ioaddr + rfdr); 2351 sw32(rfdr, mc_filter[i]);
2327 } 2352 }
2328 2353
2329 outl(RFEN | rx_mode, ioaddr + rfcr); 2354 sw32(rfcr, RFEN | rx_mode);
2330 2355
2331 /* sis900 is capable of looping back packets at MAC level for 2356 /* sis900 is capable of looping back packets at MAC level for
2332 * debugging purpose */ 2357 * debugging purpose */
2333 if (net_dev->flags & IFF_LOOPBACK) { 2358 if (net_dev->flags & IFF_LOOPBACK) {
2334 u32 cr_saved; 2359 u32 cr_saved;
2335 /* We must disable Tx/Rx before setting loopback mode */ 2360 /* We must disable Tx/Rx before setting loopback mode */
2336 cr_saved = inl(ioaddr + cr); 2361 cr_saved = sr32(cr);
2337 outl(cr_saved | TxDIS | RxDIS, ioaddr + cr); 2362 sw32(cr, cr_saved | TxDIS | RxDIS);
2338 /* enable loopback */ 2363 /* enable loopback */
2339 outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg); 2364 sw32(txcfg, sr32(txcfg) | TxMLB);
2340 outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg); 2365 sw32(rxcfg, sr32(rxcfg) | RxATX);
2341 /* restore cr */ 2366 /* restore cr */
2342 outl(cr_saved, ioaddr + cr); 2367 sw32(cr, cr_saved);
2343 } 2368 }
2344} 2369}
2345 2370
@@ -2355,26 +2380,25 @@ static void set_rx_mode(struct net_device *net_dev)
2355static void sis900_reset(struct net_device *net_dev) 2380static void sis900_reset(struct net_device *net_dev)
2356{ 2381{
2357 struct sis900_private *sis_priv = netdev_priv(net_dev); 2382 struct sis900_private *sis_priv = netdev_priv(net_dev);
2358 long ioaddr = net_dev->base_addr; 2383 void __iomem *ioaddr = sis_priv->ioaddr;
2359 int i = 0;
2360 u32 status = TxRCMP | RxRCMP; 2384 u32 status = TxRCMP | RxRCMP;
2385 int i;
2361 2386
2362 outl(0, ioaddr + ier); 2387 sw32(ier, 0);
2363 outl(0, ioaddr + imr); 2388 sw32(imr, 0);
2364 outl(0, ioaddr + rfcr); 2389 sw32(rfcr, 0);
2365 2390
2366 outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr); 2391 sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));
2367 2392
2368 /* Check that the chip has finished the reset. */ 2393 /* Check that the chip has finished the reset. */
2369 while (status && (i++ < 1000)) { 2394 for (i = 0; status && (i < 1000); i++)
2370 status ^= (inl(isr + ioaddr) & status); 2395 status ^= sr32(isr) & status;
2371 }
2372 2396
2373 if( (sis_priv->chipset_rev >= SIS635A_900_REV) || 2397 if (sis_priv->chipset_rev >= SIS635A_900_REV ||
2374 (sis_priv->chipset_rev == SIS900B_900_REV) ) 2398 sis_priv->chipset_rev == SIS900B_900_REV)
2375 outl(PESEL | RND_CNT, ioaddr + cfg); 2399 sw32(cfg, PESEL | RND_CNT);
2376 else 2400 else
2377 outl(PESEL, ioaddr + cfg); 2401 sw32(cfg, PESEL);
2378} 2402}
2379 2403
2380/** 2404/**
@@ -2388,10 +2412,12 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2388{ 2412{
2389 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2413 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2390 struct sis900_private *sis_priv = netdev_priv(net_dev); 2414 struct sis900_private *sis_priv = netdev_priv(net_dev);
2391 struct mii_phy *phy = NULL; 2415
2416 unregister_netdev(net_dev);
2392 2417
2393 while (sis_priv->first_mii) { 2418 while (sis_priv->first_mii) {
2394 phy = sis_priv->first_mii; 2419 struct mii_phy *phy = sis_priv->first_mii;
2420
2395 sis_priv->first_mii = phy->next; 2421 sis_priv->first_mii = phy->next;
2396 kfree(phy); 2422 kfree(phy);
2397 } 2423 }
@@ -2400,7 +2426,7 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2400 sis_priv->rx_ring_dma); 2426 sis_priv->rx_ring_dma);
2401 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, 2427 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
2402 sis_priv->tx_ring_dma); 2428 sis_priv->tx_ring_dma);
2403 unregister_netdev(net_dev); 2429 pci_iounmap(pci_dev, sis_priv->ioaddr);
2404 free_netdev(net_dev); 2430 free_netdev(net_dev);
2405 pci_release_regions(pci_dev); 2431 pci_release_regions(pci_dev);
2406 pci_set_drvdata(pci_dev, NULL); 2432 pci_set_drvdata(pci_dev, NULL);
@@ -2411,7 +2437,8 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2411static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state) 2437static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
2412{ 2438{
2413 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2439 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2414 long ioaddr = net_dev->base_addr; 2440 struct sis900_private *sis_priv = netdev_priv(net_dev);
2441 void __iomem *ioaddr = sis_priv->ioaddr;
2415 2442
2416 if(!netif_running(net_dev)) 2443 if(!netif_running(net_dev))
2417 return 0; 2444 return 0;
@@ -2420,7 +2447,7 @@ static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
2420 netif_device_detach(net_dev); 2447 netif_device_detach(net_dev);
2421 2448
2422 /* Stop the chip's Tx and Rx Status Machine */ 2449 /* Stop the chip's Tx and Rx Status Machine */
2423 outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr); 2450 sw32(cr, RxDIS | TxDIS | sr32(cr));
2424 2451
2425 pci_set_power_state(pci_dev, PCI_D3hot); 2452 pci_set_power_state(pci_dev, PCI_D3hot);
2426 pci_save_state(pci_dev); 2453 pci_save_state(pci_dev);
@@ -2432,7 +2459,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
2432{ 2459{
2433 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2460 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2434 struct sis900_private *sis_priv = netdev_priv(net_dev); 2461 struct sis900_private *sis_priv = netdev_priv(net_dev);
2435 long ioaddr = net_dev->base_addr; 2462 void __iomem *ioaddr = sis_priv->ioaddr;
2436 2463
2437 if(!netif_running(net_dev)) 2464 if(!netif_running(net_dev))
2438 return 0; 2465 return 0;
@@ -2453,9 +2480,9 @@ static int sis900_resume(struct pci_dev *pci_dev)
2453 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 2480 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
2454 2481
2455 /* Enable all known interrupts by setting the interrupt mask. */ 2482 /* Enable all known interrupts by setting the interrupt mask. */
2456 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 2483 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
2457 outl(RxENA | inl(ioaddr + cr), ioaddr + cr); 2484 sw32(cr, RxENA | sr32(cr));
2458 outl(IE, ioaddr + ier); 2485 sw32(ier, IE);
2459 2486
2460 sis900_check_mode(net_dev, sis_priv->mii); 2487 sis900_check_mode(net_dev, sis_priv->mii);
2461 2488
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 2a662e6112e9..d01e59c348ad 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -146,6 +146,12 @@ enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
146#define EPIC_TOTAL_SIZE 0x100 146#define EPIC_TOTAL_SIZE 0x100
147#define USE_IO_OPS 1 147#define USE_IO_OPS 1
148 148
149#ifdef USE_IO_OPS
150#define EPIC_BAR 0
151#else
152#define EPIC_BAR 1
153#endif
154
149typedef enum { 155typedef enum {
150 SMSC_83C170_0, 156 SMSC_83C170_0,
151 SMSC_83C170, 157 SMSC_83C170,
@@ -176,21 +182,11 @@ static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
176}; 182};
177MODULE_DEVICE_TABLE (pci, epic_pci_tbl); 183MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
178 184
179 185#define ew16(reg, val) iowrite16(val, ioaddr + (reg))
180#ifndef USE_IO_OPS 186#define ew32(reg, val) iowrite32(val, ioaddr + (reg))
181#undef inb 187#define er8(reg) ioread8(ioaddr + (reg))
182#undef inw 188#define er16(reg) ioread16(ioaddr + (reg))
183#undef inl 189#define er32(reg) ioread32(ioaddr + (reg))
184#undef outb
185#undef outw
186#undef outl
187#define inb readb
188#define inw readw
189#define inl readl
190#define outb writeb
191#define outw writew
192#define outl writel
193#endif
194 190
195/* Offsets to registers, using the (ugh) SMC names. */ 191/* Offsets to registers, using the (ugh) SMC names. */
196enum epic_registers { 192enum epic_registers {
@@ -275,6 +271,7 @@ struct epic_private {
275 u32 irq_mask; 271 u32 irq_mask;
276 unsigned int rx_buf_sz; /* Based on MTU+slack. */ 272 unsigned int rx_buf_sz; /* Based on MTU+slack. */
277 273
274 void __iomem *ioaddr;
278 struct pci_dev *pci_dev; /* PCI bus location. */ 275 struct pci_dev *pci_dev; /* PCI bus location. */
279 int chip_id, chip_flags; 276 int chip_id, chip_flags;
280 277
@@ -290,7 +287,7 @@ struct epic_private {
290}; 287};
291 288
292static int epic_open(struct net_device *dev); 289static int epic_open(struct net_device *dev);
293static int read_eeprom(long ioaddr, int location); 290static int read_eeprom(struct epic_private *, int);
294static int mdio_read(struct net_device *dev, int phy_id, int location); 291static int mdio_read(struct net_device *dev, int phy_id, int location);
295static void mdio_write(struct net_device *dev, int phy_id, int loc, int val); 292static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
296static void epic_restart(struct net_device *dev); 293static void epic_restart(struct net_device *dev);
@@ -321,11 +318,11 @@ static const struct net_device_ops epic_netdev_ops = {
321 .ndo_validate_addr = eth_validate_addr, 318 .ndo_validate_addr = eth_validate_addr,
322}; 319};
323 320
324static int __devinit epic_init_one (struct pci_dev *pdev, 321static int __devinit epic_init_one(struct pci_dev *pdev,
325 const struct pci_device_id *ent) 322 const struct pci_device_id *ent)
326{ 323{
327 static int card_idx = -1; 324 static int card_idx = -1;
328 long ioaddr; 325 void __iomem *ioaddr;
329 int chip_idx = (int) ent->driver_data; 326 int chip_idx = (int) ent->driver_data;
330 int irq; 327 int irq;
331 struct net_device *dev; 328 struct net_device *dev;
@@ -368,19 +365,15 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
368 365
369 SET_NETDEV_DEV(dev, &pdev->dev); 366 SET_NETDEV_DEV(dev, &pdev->dev);
370 367
371#ifdef USE_IO_OPS 368 ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
372 ioaddr = pci_resource_start (pdev, 0);
373#else
374 ioaddr = pci_resource_start (pdev, 1);
375 ioaddr = (long) pci_ioremap_bar(pdev, 1);
376 if (!ioaddr) { 369 if (!ioaddr) {
377 dev_err(&pdev->dev, "ioremap failed\n"); 370 dev_err(&pdev->dev, "ioremap failed\n");
378 goto err_out_free_netdev; 371 goto err_out_free_netdev;
379 } 372 }
380#endif
381 373
382 pci_set_drvdata(pdev, dev); 374 pci_set_drvdata(pdev, dev);
383 ep = netdev_priv(dev); 375 ep = netdev_priv(dev);
376 ep->ioaddr = ioaddr;
384 ep->mii.dev = dev; 377 ep->mii.dev = dev;
385 ep->mii.mdio_read = mdio_read; 378 ep->mii.mdio_read = mdio_read;
386 ep->mii.mdio_write = mdio_write; 379 ep->mii.mdio_write = mdio_write;
@@ -409,34 +402,31 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
409 duplex = full_duplex[card_idx]; 402 duplex = full_duplex[card_idx];
410 } 403 }
411 404
412 dev->base_addr = ioaddr;
413 dev->irq = irq;
414
415 spin_lock_init(&ep->lock); 405 spin_lock_init(&ep->lock);
416 spin_lock_init(&ep->napi_lock); 406 spin_lock_init(&ep->napi_lock);
417 ep->reschedule_in_poll = 0; 407 ep->reschedule_in_poll = 0;
418 408
419 /* Bring the chip out of low-power mode. */ 409 /* Bring the chip out of low-power mode. */
420 outl(0x4200, ioaddr + GENCTL); 410 ew32(GENCTL, 0x4200);
421 /* Magic?! If we don't set this bit the MII interface won't work. */ 411 /* Magic?! If we don't set this bit the MII interface won't work. */
422 /* This magic is documented in SMSC app note 7.15 */ 412 /* This magic is documented in SMSC app note 7.15 */
423 for (i = 16; i > 0; i--) 413 for (i = 16; i > 0; i--)
424 outl(0x0008, ioaddr + TEST1); 414 ew32(TEST1, 0x0008);
425 415
426 /* Turn on the MII transceiver. */ 416 /* Turn on the MII transceiver. */
427 outl(0x12, ioaddr + MIICfg); 417 ew32(MIICfg, 0x12);
428 if (chip_idx == 1) 418 if (chip_idx == 1)
429 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 419 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
430 outl(0x0200, ioaddr + GENCTL); 420 ew32(GENCTL, 0x0200);
431 421
432 /* Note: the '175 does not have a serial EEPROM. */ 422 /* Note: the '175 does not have a serial EEPROM. */
433 for (i = 0; i < 3; i++) 423 for (i = 0; i < 3; i++)
434 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4)); 424 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
435 425
436 if (debug > 2) { 426 if (debug > 2) {
437 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n"); 427 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
438 for (i = 0; i < 64; i++) 428 for (i = 0; i < 64; i++)
439 printk(" %4.4x%s", read_eeprom(ioaddr, i), 429 printk(" %4.4x%s", read_eeprom(ep, i),
440 i % 16 == 15 ? "\n" : ""); 430 i % 16 == 15 ? "\n" : "");
441 } 431 }
442 432
@@ -481,8 +471,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
481 471
482 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */ 472 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
483 if (ep->chip_flags & MII_PWRDWN) 473 if (ep->chip_flags & MII_PWRDWN)
484 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL); 474 ew32(NVCTL, er32(NVCTL) & ~0x483c);
485 outl(0x0008, ioaddr + GENCTL); 475 ew32(GENCTL, 0x0008);
486 476
487 /* The lower four bits are the media type. */ 477 /* The lower four bits are the media type. */
488 if (duplex) { 478 if (duplex) {
@@ -501,8 +491,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
501 if (ret < 0) 491 if (ret < 0)
502 goto err_out_unmap_rx; 492 goto err_out_unmap_rx;
503 493
504 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", 494 printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
505 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, 495 dev->name, pci_id_tbl[chip_idx].name,
496 (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
506 dev->dev_addr); 497 dev->dev_addr);
507 498
508out: 499out:
@@ -513,10 +504,8 @@ err_out_unmap_rx:
513err_out_unmap_tx: 504err_out_unmap_tx:
514 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); 505 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
515err_out_iounmap: 506err_out_iounmap:
516#ifndef USE_IO_OPS 507 pci_iounmap(pdev, ioaddr);
517 iounmap(ioaddr);
518err_out_free_netdev: 508err_out_free_netdev:
519#endif
520 free_netdev(dev); 509 free_netdev(dev);
521err_out_free_res: 510err_out_free_res:
522 pci_release_regions(pdev); 511 pci_release_regions(pdev);
@@ -540,7 +529,7 @@ err_out_disable:
540 This serves to flush the operation to the PCI bus. 529 This serves to flush the operation to the PCI bus.
541 */ 530 */
542 531
543#define eeprom_delay() inl(ee_addr) 532#define eeprom_delay() er32(EECTL)
544 533
545/* The EEPROM commands include the alway-set leading bit. */ 534/* The EEPROM commands include the alway-set leading bit. */
546#define EE_WRITE_CMD (5 << 6) 535#define EE_WRITE_CMD (5 << 6)
@@ -550,67 +539,67 @@ err_out_disable:
550 539
551static void epic_disable_int(struct net_device *dev, struct epic_private *ep) 540static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
552{ 541{
553 long ioaddr = dev->base_addr; 542 void __iomem *ioaddr = ep->ioaddr;
554 543
555 outl(0x00000000, ioaddr + INTMASK); 544 ew32(INTMASK, 0x00000000);
556} 545}
557 546
558static inline void __epic_pci_commit(long ioaddr) 547static inline void __epic_pci_commit(void __iomem *ioaddr)
559{ 548{
560#ifndef USE_IO_OPS 549#ifndef USE_IO_OPS
561 inl(ioaddr + INTMASK); 550 er32(INTMASK);
562#endif 551#endif
563} 552}
564 553
565static inline void epic_napi_irq_off(struct net_device *dev, 554static inline void epic_napi_irq_off(struct net_device *dev,
566 struct epic_private *ep) 555 struct epic_private *ep)
567{ 556{
568 long ioaddr = dev->base_addr; 557 void __iomem *ioaddr = ep->ioaddr;
569 558
570 outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK); 559 ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
571 __epic_pci_commit(ioaddr); 560 __epic_pci_commit(ioaddr);
572} 561}
573 562
574static inline void epic_napi_irq_on(struct net_device *dev, 563static inline void epic_napi_irq_on(struct net_device *dev,
575 struct epic_private *ep) 564 struct epic_private *ep)
576{ 565{
577 long ioaddr = dev->base_addr; 566 void __iomem *ioaddr = ep->ioaddr;
578 567
579 /* No need to commit possible posted write */ 568 /* No need to commit possible posted write */
580 outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK); 569 ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
581} 570}
582 571
583static int __devinit read_eeprom(long ioaddr, int location) 572static int __devinit read_eeprom(struct epic_private *ep, int location)
584{ 573{
574 void __iomem *ioaddr = ep->ioaddr;
585 int i; 575 int i;
586 int retval = 0; 576 int retval = 0;
587 long ee_addr = ioaddr + EECTL;
588 int read_cmd = location | 577 int read_cmd = location |
589 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD); 578 (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
590 579
591 outl(EE_ENB & ~EE_CS, ee_addr); 580 ew32(EECTL, EE_ENB & ~EE_CS);
592 outl(EE_ENB, ee_addr); 581 ew32(EECTL, EE_ENB);
593 582
594 /* Shift the read command bits out. */ 583 /* Shift the read command bits out. */
595 for (i = 12; i >= 0; i--) { 584 for (i = 12; i >= 0; i--) {
596 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0; 585 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
597 outl(EE_ENB | dataval, ee_addr); 586 ew32(EECTL, EE_ENB | dataval);
598 eeprom_delay(); 587 eeprom_delay();
599 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); 588 ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
600 eeprom_delay(); 589 eeprom_delay();
601 } 590 }
602 outl(EE_ENB, ee_addr); 591 ew32(EECTL, EE_ENB);
603 592
604 for (i = 16; i > 0; i--) { 593 for (i = 16; i > 0; i--) {
605 outl(EE_ENB | EE_SHIFT_CLK, ee_addr); 594 ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
606 eeprom_delay(); 595 eeprom_delay();
607 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0); 596 retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
608 outl(EE_ENB, ee_addr); 597 ew32(EECTL, EE_ENB);
609 eeprom_delay(); 598 eeprom_delay();
610 } 599 }
611 600
612 /* Terminate the EEPROM access. */ 601 /* Terminate the EEPROM access. */
613 outl(EE_ENB & ~EE_CS, ee_addr); 602 ew32(EECTL, EE_ENB & ~EE_CS);
614 return retval; 603 return retval;
615} 604}
616 605
@@ -618,22 +607,23 @@ static int __devinit read_eeprom(long ioaddr, int location)
618#define MII_WRITEOP 2 607#define MII_WRITEOP 2
619static int mdio_read(struct net_device *dev, int phy_id, int location) 608static int mdio_read(struct net_device *dev, int phy_id, int location)
620{ 609{
621 long ioaddr = dev->base_addr; 610 struct epic_private *ep = netdev_priv(dev);
611 void __iomem *ioaddr = ep->ioaddr;
622 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP; 612 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
623 int i; 613 int i;
624 614
625 outl(read_cmd, ioaddr + MIICtrl); 615 ew32(MIICtrl, read_cmd);
626 /* Typical operation takes 25 loops. */ 616 /* Typical operation takes 25 loops. */
627 for (i = 400; i > 0; i--) { 617 for (i = 400; i > 0; i--) {
628 barrier(); 618 barrier();
629 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) { 619 if ((er32(MIICtrl) & MII_READOP) == 0) {
630 /* Work around read failure bug. */ 620 /* Work around read failure bug. */
631 if (phy_id == 1 && location < 6 && 621 if (phy_id == 1 && location < 6 &&
632 inw(ioaddr + MIIData) == 0xffff) { 622 er16(MIIData) == 0xffff) {
633 outl(read_cmd, ioaddr + MIICtrl); 623 ew32(MIICtrl, read_cmd);
634 continue; 624 continue;
635 } 625 }
636 return inw(ioaddr + MIIData); 626 return er16(MIIData);
637 } 627 }
638 } 628 }
639 return 0xffff; 629 return 0xffff;
@@ -641,14 +631,15 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
641 631
642static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) 632static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
643{ 633{
644 long ioaddr = dev->base_addr; 634 struct epic_private *ep = netdev_priv(dev);
635 void __iomem *ioaddr = ep->ioaddr;
645 int i; 636 int i;
646 637
647 outw(value, ioaddr + MIIData); 638 ew16(MIIData, value);
648 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl); 639 ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
649 for (i = 10000; i > 0; i--) { 640 for (i = 10000; i > 0; i--) {
650 barrier(); 641 barrier();
651 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) 642 if ((er32(MIICtrl) & MII_WRITEOP) == 0)
652 break; 643 break;
653 } 644 }
654} 645}
@@ -657,25 +648,26 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
657static int epic_open(struct net_device *dev) 648static int epic_open(struct net_device *dev)
658{ 649{
659 struct epic_private *ep = netdev_priv(dev); 650 struct epic_private *ep = netdev_priv(dev);
660 long ioaddr = dev->base_addr; 651 void __iomem *ioaddr = ep->ioaddr;
661 int i; 652 const int irq = ep->pci_dev->irq;
662 int retval; 653 int rc, i;
663 654
664 /* Soft reset the chip. */ 655 /* Soft reset the chip. */
665 outl(0x4001, ioaddr + GENCTL); 656 ew32(GENCTL, 0x4001);
666 657
667 napi_enable(&ep->napi); 658 napi_enable(&ep->napi);
668 if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) { 659 rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
660 if (rc) {
669 napi_disable(&ep->napi); 661 napi_disable(&ep->napi);
670 return retval; 662 return rc;
671 } 663 }
672 664
673 epic_init_ring(dev); 665 epic_init_ring(dev);
674 666
675 outl(0x4000, ioaddr + GENCTL); 667 ew32(GENCTL, 0x4000);
676 /* This magic is documented in SMSC app note 7.15 */ 668 /* This magic is documented in SMSC app note 7.15 */
677 for (i = 16; i > 0; i--) 669 for (i = 16; i > 0; i--)
678 outl(0x0008, ioaddr + TEST1); 670 ew32(TEST1, 0x0008);
679 671
680 /* Pull the chip out of low-power mode, enable interrupts, and set for 672 /* Pull the chip out of low-power mode, enable interrupts, and set for
681 PCI read multiple. The MIIcfg setting and strange write order are 673 PCI read multiple. The MIIcfg setting and strange write order are
@@ -683,29 +675,29 @@ static int epic_open(struct net_device *dev)
683 wiring on the Ositech CardBus card. 675 wiring on the Ositech CardBus card.
684 */ 676 */
685#if 0 677#if 0
686 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); 678 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
687#endif 679#endif
688 if (ep->chip_flags & MII_PWRDWN) 680 if (ep->chip_flags & MII_PWRDWN)
689 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 681 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
690 682
691 /* Tell the chip to byteswap descriptors on big-endian hosts */ 683 /* Tell the chip to byteswap descriptors on big-endian hosts */
692#ifdef __BIG_ENDIAN 684#ifdef __BIG_ENDIAN
693 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 685 ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
694 inl(ioaddr + GENCTL); 686 er32(GENCTL);
695 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 687 ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
696#else 688#else
697 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 689 ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
698 inl(ioaddr + GENCTL); 690 er32(GENCTL);
699 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 691 ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
700#endif 692#endif
701 693
702 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ 694 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
703 695
704 for (i = 0; i < 3; i++) 696 for (i = 0; i < 3; i++)
705 outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); 697 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
706 698
707 ep->tx_threshold = TX_FIFO_THRESH; 699 ep->tx_threshold = TX_FIFO_THRESH;
708 outl(ep->tx_threshold, ioaddr + TxThresh); 700 ew32(TxThresh, ep->tx_threshold);
709 701
710 if (media2miictl[dev->if_port & 15]) { 702 if (media2miictl[dev->if_port & 15]) {
711 if (ep->mii_phy_cnt) 703 if (ep->mii_phy_cnt)
@@ -731,26 +723,27 @@ static int epic_open(struct net_device *dev)
731 } 723 }
732 } 724 }
733 725
734 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 726 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
735 outl(ep->rx_ring_dma, ioaddr + PRxCDAR); 727 ew32(PRxCDAR, ep->rx_ring_dma);
736 outl(ep->tx_ring_dma, ioaddr + PTxCDAR); 728 ew32(PTxCDAR, ep->tx_ring_dma);
737 729
738 /* Start the chip's Rx process. */ 730 /* Start the chip's Rx process. */
739 set_rx_mode(dev); 731 set_rx_mode(dev);
740 outl(StartRx | RxQueued, ioaddr + COMMAND); 732 ew32(COMMAND, StartRx | RxQueued);
741 733
742 netif_start_queue(dev); 734 netif_start_queue(dev);
743 735
744 /* Enable interrupts by setting the interrupt mask. */ 736 /* Enable interrupts by setting the interrupt mask. */
745 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) 737 ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
746 | CntFull | TxUnderrun 738 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
747 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); 739 TxUnderrun);
748 740
749 if (debug > 1) 741 if (debug > 1) {
750 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x " 742 printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
751 "%s-duplex.\n", 743 "status %4.4x %s-duplex.\n",
752 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL), 744 dev->name, ioaddr, irq, er32(GENCTL),
753 ep->mii.full_duplex ? "full" : "half"); 745 ep->mii.full_duplex ? "full" : "half");
746 }
754 747
755 /* Set the timer to switch to check for link beat and perhaps switch 748 /* Set the timer to switch to check for link beat and perhaps switch
756 to an alternate media type. */ 749 to an alternate media type. */
@@ -760,27 +753,29 @@ static int epic_open(struct net_device *dev)
760 ep->timer.function = epic_timer; /* timer handler */ 753 ep->timer.function = epic_timer; /* timer handler */
761 add_timer(&ep->timer); 754 add_timer(&ep->timer);
762 755
763 return 0; 756 return rc;
764} 757}
765 758
766/* Reset the chip to recover from a PCI transaction error. 759/* Reset the chip to recover from a PCI transaction error.
767 This may occur at interrupt time. */ 760 This may occur at interrupt time. */
768static void epic_pause(struct net_device *dev) 761static void epic_pause(struct net_device *dev)
769{ 762{
770 long ioaddr = dev->base_addr; 763 struct net_device_stats *stats = &dev->stats;
764 struct epic_private *ep = netdev_priv(dev);
765 void __iomem *ioaddr = ep->ioaddr;
771 766
772 netif_stop_queue (dev); 767 netif_stop_queue (dev);
773 768
774 /* Disable interrupts by clearing the interrupt mask. */ 769 /* Disable interrupts by clearing the interrupt mask. */
775 outl(0x00000000, ioaddr + INTMASK); 770 ew32(INTMASK, 0x00000000);
776 /* Stop the chip's Tx and Rx DMA processes. */ 771 /* Stop the chip's Tx and Rx DMA processes. */
777 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND); 772 ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);
778 773
779 /* Update the error counts. */ 774 /* Update the error counts. */
780 if (inw(ioaddr + COMMAND) != 0xffff) { 775 if (er16(COMMAND) != 0xffff) {
781 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 776 stats->rx_missed_errors += er8(MPCNT);
782 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 777 stats->rx_frame_errors += er8(ALICNT);
783 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 778 stats->rx_crc_errors += er8(CRCCNT);
784 } 779 }
785 780
786 /* Remove the packets on the Rx queue. */ 781 /* Remove the packets on the Rx queue. */
@@ -789,12 +784,12 @@ static void epic_pause(struct net_device *dev)
789 784
790static void epic_restart(struct net_device *dev) 785static void epic_restart(struct net_device *dev)
791{ 786{
792 long ioaddr = dev->base_addr;
793 struct epic_private *ep = netdev_priv(dev); 787 struct epic_private *ep = netdev_priv(dev);
788 void __iomem *ioaddr = ep->ioaddr;
794 int i; 789 int i;
795 790
796 /* Soft reset the chip. */ 791 /* Soft reset the chip. */
797 outl(0x4001, ioaddr + GENCTL); 792 ew32(GENCTL, 0x4001);
798 793
799 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", 794 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
800 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); 795 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
@@ -802,47 +797,46 @@ static void epic_restart(struct net_device *dev)
802 797
803 /* This magic is documented in SMSC app note 7.15 */ 798 /* This magic is documented in SMSC app note 7.15 */
804 for (i = 16; i > 0; i--) 799 for (i = 16; i > 0; i--)
805 outl(0x0008, ioaddr + TEST1); 800 ew32(TEST1, 0x0008);
806 801
807#ifdef __BIG_ENDIAN 802#ifdef __BIG_ENDIAN
808 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 803 ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
809#else 804#else
810 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 805 ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
811#endif 806#endif
812 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); 807 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
813 if (ep->chip_flags & MII_PWRDWN) 808 if (ep->chip_flags & MII_PWRDWN)
814 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 809 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
815 810
816 for (i = 0; i < 3; i++) 811 for (i = 0; i < 3; i++)
817 outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); 812 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
818 813
819 ep->tx_threshold = TX_FIFO_THRESH; 814 ep->tx_threshold = TX_FIFO_THRESH;
820 outl(ep->tx_threshold, ioaddr + TxThresh); 815 ew32(TxThresh, ep->tx_threshold);
821 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 816 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
822 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)* 817 ew32(PRxCDAR, ep->rx_ring_dma +
823 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR); 818 (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
824 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)* 819 ew32(PTxCDAR, ep->tx_ring_dma +
825 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR); 820 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
826 821
827 /* Start the chip's Rx process. */ 822 /* Start the chip's Rx process. */
828 set_rx_mode(dev); 823 set_rx_mode(dev);
829 outl(StartRx | RxQueued, ioaddr + COMMAND); 824 ew32(COMMAND, StartRx | RxQueued);
830 825
831 /* Enable interrupts by setting the interrupt mask. */ 826 /* Enable interrupts by setting the interrupt mask. */
832 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) 827 ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
833 | CntFull | TxUnderrun 828 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
834 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); 829 TxUnderrun);
835 830
836 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x" 831 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
837 " interrupt %4.4x.\n", 832 " interrupt %4.4x.\n",
838 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL), 833 dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
839 (int)inl(ioaddr + INTSTAT));
840} 834}
841 835
842static void check_media(struct net_device *dev) 836static void check_media(struct net_device *dev)
843{ 837{
844 struct epic_private *ep = netdev_priv(dev); 838 struct epic_private *ep = netdev_priv(dev);
845 long ioaddr = dev->base_addr; 839 void __iomem *ioaddr = ep->ioaddr;
846 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; 840 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
847 int negotiated = mii_lpa & ep->mii.advertising; 841 int negotiated = mii_lpa & ep->mii.advertising;
848 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; 842 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
@@ -856,7 +850,7 @@ static void check_media(struct net_device *dev)
856 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link" 850 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
857 " partner capability of %4.4x.\n", dev->name, 851 " partner capability of %4.4x.\n", dev->name,
858 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa); 852 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
859 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 853 ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
860 } 854 }
861} 855}
862 856
@@ -864,16 +858,15 @@ static void epic_timer(unsigned long data)
864{ 858{
865 struct net_device *dev = (struct net_device *)data; 859 struct net_device *dev = (struct net_device *)data;
866 struct epic_private *ep = netdev_priv(dev); 860 struct epic_private *ep = netdev_priv(dev);
867 long ioaddr = dev->base_addr; 861 void __iomem *ioaddr = ep->ioaddr;
868 int next_tick = 5*HZ; 862 int next_tick = 5*HZ;
869 863
870 if (debug > 3) { 864 if (debug > 3) {
871 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n", 865 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
872 dev->name, (int)inl(ioaddr + TxSTAT)); 866 dev->name, er32(TxSTAT));
873 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x " 867 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
874 "IntStatus %4.4x RxStatus %4.4x.\n", 868 "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
875 dev->name, (int)inl(ioaddr + INTMASK), 869 er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
876 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
877 } 870 }
878 871
879 check_media(dev); 872 check_media(dev);
@@ -885,23 +878,22 @@ static void epic_timer(unsigned long data)
885static void epic_tx_timeout(struct net_device *dev) 878static void epic_tx_timeout(struct net_device *dev)
886{ 879{
887 struct epic_private *ep = netdev_priv(dev); 880 struct epic_private *ep = netdev_priv(dev);
888 long ioaddr = dev->base_addr; 881 void __iomem *ioaddr = ep->ioaddr;
889 882
890 if (debug > 0) { 883 if (debug > 0) {
891 printk(KERN_WARNING "%s: Transmit timeout using MII device, " 884 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
892 "Tx status %4.4x.\n", 885 "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
893 dev->name, (int)inw(ioaddr + TxSTAT));
894 if (debug > 1) { 886 if (debug > 1) {
895 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n", 887 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
896 dev->name, ep->dirty_tx, ep->cur_tx); 888 dev->name, ep->dirty_tx, ep->cur_tx);
897 } 889 }
898 } 890 }
899 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */ 891 if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */
900 dev->stats.tx_fifo_errors++; 892 dev->stats.tx_fifo_errors++;
901 outl(RestartTx, ioaddr + COMMAND); 893 ew32(COMMAND, RestartTx);
902 } else { 894 } else {
903 epic_restart(dev); 895 epic_restart(dev);
904 outl(TxQueued, dev->base_addr + COMMAND); 896 ew32(COMMAND, TxQueued);
905 } 897 }
906 898
907 dev->trans_start = jiffies; /* prevent tx timeout */ 899 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -959,6 +951,7 @@ static void epic_init_ring(struct net_device *dev)
959static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) 951static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
960{ 952{
961 struct epic_private *ep = netdev_priv(dev); 953 struct epic_private *ep = netdev_priv(dev);
954 void __iomem *ioaddr = ep->ioaddr;
962 int entry, free_count; 955 int entry, free_count;
963 u32 ctrl_word; 956 u32 ctrl_word;
964 unsigned long flags; 957 unsigned long flags;
@@ -999,13 +992,12 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
999 992
1000 spin_unlock_irqrestore(&ep->lock, flags); 993 spin_unlock_irqrestore(&ep->lock, flags);
1001 /* Trigger an immediate transmit demand. */ 994 /* Trigger an immediate transmit demand. */
1002 outl(TxQueued, dev->base_addr + COMMAND); 995 ew32(COMMAND, TxQueued);
1003 996
1004 if (debug > 4) 997 if (debug > 4)
1005 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, " 998 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1006 "flag %2.2x Tx status %8.8x.\n", 999 "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
1007 dev->name, (int)skb->len, entry, ctrl_word, 1000 entry, ctrl_word, er32(TxSTAT));
1008 (int)inl(dev->base_addr + TxSTAT));
1009 1001
1010 return NETDEV_TX_OK; 1002 return NETDEV_TX_OK;
1011} 1003}
@@ -1086,18 +1078,17 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1086{ 1078{
1087 struct net_device *dev = dev_instance; 1079 struct net_device *dev = dev_instance;
1088 struct epic_private *ep = netdev_priv(dev); 1080 struct epic_private *ep = netdev_priv(dev);
1089 long ioaddr = dev->base_addr; 1081 void __iomem *ioaddr = ep->ioaddr;
1090 unsigned int handled = 0; 1082 unsigned int handled = 0;
1091 int status; 1083 int status;
1092 1084
1093 status = inl(ioaddr + INTSTAT); 1085 status = er32(INTSTAT);
1094 /* Acknowledge all of the current interrupt sources ASAP. */ 1086 /* Acknowledge all of the current interrupt sources ASAP. */
1095 outl(status & EpicNormalEvent, ioaddr + INTSTAT); 1087 ew32(INTSTAT, status & EpicNormalEvent);
1096 1088
1097 if (debug > 4) { 1089 if (debug > 4) {
1098 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " 1090 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1099 "intstat=%#8.8x.\n", dev->name, status, 1091 "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
1100 (int)inl(ioaddr + INTSTAT));
1101 } 1092 }
1102 1093
1103 if ((status & IntrSummary) == 0) 1094 if ((status & IntrSummary) == 0)
@@ -1118,19 +1109,21 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1118 1109
1119 /* Check uncommon events all at once. */ 1110 /* Check uncommon events all at once. */
1120 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) { 1111 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
1112 struct net_device_stats *stats = &dev->stats;
1113
1121 if (status == EpicRemoved) 1114 if (status == EpicRemoved)
1122 goto out; 1115 goto out;
1123 1116
1124 /* Always update the error counts to avoid overhead later. */ 1117 /* Always update the error counts to avoid overhead later. */
1125 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 1118 stats->rx_missed_errors += er8(MPCNT);
1126 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 1119 stats->rx_frame_errors += er8(ALICNT);
1127 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 1120 stats->rx_crc_errors += er8(CRCCNT);
1128 1121
1129 if (status & TxUnderrun) { /* Tx FIFO underflow. */ 1122 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1130 dev->stats.tx_fifo_errors++; 1123 stats->tx_fifo_errors++;
1131 outl(ep->tx_threshold += 128, ioaddr + TxThresh); 1124 ew32(TxThresh, ep->tx_threshold += 128);
1132 /* Restart the transmit process. */ 1125 /* Restart the transmit process. */
1133 outl(RestartTx, ioaddr + COMMAND); 1126 ew32(COMMAND, RestartTx);
1134 } 1127 }
1135 if (status & PCIBusErr170) { 1128 if (status & PCIBusErr170) {
1136 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n", 1129 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
@@ -1139,7 +1132,7 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1139 epic_restart(dev); 1132 epic_restart(dev);
1140 } 1133 }
1141 /* Clear all error sources. */ 1134 /* Clear all error sources. */
1142 outl(status & 0x7f18, ioaddr + INTSTAT); 1135 ew32(INTSTAT, status & 0x7f18);
1143 } 1136 }
1144 1137
1145out: 1138out:
@@ -1248,17 +1241,17 @@ static int epic_rx(struct net_device *dev, int budget)
1248 1241
1249static void epic_rx_err(struct net_device *dev, struct epic_private *ep) 1242static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1250{ 1243{
1251 long ioaddr = dev->base_addr; 1244 void __iomem *ioaddr = ep->ioaddr;
1252 int status; 1245 int status;
1253 1246
1254 status = inl(ioaddr + INTSTAT); 1247 status = er32(INTSTAT);
1255 1248
1256 if (status == EpicRemoved) 1249 if (status == EpicRemoved)
1257 return; 1250 return;
1258 if (status & RxOverflow) /* Missed a Rx frame. */ 1251 if (status & RxOverflow) /* Missed a Rx frame. */
1259 dev->stats.rx_errors++; 1252 dev->stats.rx_errors++;
1260 if (status & (RxOverflow | RxFull)) 1253 if (status & (RxOverflow | RxFull))
1261 outw(RxQueued, ioaddr + COMMAND); 1254 ew16(COMMAND, RxQueued);
1262} 1255}
1263 1256
1264static int epic_poll(struct napi_struct *napi, int budget) 1257static int epic_poll(struct napi_struct *napi, int budget)
@@ -1266,7 +1259,7 @@ static int epic_poll(struct napi_struct *napi, int budget)
1266 struct epic_private *ep = container_of(napi, struct epic_private, napi); 1259 struct epic_private *ep = container_of(napi, struct epic_private, napi);
1267 struct net_device *dev = ep->mii.dev; 1260 struct net_device *dev = ep->mii.dev;
1268 int work_done = 0; 1261 int work_done = 0;
1269 long ioaddr = dev->base_addr; 1262 void __iomem *ioaddr = ep->ioaddr;
1270 1263
1271rx_action: 1264rx_action:
1272 1265
@@ -1287,7 +1280,7 @@ rx_action:
1287 more = ep->reschedule_in_poll; 1280 more = ep->reschedule_in_poll;
1288 if (!more) { 1281 if (!more) {
1289 __napi_complete(napi); 1282 __napi_complete(napi);
1290 outl(EpicNapiEvent, ioaddr + INTSTAT); 1283 ew32(INTSTAT, EpicNapiEvent);
1291 epic_napi_irq_on(dev, ep); 1284 epic_napi_irq_on(dev, ep);
1292 } else 1285 } else
1293 ep->reschedule_in_poll--; 1286 ep->reschedule_in_poll--;
@@ -1303,8 +1296,9 @@ rx_action:
1303 1296
1304static int epic_close(struct net_device *dev) 1297static int epic_close(struct net_device *dev)
1305{ 1298{
1306 long ioaddr = dev->base_addr;
1307 struct epic_private *ep = netdev_priv(dev); 1299 struct epic_private *ep = netdev_priv(dev);
1300 struct pci_dev *pdev = ep->pci_dev;
1301 void __iomem *ioaddr = ep->ioaddr;
1308 struct sk_buff *skb; 1302 struct sk_buff *skb;
1309 int i; 1303 int i;
1310 1304
@@ -1313,13 +1307,13 @@ static int epic_close(struct net_device *dev)
1313 1307
1314 if (debug > 1) 1308 if (debug > 1)
1315 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", 1309 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1316 dev->name, (int)inl(ioaddr + INTSTAT)); 1310 dev->name, er32(INTSTAT));
1317 1311
1318 del_timer_sync(&ep->timer); 1312 del_timer_sync(&ep->timer);
1319 1313
1320 epic_disable_int(dev, ep); 1314 epic_disable_int(dev, ep);
1321 1315
1322 free_irq(dev->irq, dev); 1316 free_irq(pdev->irq, dev);
1323 1317
1324 epic_pause(dev); 1318 epic_pause(dev);
1325 1319
@@ -1330,7 +1324,7 @@ static int epic_close(struct net_device *dev)
1330 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ 1324 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1331 ep->rx_ring[i].buflength = 0; 1325 ep->rx_ring[i].buflength = 0;
1332 if (skb) { 1326 if (skb) {
1333 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, 1327 pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
1334 ep->rx_buf_sz, PCI_DMA_FROMDEVICE); 1328 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1335 dev_kfree_skb(skb); 1329 dev_kfree_skb(skb);
1336 } 1330 }
@@ -1341,26 +1335,28 @@ static int epic_close(struct net_device *dev)
1341 ep->tx_skbuff[i] = NULL; 1335 ep->tx_skbuff[i] = NULL;
1342 if (!skb) 1336 if (!skb)
1343 continue; 1337 continue;
1344 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, 1338 pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
1345 skb->len, PCI_DMA_TODEVICE); 1339 PCI_DMA_TODEVICE);
1346 dev_kfree_skb(skb); 1340 dev_kfree_skb(skb);
1347 } 1341 }
1348 1342
1349 /* Green! Leave the chip in low-power mode. */ 1343 /* Green! Leave the chip in low-power mode. */
1350 outl(0x0008, ioaddr + GENCTL); 1344 ew32(GENCTL, 0x0008);
1351 1345
1352 return 0; 1346 return 0;
1353} 1347}
1354 1348
1355static struct net_device_stats *epic_get_stats(struct net_device *dev) 1349static struct net_device_stats *epic_get_stats(struct net_device *dev)
1356{ 1350{
1357 long ioaddr = dev->base_addr; 1351 struct epic_private *ep = netdev_priv(dev);
1352 void __iomem *ioaddr = ep->ioaddr;
1358 1353
1359 if (netif_running(dev)) { 1354 if (netif_running(dev)) {
1360 /* Update the error counts. */ 1355 struct net_device_stats *stats = &dev->stats;
1361 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 1356
1362 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 1357 stats->rx_missed_errors += er8(MPCNT);
1363 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 1358 stats->rx_frame_errors += er8(ALICNT);
1359 stats->rx_crc_errors += er8(CRCCNT);
1364 } 1360 }
1365 1361
1366 return &dev->stats; 1362 return &dev->stats;
@@ -1373,13 +1369,13 @@ static struct net_device_stats *epic_get_stats(struct net_device *dev)
1373 1369
1374static void set_rx_mode(struct net_device *dev) 1370static void set_rx_mode(struct net_device *dev)
1375{ 1371{
1376 long ioaddr = dev->base_addr;
1377 struct epic_private *ep = netdev_priv(dev); 1372 struct epic_private *ep = netdev_priv(dev);
1373 void __iomem *ioaddr = ep->ioaddr;
1378 unsigned char mc_filter[8]; /* Multicast hash filter */ 1374 unsigned char mc_filter[8]; /* Multicast hash filter */
1379 int i; 1375 int i;
1380 1376
1381 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1377 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1382 outl(0x002C, ioaddr + RxCtrl); 1378 ew32(RxCtrl, 0x002c);
1383 /* Unconditionally log net taps. */ 1379 /* Unconditionally log net taps. */
1384 memset(mc_filter, 0xff, sizeof(mc_filter)); 1380 memset(mc_filter, 0xff, sizeof(mc_filter));
1385 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) { 1381 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
@@ -1387,9 +1383,9 @@ static void set_rx_mode(struct net_device *dev)
1387 is never enabled. */ 1383 is never enabled. */
1388 /* Too many to filter perfectly -- accept all multicasts. */ 1384 /* Too many to filter perfectly -- accept all multicasts. */
1389 memset(mc_filter, 0xff, sizeof(mc_filter)); 1385 memset(mc_filter, 0xff, sizeof(mc_filter));
1390 outl(0x000C, ioaddr + RxCtrl); 1386 ew32(RxCtrl, 0x000c);
1391 } else if (netdev_mc_empty(dev)) { 1387 } else if (netdev_mc_empty(dev)) {
1392 outl(0x0004, ioaddr + RxCtrl); 1388 ew32(RxCtrl, 0x0004);
1393 return; 1389 return;
1394 } else { /* Never executed, for now. */ 1390 } else { /* Never executed, for now. */
1395 struct netdev_hw_addr *ha; 1391 struct netdev_hw_addr *ha;
@@ -1404,7 +1400,7 @@ static void set_rx_mode(struct net_device *dev)
1404 /* ToDo: perhaps we need to stop the Tx and Rx process here? */ 1400 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1405 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { 1401 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1406 for (i = 0; i < 4; i++) 1402 for (i = 0; i < 4; i++)
1407 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4); 1403 ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1408 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); 1404 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1409 } 1405 }
1410} 1406}
@@ -1466,22 +1462,26 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
1466 1462
1467static int ethtool_begin(struct net_device *dev) 1463static int ethtool_begin(struct net_device *dev)
1468{ 1464{
1469 unsigned long ioaddr = dev->base_addr; 1465 struct epic_private *ep = netdev_priv(dev);
1466 void __iomem *ioaddr = ep->ioaddr;
1467
1470 /* power-up, if interface is down */ 1468 /* power-up, if interface is down */
1471 if (! netif_running(dev)) { 1469 if (!netif_running(dev)) {
1472 outl(0x0200, ioaddr + GENCTL); 1470 ew32(GENCTL, 0x0200);
1473 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 1471 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1474 } 1472 }
1475 return 0; 1473 return 0;
1476} 1474}
1477 1475
1478static void ethtool_complete(struct net_device *dev) 1476static void ethtool_complete(struct net_device *dev)
1479{ 1477{
1480 unsigned long ioaddr = dev->base_addr; 1478 struct epic_private *ep = netdev_priv(dev);
1479 void __iomem *ioaddr = ep->ioaddr;
1480
1481 /* power-down, if interface is down */ 1481 /* power-down, if interface is down */
1482 if (! netif_running(dev)) { 1482 if (!netif_running(dev)) {
1483 outl(0x0008, ioaddr + GENCTL); 1483 ew32(GENCTL, 0x0008);
1484 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); 1484 ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1485 } 1485 }
1486} 1486}
1487 1487
@@ -1500,14 +1500,14 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1501{ 1501{
1502 struct epic_private *np = netdev_priv(dev); 1502 struct epic_private *np = netdev_priv(dev);
1503 long ioaddr = dev->base_addr; 1503 void __iomem *ioaddr = np->ioaddr;
1504 struct mii_ioctl_data *data = if_mii(rq); 1504 struct mii_ioctl_data *data = if_mii(rq);
1505 int rc; 1505 int rc;
1506 1506
1507 /* power-up, if interface is down */ 1507 /* power-up, if interface is down */
1508 if (! netif_running(dev)) { 1508 if (! netif_running(dev)) {
1509 outl(0x0200, ioaddr + GENCTL); 1509 ew32(GENCTL, 0x0200);
1510 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 1510 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1511 } 1511 }
1512 1512
1513 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */ 1513 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
@@ -1517,14 +1517,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1517 1517
1518 /* power-down, if interface is down */ 1518 /* power-down, if interface is down */
1519 if (! netif_running(dev)) { 1519 if (! netif_running(dev)) {
1520 outl(0x0008, ioaddr + GENCTL); 1520 ew32(GENCTL, 0x0008);
1521 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); 1521 ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1522 } 1522 }
1523 return rc; 1523 return rc;
1524} 1524}
1525 1525
1526 1526
1527static void __devexit epic_remove_one (struct pci_dev *pdev) 1527static void __devexit epic_remove_one(struct pci_dev *pdev)
1528{ 1528{
1529 struct net_device *dev = pci_get_drvdata(pdev); 1529 struct net_device *dev = pci_get_drvdata(pdev);
1530 struct epic_private *ep = netdev_priv(dev); 1530 struct epic_private *ep = netdev_priv(dev);
@@ -1532,9 +1532,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
1532 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); 1532 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1533 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); 1533 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1534 unregister_netdev(dev); 1534 unregister_netdev(dev);
1535#ifndef USE_IO_OPS 1535 pci_iounmap(pdev, ep->ioaddr);
1536 iounmap((void*) dev->base_addr);
1537#endif
1538 pci_release_regions(pdev); 1536 pci_release_regions(pdev);
1539 free_netdev(dev); 1537 free_netdev(dev);
1540 pci_disable_device(pdev); 1538 pci_disable_device(pdev);
@@ -1548,13 +1546,14 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
1548static int epic_suspend (struct pci_dev *pdev, pm_message_t state) 1546static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1549{ 1547{
1550 struct net_device *dev = pci_get_drvdata(pdev); 1548 struct net_device *dev = pci_get_drvdata(pdev);
1551 long ioaddr = dev->base_addr; 1549 struct epic_private *ep = netdev_priv(dev);
1550 void __iomem *ioaddr = ep->ioaddr;
1552 1551
1553 if (!netif_running(dev)) 1552 if (!netif_running(dev))
1554 return 0; 1553 return 0;
1555 epic_pause(dev); 1554 epic_pause(dev);
1556 /* Put the chip into low-power mode. */ 1555 /* Put the chip into low-power mode. */
1557 outl(0x0008, ioaddr + GENCTL); 1556 ew32(GENCTL, 0x0008);
1558 /* pci_power_off(pdev, -1); */ 1557 /* pci_power_off(pdev, -1); */
1559 return 0; 1558 return 0;
1560} 1559}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4a6971027076..dab9c6f671ec 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
1166 1166
1167/* Quickly dumps bad packets */ 1167/* Quickly dumps bad packets */
1168static void 1168static void
1169smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) 1169smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
1170{ 1170{
1171 unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
1172
1173 if (likely(pktwords >= 4)) { 1171 if (likely(pktwords >= 4)) {
1174 unsigned int timeout = 500; 1172 unsigned int timeout = 500;
1175 unsigned int val; 1173 unsigned int val;
@@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1233 continue; 1231 continue;
1234 } 1232 }
1235 1233
1236 skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); 1234 skb = netdev_alloc_skb(dev, pktwords << 2);
1237 if (unlikely(!skb)) { 1235 if (unlikely(!skb)) {
1238 SMSC_WARN(pdata, rx_err, 1236 SMSC_WARN(pdata, rx_err,
1239 "Unable to allocate skb for rx packet"); 1237 "Unable to allocate skb for rx packet");
@@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1243 break; 1241 break;
1244 } 1242 }
1245 1243
1246 skb->data = skb->head; 1244 pdata->ops->rx_readfifo(pdata,
1247 skb_reset_tail_pointer(skb); 1245 (unsigned int *)skb->data, pktwords);
1248 1246
1249 /* Align IP on 16B boundary */ 1247 /* Align IP on 16B boundary */
1250 skb_reserve(skb, NET_IP_ALIGN); 1248 skb_reserve(skb, NET_IP_ALIGN);
1251 skb_put(skb, pktlength - 4); 1249 skb_put(skb, pktlength - 4);
1252 pdata->ops->rx_readfifo(pdata,
1253 (unsigned int *)skb->head, pktwords);
1254 skb->protocol = eth_type_trans(skb, dev); 1250 skb->protocol = eth_type_trans(skb, dev);
1255 skb_checksum_none_assert(skb); 1251 skb_checksum_none_assert(skb);
1256 netif_receive_skb(skb); 1252 netif_receive_skb(skb);
@@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev)
1565 smsc911x_reg_write(pdata, FIFO_INT, temp); 1561 smsc911x_reg_write(pdata, FIFO_INT, temp);
1566 1562
1567 /* set RX Data offset to 2 bytes for alignment */ 1563 /* set RX Data offset to 2 bytes for alignment */
1568 smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); 1564 smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
1569 1565
1570 /* enable NAPI polling before enabling RX interrupts */ 1566 /* enable NAPI polling before enabling RX interrupts */
1571 napi_enable(&pdata->napi); 1567 napi_enable(&pdata->napi);
@@ -2070,6 +2066,7 @@ static const struct ethtool_ops smsc911x_ethtool_ops = {
2070 .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, 2066 .get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
2071 .get_eeprom = smsc911x_ethtool_get_eeprom, 2067 .get_eeprom = smsc911x_ethtool_get_eeprom,
2072 .set_eeprom = smsc911x_ethtool_set_eeprom, 2068 .set_eeprom = smsc911x_ethtool_set_eeprom,
2069 .get_ts_info = ethtool_op_get_ts_info,
2073}; 2070};
2074 2071
2075static const struct net_device_ops smsc911x_netdev_ops = { 2072static const struct net_device_ops smsc911x_netdev_ops = {
@@ -2382,7 +2379,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2382 SET_NETDEV_DEV(dev, &pdev->dev); 2379 SET_NETDEV_DEV(dev, &pdev->dev);
2383 2380
2384 pdata = netdev_priv(dev); 2381 pdata = netdev_priv(dev);
2385
2386 dev->irq = irq_res->start; 2382 dev->irq = irq_res->start;
2387 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; 2383 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
2388 pdata->ioaddr = ioremap_nocache(res->start, res_size); 2384 pdata->ioaddr = ioremap_nocache(res->start, res_size);
@@ -2446,7 +2442,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2446 if (retval) { 2442 if (retval) {
2447 SMSC_WARN(pdata, probe, 2443 SMSC_WARN(pdata, probe,
2448 "Unable to claim requested irq: %d", dev->irq); 2444 "Unable to claim requested irq: %d", dev->irq);
2449 goto out_free_irq; 2445 goto out_disable_resources;
2450 } 2446 }
2451 2447
2452 retval = register_netdev(dev); 2448 retval = register_netdev(dev);
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 38386478532b..fd33b21f6c96 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -54,7 +54,7 @@ struct smsc9420_ring_info {
54}; 54};
55 55
56struct smsc9420_pdata { 56struct smsc9420_pdata {
57 void __iomem *base_addr; 57 void __iomem *ioaddr;
58 struct pci_dev *pdev; 58 struct pci_dev *pdev;
59 struct net_device *dev; 59 struct net_device *dev;
60 60
@@ -114,13 +114,13 @@ do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
114 114
115static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) 115static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
116{ 116{
117 return ioread32(pd->base_addr + offset); 117 return ioread32(pd->ioaddr + offset);
118} 118}
119 119
120static inline void 120static inline void
121smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) 121smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
122{ 122{
123 iowrite32(value, pd->base_addr + offset); 123 iowrite32(value, pd->ioaddr + offset);
124} 124}
125 125
126static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) 126static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
@@ -469,6 +469,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
469 .set_eeprom = smsc9420_ethtool_set_eeprom, 469 .set_eeprom = smsc9420_ethtool_set_eeprom,
470 .get_regs_len = smsc9420_ethtool_getregslen, 470 .get_regs_len = smsc9420_ethtool_getregslen,
471 .get_regs = smsc9420_ethtool_getregs, 471 .get_regs = smsc9420_ethtool_getregs,
472 .get_ts_info = ethtool_op_get_ts_info,
472}; 473};
473 474
474/* Sets the device MAC address to dev_addr */ 475/* Sets the device MAC address to dev_addr */
@@ -659,7 +660,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
659 ulong flags; 660 ulong flags;
660 661
661 BUG_ON(!pd); 662 BUG_ON(!pd);
662 BUG_ON(!pd->base_addr); 663 BUG_ON(!pd->ioaddr);
663 664
664 int_cfg = smsc9420_reg_read(pd, INT_CFG); 665 int_cfg = smsc9420_reg_read(pd, INT_CFG);
665 666
@@ -720,9 +721,12 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
720#ifdef CONFIG_NET_POLL_CONTROLLER 721#ifdef CONFIG_NET_POLL_CONTROLLER
721static void smsc9420_poll_controller(struct net_device *dev) 722static void smsc9420_poll_controller(struct net_device *dev)
722{ 723{
723 disable_irq(dev->irq); 724 struct smsc9420_pdata *pd = netdev_priv(dev);
725 const int irq = pd->pdev->irq;
726
727 disable_irq(irq);
724 smsc9420_isr(0, dev); 728 smsc9420_isr(0, dev);
725 enable_irq(dev->irq); 729 enable_irq(irq);
726} 730}
727#endif /* CONFIG_NET_POLL_CONTROLLER */ 731#endif /* CONFIG_NET_POLL_CONTROLLER */
728 732
@@ -759,7 +763,7 @@ static int smsc9420_stop(struct net_device *dev)
759 smsc9420_stop_rx(pd); 763 smsc9420_stop_rx(pd);
760 smsc9420_free_rx_ring(pd); 764 smsc9420_free_rx_ring(pd);
761 765
762 free_irq(dev->irq, pd); 766 free_irq(pd->pdev->irq, pd);
763 767
764 smsc9420_dmac_soft_reset(pd); 768 smsc9420_dmac_soft_reset(pd);
765 769
@@ -1331,15 +1335,12 @@ out:
1331 1335
1332static int smsc9420_open(struct net_device *dev) 1336static int smsc9420_open(struct net_device *dev)
1333{ 1337{
1334 struct smsc9420_pdata *pd; 1338 struct smsc9420_pdata *pd = netdev_priv(dev);
1335 u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl; 1339 u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
1340 const int irq = pd->pdev->irq;
1336 unsigned long flags; 1341 unsigned long flags;
1337 int result = 0, timeout; 1342 int result = 0, timeout;
1338 1343
1339 BUG_ON(!dev);
1340 pd = netdev_priv(dev);
1341 BUG_ON(!pd);
1342
1343 if (!is_valid_ether_addr(dev->dev_addr)) { 1344 if (!is_valid_ether_addr(dev->dev_addr)) {
1344 smsc_warn(IFUP, "dev_addr is not a valid MAC address"); 1345 smsc_warn(IFUP, "dev_addr is not a valid MAC address");
1345 result = -EADDRNOTAVAIL; 1346 result = -EADDRNOTAVAIL;
@@ -1358,9 +1359,10 @@ static int smsc9420_open(struct net_device *dev)
1358 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); 1359 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
1359 smsc9420_pci_flush_write(pd); 1360 smsc9420_pci_flush_write(pd);
1360 1361
1361 if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, 1362 result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
1362 DRV_NAME, pd)) { 1363 DRV_NAME, pd);
1363 smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq); 1364 if (result) {
1365 smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
1364 result = -ENODEV; 1366 result = -ENODEV;
1365 goto out_0; 1367 goto out_0;
1366 } 1368 }
@@ -1395,7 +1397,7 @@ static int smsc9420_open(struct net_device *dev)
1395 smsc9420_pci_flush_write(pd); 1397 smsc9420_pci_flush_write(pd);
1396 1398
1397 /* test the IRQ connection to the ISR */ 1399 /* test the IRQ connection to the ISR */
1398 smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); 1400 smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
1399 pd->software_irq_signal = false; 1401 pd->software_irq_signal = false;
1400 1402
1401 spin_lock_irqsave(&pd->int_lock, flags); 1403 spin_lock_irqsave(&pd->int_lock, flags);
@@ -1430,7 +1432,7 @@ static int smsc9420_open(struct net_device *dev)
1430 goto out_free_irq_1; 1432 goto out_free_irq_1;
1431 } 1433 }
1432 1434
1433 smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq); 1435 smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
1434 1436
1435 result = smsc9420_alloc_tx_ring(pd); 1437 result = smsc9420_alloc_tx_ring(pd);
1436 if (result) { 1438 if (result) {
@@ -1490,7 +1492,7 @@ out_free_rx_ring_3:
1490out_free_tx_ring_2: 1492out_free_tx_ring_2:
1491 smsc9420_free_tx_ring(pd); 1493 smsc9420_free_tx_ring(pd);
1492out_free_irq_1: 1494out_free_irq_1:
1493 free_irq(dev->irq, pd); 1495 free_irq(irq, pd);
1494out_0: 1496out_0:
1495 return result; 1497 return result;
1496} 1498}
@@ -1519,7 +1521,7 @@ static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
1519 smsc9420_stop_rx(pd); 1521 smsc9420_stop_rx(pd);
1520 smsc9420_free_rx_ring(pd); 1522 smsc9420_free_rx_ring(pd);
1521 1523
1522 free_irq(dev->irq, pd); 1524 free_irq(pd->pdev->irq, pd);
1523 1525
1524 netif_device_detach(dev); 1526 netif_device_detach(dev);
1525 } 1527 }
@@ -1552,6 +1554,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
1552 smsc_warn(IFUP, "pci_enable_wake failed: %d", err); 1554 smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
1553 1555
1554 if (netif_running(dev)) { 1556 if (netif_running(dev)) {
1557 /* FIXME: gross. It looks like ancient PM relic.*/
1555 err = smsc9420_open(dev); 1558 err = smsc9420_open(dev);
1556 netif_device_attach(dev); 1559 netif_device_attach(dev);
1557 } 1560 }
@@ -1625,8 +1628,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1625 /* registers are double mapped with 0 offset for LE and 0x200 for BE */ 1628 /* registers are double mapped with 0 offset for LE and 0x200 for BE */
1626 virt_addr += LAN9420_CPSR_ENDIAN_OFFSET; 1629 virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;
1627 1630
1628 dev->base_addr = (ulong)virt_addr;
1629
1630 pd = netdev_priv(dev); 1631 pd = netdev_priv(dev);
1631 1632
1632 /* pci descriptors are created in the PCI consistent area */ 1633 /* pci descriptors are created in the PCI consistent area */
@@ -1646,7 +1647,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1646 1647
1647 pd->pdev = pdev; 1648 pd->pdev = pdev;
1648 pd->dev = dev; 1649 pd->dev = dev;
1649 pd->base_addr = virt_addr; 1650 pd->ioaddr = virt_addr;
1650 pd->msg_enable = smsc_debug; 1651 pd->msg_enable = smsc_debug;
1651 pd->rx_csum = true; 1652 pd->rx_csum = true;
1652 1653
@@ -1669,7 +1670,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1669 1670
1670 dev->netdev_ops = &smsc9420_netdev_ops; 1671 dev->netdev_ops = &smsc9420_netdev_ops;
1671 dev->ethtool_ops = &smsc9420_ethtool_ops; 1672 dev->ethtool_ops = &smsc9420_ethtool_ops;
1672 dev->irq = pdev->irq;
1673 1673
1674 netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); 1674 netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
1675 1675
@@ -1727,7 +1727,7 @@ static void __devexit smsc9420_remove(struct pci_dev *pdev)
1727 pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * 1727 pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
1728 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); 1728 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
1729 1729
1730 iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET); 1730 iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
1731 pci_release_regions(pdev); 1731 pci_release_regions(pdev);
1732 free_netdev(dev); 1732 free_netdev(dev);
1733 pci_disable_device(pdev); 1733 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 0319d640f728..f5dedcbf4651 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -97,6 +97,16 @@ struct stmmac_extra_stats {
97 unsigned long normal_irq_n; 97 unsigned long normal_irq_n;
98}; 98};
99 99
100/* CSR Frequency Access Defines*/
101#define CSR_F_35M 35000000
102#define CSR_F_60M 60000000
103#define CSR_F_100M 100000000
104#define CSR_F_150M 150000000
105#define CSR_F_250M 250000000
106#define CSR_F_300M 300000000
107
108#define MAC_CSR_H_FRQ_MASK 0x20
109
100#define HASH_TABLE_SIZE 64 110#define HASH_TABLE_SIZE 64
101#define PAUSE_TIME 0x200 111#define PAUSE_TIME 0x200
102 112
@@ -137,6 +147,7 @@ struct stmmac_extra_stats {
137#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */ 147#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
138#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */ 148#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */
139#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */ 149#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */
150#define DEFAULT_DMA_PBL 8
140 151
141enum rx_frame_status { /* IPC status */ 152enum rx_frame_status { /* IPC status */
142 good_frame = 0, 153 good_frame = 0,
@@ -228,7 +239,7 @@ struct stmmac_desc_ops {
228 int (*get_rx_owner) (struct dma_desc *p); 239 int (*get_rx_owner) (struct dma_desc *p);
229 void (*set_rx_owner) (struct dma_desc *p); 240 void (*set_rx_owner) (struct dma_desc *p);
230 /* Get the receive frame size */ 241 /* Get the receive frame size */
231 int (*get_rx_frame_len) (struct dma_desc *p); 242 int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
232 /* Return the reception status looking at the RDES1 */ 243 /* Return the reception status looking at the RDES1 */
233 int (*rx_status) (void *data, struct stmmac_extra_stats *x, 244 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
234 struct dma_desc *p); 245 struct dma_desc *p);
@@ -236,7 +247,8 @@ struct stmmac_desc_ops {
236 247
237struct stmmac_dma_ops { 248struct stmmac_dma_ops {
238 /* DMA core initialization */ 249 /* DMA core initialization */
239 int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx); 250 int (*init) (void __iomem *ioaddr, int pbl, int fb, int burst_len,
251 u32 dma_tx, u32 dma_rx);
240 /* Dump DMA registers */ 252 /* Dump DMA registers */
241 void (*dump_regs) (void __iomem *ioaddr); 253 void (*dump_regs) (void __iomem *ioaddr);
242 /* Set tx/rx threshold in the csr6 register 254 /* Set tx/rx threshold in the csr6 register
@@ -261,8 +273,8 @@ struct stmmac_dma_ops {
261struct stmmac_ops { 273struct stmmac_ops {
262 /* MAC core initialization */ 274 /* MAC core initialization */
263 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned; 275 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
264 /* Support checksum offload engine */ 276 /* Enable and verify that the IPC module is supported */
265 int (*rx_coe) (void __iomem *ioaddr); 277 int (*rx_ipc) (void __iomem *ioaddr);
266 /* Dump MAC registers */ 278 /* Dump MAC registers */
267 void (*dump_regs) (void __iomem *ioaddr); 279 void (*dump_regs) (void __iomem *ioaddr);
268 /* Handle extra events on specific interrupts hw dependent */ 280 /* Handle extra events on specific interrupts hw dependent */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index cfcef0ea0fa5..54339a78e358 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -142,7 +142,7 @@ enum rx_tx_priority_ratio {
142#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ 142#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
143#define DMA_BUS_MODE_RPBL_SHIFT 17 143#define DMA_BUS_MODE_RPBL_SHIFT 17
144#define DMA_BUS_MODE_USP 0x00800000 144#define DMA_BUS_MODE_USP 0x00800000
145#define DMA_BUS_MODE_4PBL 0x01000000 145#define DMA_BUS_MODE_PBL 0x01000000
146#define DMA_BUS_MODE_AAL 0x02000000 146#define DMA_BUS_MODE_AAL 0x02000000
147 147
148/* DMA CRS Control and Status Register Mapping */ 148/* DMA CRS Control and Status Register Mapping */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b1c48b975945..e7cbcd99c2cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -46,7 +46,7 @@ static void dwmac1000_core_init(void __iomem *ioaddr)
46#endif 46#endif
47} 47}
48 48
49static int dwmac1000_rx_coe_supported(void __iomem *ioaddr) 49static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
50{ 50{
51 u32 value = readl(ioaddr + GMAC_CONTROL); 51 u32 value = readl(ioaddr + GMAC_CONTROL);
52 52
@@ -211,7 +211,7 @@ static void dwmac1000_irq_status(void __iomem *ioaddr)
211 211
212static const struct stmmac_ops dwmac1000_ops = { 212static const struct stmmac_ops dwmac1000_ops = {
213 .core_init = dwmac1000_core_init, 213 .core_init = dwmac1000_core_init,
214 .rx_coe = dwmac1000_rx_coe_supported, 214 .rx_ipc = dwmac1000_rx_ipc_enable,
215 .dump_regs = dwmac1000_dump_regs, 215 .dump_regs = dwmac1000_dump_regs,
216 .host_irq_status = dwmac1000_irq_status, 216 .host_irq_status = dwmac1000_irq_status,
217 .set_filter = dwmac1000_set_filter, 217 .set_filter = dwmac1000_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 4d5402a1d262..3675c5731565 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
30#include "dwmac1000.h" 30#include "dwmac1000.h"
31#include "dwmac_dma.h" 31#include "dwmac_dma.h"
32 32
33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, 33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
34 u32 dma_rx) 34 int burst_len, u32 dma_tx, u32 dma_rx)
35{ 35{
36 u32 value = readl(ioaddr + DMA_BUS_MODE); 36 u32 value = readl(ioaddr + DMA_BUS_MODE);
37 int limit; 37 int limit;
@@ -48,15 +48,47 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
48 if (limit < 0) 48 if (limit < 0)
49 return -EBUSY; 49 return -EBUSY;
50 50
51 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | 51 /*
52 ((pbl << DMA_BUS_MODE_PBL_SHIFT) | 52 * Set the DMA PBL (Programmable Burst Length) mode
53 (pbl << DMA_BUS_MODE_RPBL_SHIFT)); 53 * Before stmmac core 3.50 this mode bit was 4xPBL, and
54 * post 3.5 mode bit acts as 8*PBL.
55 * For core rev < 3.5, when the core is set for 4xPBL mode, the
56 * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats
57 * depending on pbl value.
58 * For core rev > 3.5, when the core is set for 8xPBL mode, the
59 * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats
60 * depending on pbl value.
61 */
62 value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
63 (pbl << DMA_BUS_MODE_RPBL_SHIFT));
64
65 /* Set the Fixed burst mode */
66 if (fb)
67 value |= DMA_BUS_MODE_FB;
54 68
55#ifdef CONFIG_STMMAC_DA 69#ifdef CONFIG_STMMAC_DA
56 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ 70 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
57#endif 71#endif
58 writel(value, ioaddr + DMA_BUS_MODE); 72 writel(value, ioaddr + DMA_BUS_MODE);
59 73
74 /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
75 * for supported bursts.
76 *
77 * Note: This is applicable only for revision GMACv3.61a. For
78 * older version this register is reserved and shall have no
79 * effect.
80 *
81 * Note:
82 * For Fixed Burst Mode: if we directly write 0xFF to this
83 * register using the configurations pass from platform code,
84 * this would ensure that all bursts supported by core are set
85 * and those which are not supported would remain ineffective.
86 *
87 * For Non Fixed Burst Mode: provide the maximum value of the
88 * burst length. Any burst equal or below the provided burst
89 * length would be allowed to perform. */
90 writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
91
60 /* Mask interrupts by writing to CSR7 */ 92 /* Mask interrupts by writing to CSR7 */
61 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 93 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
62 94
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 138fb8dd1e87..efde50ff03f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -43,11 +43,6 @@ static void dwmac100_core_init(void __iomem *ioaddr)
43#endif 43#endif
44} 44}
45 45
46static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
47{
48 return 0;
49}
50
51static void dwmac100_dump_mac_regs(void __iomem *ioaddr) 46static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
52{ 47{
53 pr_info("\t----------------------------------------------\n" 48 pr_info("\t----------------------------------------------\n"
@@ -72,6 +67,11 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
72 readl(ioaddr + MAC_VLAN2)); 67 readl(ioaddr + MAC_VLAN2));
73} 68}
74 69
70static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
71{
72 return 0;
73}
74
75static void dwmac100_irq_status(void __iomem *ioaddr) 75static void dwmac100_irq_status(void __iomem *ioaddr)
76{ 76{
77 return; 77 return;
@@ -160,7 +160,7 @@ static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
160 160
161static const struct stmmac_ops dwmac100_ops = { 161static const struct stmmac_ops dwmac100_ops = {
162 .core_init = dwmac100_core_init, 162 .core_init = dwmac100_core_init,
163 .rx_coe = dwmac100_rx_coe_supported, 163 .rx_ipc = dwmac100_rx_ipc_enable,
164 .dump_regs = dwmac100_dump_mac_regs, 164 .dump_regs = dwmac100_dump_mac_regs,
165 .host_irq_status = dwmac100_irq_status, 165 .host_irq_status = dwmac100_irq_status,
166 .set_filter = dwmac100_set_filter, 166 .set_filter = dwmac100_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index bc17fd08b55d..92ed2e07609e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
32#include "dwmac100.h" 32#include "dwmac100.h"
33#include "dwmac_dma.h" 33#include "dwmac_dma.h"
34 34
35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, 35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
36 u32 dma_rx) 36 int burst_len, u32 dma_tx, u32 dma_rx)
37{ 37{
38 u32 value = readl(ioaddr + DMA_BUS_MODE); 38 u32 value = readl(ioaddr + DMA_BUS_MODE);
39 int limit; 39 int limit;
@@ -52,7 +52,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
52 52
53 /* Enable Application Access by writing to DMA CSR0 */ 53 /* Enable Application Access by writing to DMA CSR0 */
54 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), 54 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
55 ioaddr + DMA_BUS_MODE); 55 ioaddr + DMA_BUS_MODE);
56 56
57 /* Mask interrupts by writing to CSR7 */ 57 /* Mask interrupts by writing to CSR7 */
58 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 58 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 437edacd602e..6e0360f9cfde 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -32,6 +32,7 @@
32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ 32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ 33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ 34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
35#define DMA_AXI_BUS_MODE 0x00001028 /* AXI Bus Mode */
35#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ 36#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
36#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ 37#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
37#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ 38#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ad1b627f8ec2..2fc8ef95f97a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -22,6 +22,7 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/stmmac.h>
25#include "common.h" 26#include "common.h"
26#include "descs_com.h" 27#include "descs_com.h"
27 28
@@ -309,9 +310,17 @@ static void enh_desc_close_tx_desc(struct dma_desc *p)
309 p->des01.etx.interrupt = 1; 310 p->des01.etx.interrupt = 1;
310} 311}
311 312
312static int enh_desc_get_rx_frame_len(struct dma_desc *p) 313static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
313{ 314{
314 return p->des01.erx.frame_length; 315 /* The type-1 checksum offload engines append the checksum at
316 * the end of frame and the two bytes of checksum are added in
317 * the length.
318 * Adjust for that in the framelen for type-1 checksum offload
319 * engines. */
320 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
321 return p->des01.erx.frame_length - 2;
322 else
323 return p->des01.erx.frame_length;
315} 324}
316 325
317const struct stmmac_desc_ops enh_desc_ops = { 326const struct stmmac_desc_ops enh_desc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 25953bb45a73..68962c549a2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -22,6 +22,7 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/stmmac.h>
25#include "common.h" 26#include "common.h"
26#include "descs_com.h" 27#include "descs_com.h"
27 28
@@ -201,9 +202,17 @@ static void ndesc_close_tx_desc(struct dma_desc *p)
201 p->des01.tx.interrupt = 1; 202 p->des01.tx.interrupt = 1;
202} 203}
203 204
204static int ndesc_get_rx_frame_len(struct dma_desc *p) 205static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
205{ 206{
206 return p->des01.rx.frame_length; 207 /* The type-1 checksum offload engines append the checksum at
208 * the end of frame and the two bytes of checksum are added in
209 * the length.
210 * Adjust for that in the framelen for type-1 checksum offload
211 * engines. */
212 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
213 return p->des01.rx.frame_length - 2;
214 else
215 return p->des01.rx.frame_length;
207} 216}
208 217
209const struct stmmac_desc_ops ndesc_ops = { 218const struct stmmac_desc_ops ndesc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b4b095fdcf29..db2de9a49952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -21,7 +21,9 @@
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define STMMAC_RESOURCE_NAME "stmmaceth" 23#define STMMAC_RESOURCE_NAME "stmmaceth"
24#define DRV_MODULE_VERSION "Feb_2012" 24#define DRV_MODULE_VERSION "March_2012"
25
26#include <linux/clk.h>
25#include <linux/stmmac.h> 27#include <linux/stmmac.h>
26#include <linux/phy.h> 28#include <linux/phy.h>
27#include "common.h" 29#include "common.h"
@@ -56,8 +58,6 @@ struct stmmac_priv {
56 58
57 struct stmmac_extra_stats xstats; 59 struct stmmac_extra_stats xstats;
58 struct napi_struct napi; 60 struct napi_struct napi;
59
60 int rx_coe;
61 int no_csum_insertion; 61 int no_csum_insertion;
62 62
63 struct phy_device *phydev; 63 struct phy_device *phydev;
@@ -81,6 +81,10 @@ struct stmmac_priv {
81 struct stmmac_counters mmc; 81 struct stmmac_counters mmc;
82 struct dma_features dma_cap; 82 struct dma_features dma_cap;
83 int hw_cap_support; 83 int hw_cap_support;
84#ifdef CONFIG_HAVE_CLK
85 struct clk *stmmac_clk;
86#endif
87 int clk_csr;
84}; 88};
85 89
86extern int phyaddr; 90extern int phyaddr;
@@ -99,3 +103,42 @@ int stmmac_dvr_remove(struct net_device *ndev);
99struct stmmac_priv *stmmac_dvr_probe(struct device *device, 103struct stmmac_priv *stmmac_dvr_probe(struct device *device,
100 struct plat_stmmacenet_data *plat_dat, 104 struct plat_stmmacenet_data *plat_dat,
101 void __iomem *addr); 105 void __iomem *addr);
106
107#ifdef CONFIG_HAVE_CLK
108static inline int stmmac_clk_enable(struct stmmac_priv *priv)
109{
110 if (!IS_ERR(priv->stmmac_clk))
111 return clk_enable(priv->stmmac_clk);
112
113 return 0;
114}
115
116static inline void stmmac_clk_disable(struct stmmac_priv *priv)
117{
118 if (IS_ERR(priv->stmmac_clk))
119 return;
120
121 clk_disable(priv->stmmac_clk);
122}
123static inline int stmmac_clk_get(struct stmmac_priv *priv)
124{
125 priv->stmmac_clk = clk_get(priv->device, NULL);
126
127 if (IS_ERR(priv->stmmac_clk))
128 return PTR_ERR(priv->stmmac_clk);
129
130 return 0;
131}
132#else
133static inline int stmmac_clk_enable(struct stmmac_priv *priv)
134{
135 return 0;
136}
137static inline void stmmac_clk_disable(struct stmmac_priv *priv)
138{
139}
140static inline int stmmac_clk_get(struct stmmac_priv *priv)
141{
142 return 0;
143}
144#endif /* CONFIG_HAVE_CLK */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f98e1511660f..ce431846fc6f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
481 .get_wol = stmmac_get_wol, 481 .get_wol = stmmac_get_wol,
482 .set_wol = stmmac_set_wol, 482 .set_wol = stmmac_set_wol,
483 .get_sset_count = stmmac_get_sset_count, 483 .get_sset_count = stmmac_get_sset_count,
484 .get_ts_info = ethtool_op_get_ts_info,
484}; 485};
485 486
486void stmmac_set_ethtool_ops(struct net_device *netdev) 487void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48d56da62f08..1a4cf8128f91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -163,6 +163,38 @@ static void stmmac_verify_args(void)
163 pause = PAUSE_TIME; 163 pause = PAUSE_TIME;
164} 164}
165 165
166static void stmmac_clk_csr_set(struct stmmac_priv *priv)
167{
168#ifdef CONFIG_HAVE_CLK
169 u32 clk_rate;
170
171 if (IS_ERR(priv->stmmac_clk))
172 return;
173
174 clk_rate = clk_get_rate(priv->stmmac_clk);
175
176 /* Platform provided default clk_csr would be assumed valid
177 * for all other cases except for the below mentioned ones. */
178 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
179 if (clk_rate < CSR_F_35M)
180 priv->clk_csr = STMMAC_CSR_20_35M;
181 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
182 priv->clk_csr = STMMAC_CSR_35_60M;
183 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
184 priv->clk_csr = STMMAC_CSR_60_100M;
185 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
186 priv->clk_csr = STMMAC_CSR_100_150M;
187 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
188 priv->clk_csr = STMMAC_CSR_150_250M;
189 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
190 priv->clk_csr = STMMAC_CSR_250_300M;
191 } /* For values higher than the IEEE 802.3 specified frequency
192 * we can not estimate the proper divider as it is not known
193 * the frequency of clk_csr_i. So we do not change the default
194 * divider. */
195#endif
196}
197
166#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) 198#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
167static void print_pkt(unsigned char *buf, int len) 199static void print_pkt(unsigned char *buf, int len)
168{ 200{
@@ -307,7 +339,13 @@ static int stmmac_init_phy(struct net_device *dev)
307 priv->speed = 0; 339 priv->speed = 0;
308 priv->oldduplex = -1; 340 priv->oldduplex = -1;
309 341
310 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id); 342 if (priv->plat->phy_bus_name)
343 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
344 priv->plat->phy_bus_name, priv->plat->bus_id);
345 else
346 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
347 priv->plat->bus_id);
348
311 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 349 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
312 priv->plat->phy_addr); 350 priv->plat->phy_addr);
313 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 351 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -884,6 +922,24 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
884 priv->dev->dev_addr); 922 priv->dev->dev_addr);
885} 923}
886 924
925static int stmmac_init_dma_engine(struct stmmac_priv *priv)
926{
927 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
928
929 /* Some DMA parameters can be passed from the platform;
930 * in case of these are not passed we keep a default
931 * (good for all the chips) and init the DMA! */
932 if (priv->plat->dma_cfg) {
933 pbl = priv->plat->dma_cfg->pbl;
934 fixed_burst = priv->plat->dma_cfg->fixed_burst;
935 burst_len = priv->plat->dma_cfg->burst_len;
936 }
937
938 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst,
939 burst_len, priv->dma_tx_phy,
940 priv->dma_rx_phy);
941}
942
887/** 943/**
888 * stmmac_open - open entry point of the driver 944 * stmmac_open - open entry point of the driver
889 * @dev : pointer to the device structure. 945 * @dev : pointer to the device structure.
@@ -898,16 +954,6 @@ static int stmmac_open(struct net_device *dev)
898 struct stmmac_priv *priv = netdev_priv(dev); 954 struct stmmac_priv *priv = netdev_priv(dev);
899 int ret; 955 int ret;
900 956
901 stmmac_check_ether_addr(priv);
902
903 /* MDIO bus Registration */
904 ret = stmmac_mdio_register(dev);
905 if (ret < 0) {
906 pr_debug("%s: MDIO bus (id: %d) registration failed",
907 __func__, priv->plat->bus_id);
908 return ret;
909 }
910
911#ifdef CONFIG_STMMAC_TIMER 957#ifdef CONFIG_STMMAC_TIMER
912 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 958 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
913 if (unlikely(priv->tm == NULL)) 959 if (unlikely(priv->tm == NULL))
@@ -925,6 +971,10 @@ static int stmmac_open(struct net_device *dev)
925 } else 971 } else
926 priv->tm->enable = 1; 972 priv->tm->enable = 1;
927#endif 973#endif
974 stmmac_clk_enable(priv);
975
976 stmmac_check_ether_addr(priv);
977
928 ret = stmmac_init_phy(dev); 978 ret = stmmac_init_phy(dev);
929 if (unlikely(ret)) { 979 if (unlikely(ret)) {
930 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); 980 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
@@ -938,8 +988,7 @@ static int stmmac_open(struct net_device *dev)
938 init_dma_desc_rings(dev); 988 init_dma_desc_rings(dev);
939 989
940 /* DMA initialization and SW reset */ 990 /* DMA initialization and SW reset */
941 ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, 991 ret = stmmac_init_dma_engine(priv);
942 priv->dma_tx_phy, priv->dma_rx_phy);
943 if (ret < 0) { 992 if (ret < 0) {
944 pr_err("%s: DMA initialization failed\n", __func__); 993 pr_err("%s: DMA initialization failed\n", __func__);
945 goto open_error; 994 goto open_error;
@@ -1026,6 +1075,8 @@ open_error:
1026 if (priv->phydev) 1075 if (priv->phydev)
1027 phy_disconnect(priv->phydev); 1076 phy_disconnect(priv->phydev);
1028 1077
1078 stmmac_clk_disable(priv);
1079
1029 return ret; 1080 return ret;
1030} 1081}
1031 1082
@@ -1077,7 +1128,7 @@ static int stmmac_release(struct net_device *dev)
1077#ifdef CONFIG_STMMAC_DEBUG_FS 1128#ifdef CONFIG_STMMAC_DEBUG_FS
1078 stmmac_exit_fs(); 1129 stmmac_exit_fs();
1079#endif 1130#endif
1080 stmmac_mdio_unregister(dev); 1131 stmmac_clk_disable(priv);
1081 1132
1082 return 0; 1133 return 0;
1083} 1134}
@@ -1276,7 +1327,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1276 struct sk_buff *skb; 1327 struct sk_buff *skb;
1277 int frame_len; 1328 int frame_len;
1278 1329
1279 frame_len = priv->hw->desc->get_rx_frame_len(p); 1330 frame_len = priv->hw->desc->get_rx_frame_len(p,
1331 priv->plat->rx_coe);
1280 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 1332 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
1281 * Type frames (LLC/LLC-SNAP) */ 1333 * Type frames (LLC/LLC-SNAP) */
1282 if (unlikely(status != llc_snap)) 1334 if (unlikely(status != llc_snap))
@@ -1312,7 +1364,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1312#endif 1364#endif
1313 skb->protocol = eth_type_trans(skb, priv->dev); 1365 skb->protocol = eth_type_trans(skb, priv->dev);
1314 1366
1315 if (unlikely(!priv->rx_coe)) { 1367 if (unlikely(!priv->plat->rx_coe)) {
1316 /* No RX COE for old mac10/100 devices */ 1368 /* No RX COE for old mac10/100 devices */
1317 skb_checksum_none_assert(skb); 1369 skb_checksum_none_assert(skb);
1318 netif_receive_skb(skb); 1370 netif_receive_skb(skb);
@@ -1459,8 +1511,10 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
1459{ 1511{
1460 struct stmmac_priv *priv = netdev_priv(dev); 1512 struct stmmac_priv *priv = netdev_priv(dev);
1461 1513
1462 if (!priv->rx_coe) 1514 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
1463 features &= ~NETIF_F_RXCSUM; 1515 features &= ~NETIF_F_RXCSUM;
1516 else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
1517 features &= ~NETIF_F_IPV6_CSUM;
1464 if (!priv->plat->tx_coe) 1518 if (!priv->plat->tx_coe)
1465 features &= ~NETIF_F_ALL_CSUM; 1519 features &= ~NETIF_F_ALL_CSUM;
1466 1520
@@ -1765,17 +1819,32 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1765 * register (if supported). 1819 * register (if supported).
1766 */ 1820 */
1767 priv->plat->enh_desc = priv->dma_cap.enh_desc; 1821 priv->plat->enh_desc = priv->dma_cap.enh_desc;
1768 priv->plat->tx_coe = priv->dma_cap.tx_coe;
1769 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 1822 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
1823
1824 priv->plat->tx_coe = priv->dma_cap.tx_coe;
1825
1826 if (priv->dma_cap.rx_coe_type2)
1827 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
1828 else if (priv->dma_cap.rx_coe_type1)
1829 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
1830
1770 } else 1831 } else
1771 pr_info(" No HW DMA feature register supported"); 1832 pr_info(" No HW DMA feature register supported");
1772 1833
1773 /* Select the enhnaced/normal descriptor structures */ 1834 /* Select the enhnaced/normal descriptor structures */
1774 stmmac_selec_desc_mode(priv); 1835 stmmac_selec_desc_mode(priv);
1775 1836
1776 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); 1837 /* Enable the IPC (Checksum Offload) and check if the feature has been
1777 if (priv->rx_coe) 1838 * enabled during the core configuration. */
1778 pr_info(" RX Checksum Offload Engine supported\n"); 1839 ret = priv->hw->mac->rx_ipc(priv->ioaddr);
1840 if (!ret) {
1841 pr_warning(" RX IPC Checksum Offload not configured.\n");
1842 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1843 }
1844
1845 if (priv->plat->rx_coe)
1846 pr_info(" RX Checksum Offload Engine supported (type %d)\n",
1847 priv->plat->rx_coe);
1779 if (priv->plat->tx_coe) 1848 if (priv->plat->tx_coe)
1780 pr_info(" TX Checksum insertion supported\n"); 1849 pr_info(" TX Checksum insertion supported\n");
1781 1850
@@ -1856,6 +1925,28 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1856 goto error; 1925 goto error;
1857 } 1926 }
1858 1927
1928 if (stmmac_clk_get(priv))
1929 pr_warning("%s: warning: cannot get CSR clock\n", __func__);
1930
1931 /* If a specific clk_csr value is passed from the platform
1932 * this means that the CSR Clock Range selection cannot be
1933 * changed at run-time and it is fixed. Viceversa the driver'll try to
1934 * set the MDC clock dynamically according to the csr actual
1935 * clock input.
1936 */
1937 if (!priv->plat->clk_csr)
1938 stmmac_clk_csr_set(priv);
1939 else
1940 priv->clk_csr = priv->plat->clk_csr;
1941
1942 /* MDIO bus Registration */
1943 ret = stmmac_mdio_register(ndev);
1944 if (ret < 0) {
1945 pr_debug("%s: MDIO bus (id: %d) registration failed",
1946 __func__, priv->plat->bus_id);
1947 goto error;
1948 }
1949
1859 return priv; 1950 return priv;
1860 1951
1861error: 1952error:
@@ -1883,6 +1974,7 @@ int stmmac_dvr_remove(struct net_device *ndev)
1883 priv->hw->dma->stop_tx(priv->ioaddr); 1974 priv->hw->dma->stop_tx(priv->ioaddr);
1884 1975
1885 stmmac_set_mac(priv->ioaddr, false); 1976 stmmac_set_mac(priv->ioaddr, false);
1977 stmmac_mdio_unregister(ndev);
1886 netif_carrier_off(ndev); 1978 netif_carrier_off(ndev);
1887 unregister_netdev(ndev); 1979 unregister_netdev(ndev);
1888 free_netdev(ndev); 1980 free_netdev(ndev);
@@ -1925,9 +2017,11 @@ int stmmac_suspend(struct net_device *ndev)
1925 /* Enable Power down mode by programming the PMT regs */ 2017 /* Enable Power down mode by programming the PMT regs */
1926 if (device_may_wakeup(priv->device)) 2018 if (device_may_wakeup(priv->device))
1927 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 2019 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1928 else 2020 else {
1929 stmmac_set_mac(priv->ioaddr, false); 2021 stmmac_set_mac(priv->ioaddr, false);
1930 2022 /* Disable clock in case of PWM is off */
2023 stmmac_clk_disable(priv);
2024 }
1931 spin_unlock(&priv->lock); 2025 spin_unlock(&priv->lock);
1932 return 0; 2026 return 0;
1933} 2027}
@@ -1948,6 +2042,9 @@ int stmmac_resume(struct net_device *ndev)
1948 * from another devices (e.g. serial console). */ 2042 * from another devices (e.g. serial console). */
1949 if (device_may_wakeup(priv->device)) 2043 if (device_may_wakeup(priv->device))
1950 priv->hw->mac->pmt(priv->ioaddr, 0); 2044 priv->hw->mac->pmt(priv->ioaddr, 0);
2045 else
2046 /* enable the clk prevously disabled */
2047 stmmac_clk_enable(priv);
1951 2048
1952 netif_device_attach(ndev); 2049 netif_device_attach(ndev);
1953 2050
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 73195329aa46..ade108232048 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -34,6 +34,22 @@
34#define MII_BUSY 0x00000001 34#define MII_BUSY 0x00000001
35#define MII_WRITE 0x00000002 35#define MII_WRITE 0x00000002
36 36
37static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
38{
39 unsigned long curr;
40 unsigned long finish = jiffies + 3 * HZ;
41
42 do {
43 curr = jiffies;
44 if (readl(ioaddr + mii_addr) & MII_BUSY)
45 cpu_relax();
46 else
47 return 0;
48 } while (!time_after_eq(curr, finish));
49
50 return -EBUSY;
51}
52
37/** 53/**
38 * stmmac_mdio_read 54 * stmmac_mdio_read
39 * @bus: points to the mii_bus structure 55 * @bus: points to the mii_bus structure
@@ -54,11 +70,15 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
54 int data; 70 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 71 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
56 ((phyreg << 6) & (0x000007C0))); 72 ((phyreg << 6) & (0x000007C0)));
57 regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2); 73 regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
74
75 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
76 return -EBUSY;
58 77
59 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
60 writel(regValue, priv->ioaddr + mii_address); 78 writel(regValue, priv->ioaddr + mii_address);
61 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 79
80 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
81 return -EBUSY;
62 82
63 /* Read the data from the MII data register */ 83 /* Read the data from the MII data register */
64 data = (int)readl(priv->ioaddr + mii_data); 84 data = (int)readl(priv->ioaddr + mii_data);
@@ -86,20 +106,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
86 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 106 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
87 | MII_WRITE; 107 | MII_WRITE;
88 108
89 value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2); 109 value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
90
91 110
92 /* Wait until any existing MII operation is complete */ 111 /* Wait until any existing MII operation is complete */
93 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 112 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
113 return -EBUSY;
94 114
95 /* Set the MII address register to write */ 115 /* Set the MII address register to write */
96 writel(phydata, priv->ioaddr + mii_data); 116 writel(phydata, priv->ioaddr + mii_data);
97 writel(value, priv->ioaddr + mii_address); 117 writel(value, priv->ioaddr + mii_address);
98 118
99 /* Wait until any existing MII operation is complete */ 119 /* Wait until any existing MII operation is complete */
100 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 120 return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
101
102 return 0;
103} 121}
104 122
105/** 123/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index da66ed7c3c5d..58fab5303e9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -28,6 +28,7 @@
28 28
29struct plat_stmmacenet_data plat_dat; 29struct plat_stmmacenet_data plat_dat;
30struct stmmac_mdio_bus_data mdio_data; 30struct stmmac_mdio_bus_data mdio_data;
31struct stmmac_dma_cfg dma_cfg;
31 32
32static void stmmac_default_data(void) 33static void stmmac_default_data(void)
33{ 34{
@@ -35,7 +36,6 @@ static void stmmac_default_data(void)
35 plat_dat.bus_id = 1; 36 plat_dat.bus_id = 1;
36 plat_dat.phy_addr = 0; 37 plat_dat.phy_addr = 0;
37 plat_dat.interface = PHY_INTERFACE_MODE_GMII; 38 plat_dat.interface = PHY_INTERFACE_MODE_GMII;
38 plat_dat.pbl = 32;
39 plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ 39 plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
40 plat_dat.has_gmac = 1; 40 plat_dat.has_gmac = 1;
41 plat_dat.force_sf_dma_mode = 1; 41 plat_dat.force_sf_dma_mode = 1;
@@ -44,6 +44,10 @@ static void stmmac_default_data(void)
44 mdio_data.phy_reset = NULL; 44 mdio_data.phy_reset = NULL;
45 mdio_data.phy_mask = 0; 45 mdio_data.phy_mask = 0;
46 plat_dat.mdio_bus_data = &mdio_data; 46 plat_dat.mdio_bus_data = &mdio_data;
47
48 dma_cfg.pbl = 32;
49 dma_cfg.burst_len = DMA_AXI_BLEN_256;
50 plat_dat.dma_cfg = &dma_cfg;
47} 51}
48 52
49/** 53/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 116529a366b2..3dd8f0803808 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -50,7 +50,6 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
50 * once needed on other platforms. 50 * once needed on other platforms.
51 */ 51 */
52 if (of_device_is_compatible(np, "st,spear600-gmac")) { 52 if (of_device_is_compatible(np, "st,spear600-gmac")) {
53 plat->pbl = 8;
54 plat->has_gmac = 1; 53 plat->has_gmac = 1;
55 plat->pmt = 1; 54 plat->pmt = 1;
56 } 55 }
@@ -189,9 +188,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
189 if (priv->plat->exit) 188 if (priv->plat->exit)
190 priv->plat->exit(pdev); 189 priv->plat->exit(pdev);
191 190
192 if (priv->plat->exit)
193 priv->plat->exit(pdev);
194
195 platform_set_drvdata(pdev, NULL); 191 platform_set_drvdata(pdev, NULL);
196 192
197 iounmap((void *)priv->ioaddr); 193 iounmap((void *)priv->ioaddr);
@@ -218,14 +214,26 @@ static int stmmac_pltfr_resume(struct device *dev)
218 214
219int stmmac_pltfr_freeze(struct device *dev) 215int stmmac_pltfr_freeze(struct device *dev)
220{ 216{
217 int ret;
218 struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
221 struct net_device *ndev = dev_get_drvdata(dev); 219 struct net_device *ndev = dev_get_drvdata(dev);
220 struct platform_device *pdev = to_platform_device(dev);
222 221
223 return stmmac_freeze(ndev); 222 ret = stmmac_freeze(ndev);
223 if (plat_dat->exit)
224 plat_dat->exit(pdev);
225
226 return ret;
224} 227}
225 228
226int stmmac_pltfr_restore(struct device *dev) 229int stmmac_pltfr_restore(struct device *dev)
227{ 230{
231 struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
228 struct net_device *ndev = dev_get_drvdata(dev); 232 struct net_device *ndev = dev_get_drvdata(dev);
233 struct platform_device *pdev = to_platform_device(dev);
234
235 if (plat_dat->init)
236 plat_dat->init(pdev);
229 237
230 return stmmac_restore(ndev); 238 return stmmac_restore(ndev);
231} 239}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 558409ff4058..dc065face7ac 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -401,7 +401,7 @@ static int gem_rxmac_reset(struct gem *gp)
401 return 1; 401 return 1;
402 } 402 }
403 403
404 udelay(5000); 404 mdelay(5);
405 405
406 /* Execute RX reset command. */ 406 /* Execute RX reset command. */
407 writel(gp->swrst_base | GREG_SWRST_RXRST, 407 writel(gp->swrst_base | GREG_SWRST_RXRST,
@@ -2898,7 +2898,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2898 } 2898 }
2899 2899
2900 gp->pdev = pdev; 2900 gp->pdev = pdev;
2901 dev->base_addr = (long) pdev;
2902 gp->dev = dev; 2901 gp->dev = dev;
2903 2902
2904 gp->msg_enable = DEFAULT_MSG; 2903 gp->msg_enable = DEFAULT_MSG;
@@ -2972,7 +2971,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2972 netif_napi_add(dev, &gp->napi, gem_poll, 64); 2971 netif_napi_add(dev, &gp->napi, gem_poll, 64);
2973 dev->ethtool_ops = &gem_ethtool_ops; 2972 dev->ethtool_ops = &gem_ethtool_ops;
2974 dev->watchdog_timeo = 5 * HZ; 2973 dev->watchdog_timeo = 5 * HZ;
2975 dev->irq = pdev->irq;
2976 dev->dma = 0; 2974 dev->dma = 0;
2977 2975
2978 /* Set that now, in case PM kicks in now */ 2976 /* Set that now, in case PM kicks in now */
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index b95e7e681b38..dfc00c4683e5 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2182,11 +2182,12 @@ static int happy_meal_open(struct net_device *dev)
2182 * into a single source which we register handling at probe time. 2182 * into a single source which we register handling at probe time.
2183 */ 2183 */
2184 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { 2184 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2185 if (request_irq(dev->irq, happy_meal_interrupt, 2185 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2186 IRQF_SHARED, dev->name, (void *)dev)) { 2186 dev->name, dev);
2187 if (res) {
2187 HMD(("EAGAIN\n")); 2188 HMD(("EAGAIN\n"));
2188 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", 2189 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2189 dev->irq); 2190 hp->irq);
2190 2191
2191 return -EAGAIN; 2192 return -EAGAIN;
2192 } 2193 }
@@ -2199,7 +2200,7 @@ static int happy_meal_open(struct net_device *dev)
2199 spin_unlock_irq(&hp->happy_lock); 2200 spin_unlock_irq(&hp->happy_lock);
2200 2201
2201 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)) 2202 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2202 free_irq(dev->irq, dev); 2203 free_irq(hp->irq, dev);
2203 return res; 2204 return res;
2204} 2205}
2205 2206
@@ -2221,7 +2222,7 @@ static int happy_meal_close(struct net_device *dev)
2221 * time and never unregister. 2222 * time and never unregister.
2222 */ 2223 */
2223 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) 2224 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2224 free_irq(dev->irq, dev); 2225 free_irq(hp->irq, dev);
2225 2226
2226 return 0; 2227 return 0;
2227} 2228}
@@ -2777,7 +2778,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2777 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 2778 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2778 dev->features |= dev->hw_features | NETIF_F_RXCSUM; 2779 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2779 2780
2780 dev->irq = op->archdata.irqs[0]; 2781 hp->irq = op->archdata.irqs[0];
2781 2782
2782#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) 2783#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
2783 /* Hook up SBUS register/descriptor accessors. */ 2784 /* Hook up SBUS register/descriptor accessors. */
@@ -2981,8 +2982,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
2981 if (hme_version_printed++ == 0) 2982 if (hme_version_printed++ == 0)
2982 printk(KERN_INFO "%s", version); 2983 printk(KERN_INFO "%s", version);
2983 2984
2984 dev->base_addr = (long) pdev;
2985
2986 hp = netdev_priv(dev); 2985 hp = netdev_priv(dev);
2987 2986
2988 hp->happy_dev = pdev; 2987 hp->happy_dev = pdev;
@@ -3087,12 +3086,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3087 3086
3088 init_timer(&hp->happy_timer); 3087 init_timer(&hp->happy_timer);
3089 3088
3089 hp->irq = pdev->irq;
3090 hp->dev = dev; 3090 hp->dev = dev;
3091 dev->netdev_ops = &hme_netdev_ops; 3091 dev->netdev_ops = &hme_netdev_ops;
3092 dev->watchdog_timeo = 5*HZ; 3092 dev->watchdog_timeo = 5*HZ;
3093 dev->ethtool_ops = &hme_ethtool_ops; 3093 dev->ethtool_ops = &hme_ethtool_ops;
3094 dev->irq = pdev->irq;
3095 dev->dma = 0;
3096 3094
3097 /* Happy Meal can do it all... */ 3095 /* Happy Meal can do it all... */
3098 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 3096 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 64f278360d89..f4307654e4ae 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -432,6 +432,7 @@ struct happy_meal {
432 432
433 dma_addr_t hblock_dvma; /* DVMA visible address happy block */ 433 dma_addr_t hblock_dvma; /* DVMA visible address happy block */
434 unsigned int happy_flags; /* Driver state flags */ 434 unsigned int happy_flags; /* Driver state flags */
435 int irq;
435 enum happy_transceiver tcvr_type; /* Kind of transceiver in use */ 436 enum happy_transceiver tcvr_type; /* Kind of transceiver in use */
436 unsigned int happy_bursts; /* Get your mind out of the gutter */ 437 unsigned int happy_bursts; /* Get your mind out of the gutter */
437 unsigned int paddr; /* PHY address for transceiver */ 438 unsigned int paddr; /* PHY address for transceiver */
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ad973ffc9ff3..8846516678c3 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1317,7 +1317,7 @@ static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
1317 1317
1318static void print_rxfd(struct rxf_desc *rxfd) 1318static void print_rxfd(struct rxf_desc *rxfd)
1319{ 1319{
1320 DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n" 1320 DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
1321 "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n", 1321 "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
1322 rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len); 1322 rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
1323} 1323}
@@ -1988,10 +1988,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1988 /* these fields are used for info purposes only 1988 /* these fields are used for info purposes only
1989 * so we can have them same for all ports of the board */ 1989 * so we can have them same for all ports of the board */
1990 ndev->if_port = port; 1990 ndev->if_port = port;
1991 ndev->base_addr = pciaddr;
1992 ndev->mem_start = pciaddr;
1993 ndev->mem_end = pciaddr + regionSize;
1994 ndev->irq = pdev->irq;
1995 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO 1991 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
1996 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 1992 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1997 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM 1993 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 34558766cbf0..d614c374ed9d 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -92,7 +92,7 @@ enum cpdma_state {
92 CPDMA_STATE_TEARDOWN, 92 CPDMA_STATE_TEARDOWN,
93}; 93};
94 94
95const char *cpdma_state_str[] = { "idle", "active", "teardown" }; 95static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
96 96
97struct cpdma_ctlr { 97struct cpdma_ctlr {
98 enum cpdma_state state; 98 enum cpdma_state state;
@@ -276,6 +276,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
276 ctlr->num_chan = CPDMA_MAX_CHANNELS; 276 ctlr->num_chan = CPDMA_MAX_CHANNELS;
277 return ctlr; 277 return ctlr;
278} 278}
279EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
279 280
280int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) 281int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
281{ 282{
@@ -321,6 +322,7 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
321 spin_unlock_irqrestore(&ctlr->lock, flags); 322 spin_unlock_irqrestore(&ctlr->lock, flags);
322 return 0; 323 return 0;
323} 324}
325EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
324 326
325int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) 327int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
326{ 328{
@@ -351,6 +353,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
351 spin_unlock_irqrestore(&ctlr->lock, flags); 353 spin_unlock_irqrestore(&ctlr->lock, flags);
352 return 0; 354 return 0;
353} 355}
356EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
354 357
355int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) 358int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
356{ 359{
@@ -421,6 +424,7 @@ int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
421 spin_unlock_irqrestore(&ctlr->lock, flags); 424 spin_unlock_irqrestore(&ctlr->lock, flags);
422 return 0; 425 return 0;
423} 426}
427EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
424 428
425int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) 429int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
426{ 430{
@@ -444,6 +448,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
444 kfree(ctlr); 448 kfree(ctlr);
445 return ret; 449 return ret;
446} 450}
451EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
447 452
448int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) 453int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
449{ 454{
@@ -528,6 +533,7 @@ err_chan_busy:
528err_chan_alloc: 533err_chan_alloc:
529 return ERR_PTR(ret); 534 return ERR_PTR(ret);
530} 535}
536EXPORT_SYMBOL_GPL(cpdma_chan_create);
531 537
532int cpdma_chan_destroy(struct cpdma_chan *chan) 538int cpdma_chan_destroy(struct cpdma_chan *chan)
533{ 539{
@@ -545,6 +551,7 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
545 kfree(chan); 551 kfree(chan);
546 return 0; 552 return 0;
547} 553}
554EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
548 555
549int cpdma_chan_get_stats(struct cpdma_chan *chan, 556int cpdma_chan_get_stats(struct cpdma_chan *chan,
550 struct cpdma_chan_stats *stats) 557 struct cpdma_chan_stats *stats)
@@ -693,6 +700,7 @@ unlock_ret:
693 spin_unlock_irqrestore(&chan->lock, flags); 700 spin_unlock_irqrestore(&chan->lock, flags);
694 return ret; 701 return ret;
695} 702}
703EXPORT_SYMBOL_GPL(cpdma_chan_submit);
696 704
697static void __cpdma_chan_free(struct cpdma_chan *chan, 705static void __cpdma_chan_free(struct cpdma_chan *chan,
698 struct cpdma_desc __iomem *desc, 706 struct cpdma_desc __iomem *desc,
@@ -776,6 +784,7 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota)
776 } 784 }
777 return used; 785 return used;
778} 786}
787EXPORT_SYMBOL_GPL(cpdma_chan_process);
779 788
780int cpdma_chan_start(struct cpdma_chan *chan) 789int cpdma_chan_start(struct cpdma_chan *chan)
781{ 790{
@@ -803,6 +812,7 @@ int cpdma_chan_start(struct cpdma_chan *chan)
803 spin_unlock_irqrestore(&chan->lock, flags); 812 spin_unlock_irqrestore(&chan->lock, flags);
804 return 0; 813 return 0;
805} 814}
815EXPORT_SYMBOL_GPL(cpdma_chan_start);
806 816
807int cpdma_chan_stop(struct cpdma_chan *chan) 817int cpdma_chan_stop(struct cpdma_chan *chan)
808{ 818{
@@ -863,6 +873,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
863 spin_unlock_irqrestore(&chan->lock, flags); 873 spin_unlock_irqrestore(&chan->lock, flags);
864 return 0; 874 return 0;
865} 875}
876EXPORT_SYMBOL_GPL(cpdma_chan_stop);
866 877
867int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable) 878int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
868{ 879{
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 174a3348f676..8aa33326bec3 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -627,6 +627,7 @@ static const struct ethtool_ops ethtool_ops = {
627 .get_link = ethtool_op_get_link, 627 .get_link = ethtool_op_get_link,
628 .get_coalesce = emac_get_coalesce, 628 .get_coalesce = emac_get_coalesce,
629 .set_coalesce = emac_set_coalesce, 629 .set_coalesce = emac_set_coalesce,
630 .get_ts_info = ethtool_op_get_ts_info,
630}; 631};
631 632
632/** 633/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 2757c7d6e633..e4e47088e26b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
181 __davinci_mdio_reset(data); 181 __davinci_mdio_reset(data);
182 return -EAGAIN; 182 return -EAGAIN;
183 } 183 }
184
185 reg = __raw_readl(&regs->user[0].access);
186 if ((reg & USERACCESS_GO) == 0)
187 return 0;
188
184 dev_err(data->dev, "timed out waiting for user access\n"); 189 dev_err(data->dev, "timed out waiting for user access\n");
185 return -ETIMEDOUT; 190 return -ETIMEDOUT;
186} 191}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 817ad3bc4957..bb8b802a328b 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -2545,7 +2545,7 @@ static void tlan_phy_reset(struct net_device *dev)
2545 2545
2546 phy = priv->phy[priv->phy_num]; 2546 phy = priv->phy[priv->phy_num];
2547 2547
2548 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name); 2548 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
2549 tlan_mii_sync(dev->base_addr); 2549 tlan_mii_sync(dev->base_addr);
2550 value = MII_GC_LOOPBK | MII_GC_RESET; 2550 value = MII_GC_LOOPBK | MII_GC_RESET;
2551 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value); 2551 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 261356c2dc99..3d501ec7fad7 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -342,6 +342,21 @@ inline int __netio_fastio1(u32 fastio_index, u32 arg0)
342} 342}
343 343
344 344
345static void tile_net_return_credit(struct tile_net_cpu *info)
346{
347 struct tile_netio_queue *queue = &info->queue;
348 netio_queue_user_impl_t *qup = &queue->__user_part;
349
350 /* Return four credits after every fourth packet. */
351 if (--qup->__receive_credit_remaining == 0) {
352 u32 interval = qup->__receive_credit_interval;
353 qup->__receive_credit_remaining = interval;
354 __netio_fastio_return_credits(qup->__fastio_index, interval);
355 }
356}
357
358
359
345/* 360/*
346 * Provide a linux buffer to LIPP. 361 * Provide a linux buffer to LIPP.
347 */ 362 */
@@ -433,7 +448,7 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
433 struct sk_buff **skb_ptr; 448 struct sk_buff **skb_ptr;
434 449
435 /* Request 96 extra bytes for alignment purposes. */ 450 /* Request 96 extra bytes for alignment purposes. */
436 skb = netdev_alloc_skb(info->napi->dev, len + padding); 451 skb = netdev_alloc_skb(info->napi.dev, len + padding);
437 if (skb == NULL) 452 if (skb == NULL)
438 return false; 453 return false;
439 454
@@ -864,19 +879,11 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
864 879
865 stats->rx_packets++; 880 stats->rx_packets++;
866 stats->rx_bytes += len; 881 stats->rx_bytes += len;
867
868 if (small)
869 info->num_needed_small_buffers++;
870 else
871 info->num_needed_large_buffers++;
872 } 882 }
873 883
874 /* Return four credits after every fourth packet. */ 884 /* ISSUE: It would be nice to defer this until the packet has */
875 if (--qup->__receive_credit_remaining == 0) { 885 /* actually been processed. */
876 u32 interval = qup->__receive_credit_interval; 886 tile_net_return_credit(info);
877 qup->__receive_credit_remaining = interval;
878 __netio_fastio_return_credits(qup->__fastio_index, interval);
879 }
880 887
881 /* Consume this packet. */ 888 /* Consume this packet. */
882 qup->__packet_receive_read = index2; 889 qup->__packet_receive_read = index2;
@@ -1543,7 +1550,7 @@ static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
1543 1550
1544 /* Drain all the LIPP buffers. */ 1551 /* Drain all the LIPP buffers. */
1545 while (true) { 1552 while (true) {
1546 int buffer; 1553 unsigned int buffer;
1547 1554
1548 /* NOTE: This should never fail. */ 1555 /* NOTE: This should never fail. */
1549 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, 1556 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
@@ -1707,7 +1714,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1707 if (!hash_default) { 1714 if (!hash_default) {
1708 void *va = pfn_to_kaddr(pfn) + f->page_offset; 1715 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1709 BUG_ON(PageHighMem(skb_frag_page(f))); 1716 BUG_ON(PageHighMem(skb_frag_page(f)));
1710 finv_buffer_remote(va, f->size, 0); 1717 finv_buffer_remote(va, skb_frag_size(f), 0);
1711 } 1718 }
1712 1719
1713 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; 1720 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1735,8 +1742,8 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1735 * Sometimes, if "sendfile()" requires copying, we will be called with 1742 * Sometimes, if "sendfile()" requires copying, we will be called with
1736 * "data" containing the header and payload, with "frags" being empty. 1743 * "data" containing the header and payload, with "frags" being empty.
1737 * 1744 *
1738 * In theory, "sh->nr_frags" could be 3, but in practice, it seems 1745 * Sometimes, for example when using NFS over TCP, a single segment can
1739 * that this will never actually happen. 1746 * span 3 fragments, which must be handled carefully in LEPP.
1740 * 1747 *
1741 * See "emulate_large_send_offload()" for some reference code, which 1748 * See "emulate_large_send_offload()" for some reference code, which
1742 * does not handle checksumming. 1749 * does not handle checksumming.
@@ -1844,10 +1851,8 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1844 1851
1845 spin_lock_irqsave(&priv->eq_lock, irqflags); 1852 spin_lock_irqsave(&priv->eq_lock, irqflags);
1846 1853
1847 /* 1854 /* Handle completions if needed to make room. */
1848 * Handle completions if needed to make room. 1855 /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
1849 * HACK: Spin until there is sufficient room.
1850 */
1851 if (lepp_num_free_comp_slots(eq) == 0) { 1856 if (lepp_num_free_comp_slots(eq) == 0) {
1852 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); 1857 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
1853 if (nolds == 0) { 1858 if (nolds == 0) {
@@ -1861,6 +1866,7 @@ busy:
1861 cmd_tail = eq->cmd_tail; 1866 cmd_tail = eq->cmd_tail;
1862 1867
1863 /* Prepare to advance, detecting full queue. */ 1868 /* Prepare to advance, detecting full queue. */
1869 /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
1864 cmd_next = cmd_tail + cmd_size; 1870 cmd_next = cmd_tail + cmd_size;
1865 if (cmd_tail < cmd_head && cmd_next >= cmd_head) 1871 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1866 goto busy; 1872 goto busy;
@@ -2023,10 +2029,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
2023 2029
2024 spin_lock_irqsave(&priv->eq_lock, irqflags); 2030 spin_lock_irqsave(&priv->eq_lock, irqflags);
2025 2031
2026 /* 2032 /* Handle completions if needed to make room. */
2027 * Handle completions if needed to make room. 2033 /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
2028 * HACK: Spin until there is sufficient room.
2029 */
2030 if (lepp_num_free_comp_slots(eq) == 0) { 2034 if (lepp_num_free_comp_slots(eq) == 0) {
2031 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); 2035 nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
2032 if (nolds == 0) { 2036 if (nolds == 0) {
@@ -2040,6 +2044,7 @@ busy:
2040 cmd_tail = eq->cmd_tail; 2044 cmd_tail = eq->cmd_tail;
2041 2045
2042 /* Copy the commands, or fail. */ 2046 /* Copy the commands, or fail. */
2047 /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
2043 for (i = 0; i < num_frags; i++) { 2048 for (i = 0; i < num_frags; i++) {
2044 2049
2045 /* Prepare to advance, detecting full queue. */ 2050 /* Prepare to advance, detecting full queue. */
@@ -2261,6 +2266,23 @@ static int tile_net_get_mac(struct net_device *dev)
2261 return 0; 2266 return 0;
2262} 2267}
2263 2268
2269
2270#ifdef CONFIG_NET_POLL_CONTROLLER
2271/*
2272 * Polling 'interrupt' - used by things like netconsole to send skbs
2273 * without having to re-enable interrupts. It's not called while
2274 * the interrupt routine is executing.
2275 */
2276static void tile_net_netpoll(struct net_device *dev)
2277{
2278 struct tile_net_priv *priv = netdev_priv(dev);
2279 disable_percpu_irq(priv->intr_id);
2280 tile_net_handle_ingress_interrupt(priv->intr_id, dev);
2281 enable_percpu_irq(priv->intr_id, 0);
2282}
2283#endif
2284
2285
2264static const struct net_device_ops tile_net_ops = { 2286static const struct net_device_ops tile_net_ops = {
2265 .ndo_open = tile_net_open, 2287 .ndo_open = tile_net_open,
2266 .ndo_stop = tile_net_stop, 2288 .ndo_stop = tile_net_stop,
@@ -2269,7 +2291,10 @@ static const struct net_device_ops tile_net_ops = {
2269 .ndo_get_stats = tile_net_get_stats, 2291 .ndo_get_stats = tile_net_get_stats,
2270 .ndo_change_mtu = tile_net_change_mtu, 2292 .ndo_change_mtu = tile_net_change_mtu,
2271 .ndo_tx_timeout = tile_net_tx_timeout, 2293 .ndo_tx_timeout = tile_net_tx_timeout,
2272 .ndo_set_mac_address = tile_net_set_mac_address 2294 .ndo_set_mac_address = tile_net_set_mac_address,
2295#ifdef CONFIG_NET_POLL_CONTROLLER
2296 .ndo_poll_controller = tile_net_netpoll,
2297#endif
2273}; 2298};
2274 2299
2275 2300
@@ -2409,7 +2434,7 @@ static void tile_net_cleanup(void)
2409 */ 2434 */
2410static int tile_net_init_module(void) 2435static int tile_net_init_module(void)
2411{ 2436{
2412 pr_info("Tilera IPP Net Driver\n"); 2437 pr_info("Tilera Network Driver\n");
2413 2438
2414 tile_net_devs[0] = tile_net_dev_init("xgbe0"); 2439 tile_net_devs[0] = tile_net_dev_init("xgbe0");
2415 tile_net_devs[1] = tile_net_dev_init("xgbe1"); 2440 tile_net_devs[1] = tile_net_dev_init("xgbe1");
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index fcfa01f7ceb6..0459c096629f 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -689,9 +689,12 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
689#ifdef CONFIG_NET_POLL_CONTROLLER 689#ifdef CONFIG_NET_POLL_CONTROLLER
690static void rhine_poll(struct net_device *dev) 690static void rhine_poll(struct net_device *dev)
691{ 691{
692 disable_irq(dev->irq); 692 struct rhine_private *rp = netdev_priv(dev);
693 rhine_interrupt(dev->irq, (void *)dev); 693 const int irq = rp->pdev->irq;
694 enable_irq(dev->irq); 694
695 disable_irq(irq);
696 rhine_interrupt(irq, dev);
697 enable_irq(irq);
695} 698}
696#endif 699#endif
697 700
@@ -972,7 +975,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
972 } 975 }
973#endif /* USE_MMIO */ 976#endif /* USE_MMIO */
974 977
975 dev->base_addr = (unsigned long)ioaddr;
976 rp->base = ioaddr; 978 rp->base = ioaddr;
977 979
978 /* Get chip registers into a sane state */ 980 /* Get chip registers into a sane state */
@@ -995,8 +997,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
995 if (!phy_id) 997 if (!phy_id)
996 phy_id = ioread8(ioaddr + 0x6C); 998 phy_id = ioread8(ioaddr + 0x6C);
997 999
998 dev->irq = pdev->irq;
999
1000 spin_lock_init(&rp->lock); 1000 spin_lock_init(&rp->lock);
1001 mutex_init(&rp->task_lock); 1001 mutex_init(&rp->task_lock);
1002 INIT_WORK(&rp->reset_task, rhine_reset_task); 1002 INIT_WORK(&rp->reset_task, rhine_reset_task);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 8a5d7c100a5e..ea3e0a21ba74 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2488,8 +2488,8 @@ static int velocity_close(struct net_device *dev)
2488 2488
2489 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) 2489 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2490 velocity_get_ip(vptr); 2490 velocity_get_ip(vptr);
2491 if (dev->irq != 0) 2491
2492 free_irq(dev->irq, dev); 2492 free_irq(vptr->pdev->irq, dev);
2493 2493
2494 velocity_free_rings(vptr); 2494 velocity_free_rings(vptr);
2495 2495
@@ -2755,8 +2755,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2755 if (ret < 0) 2755 if (ret < 0)
2756 goto err_free_dev; 2756 goto err_free_dev;
2757 2757
2758 dev->irq = pdev->irq;
2759
2760 ret = velocity_get_pci_info(vptr, pdev); 2758 ret = velocity_get_pci_info(vptr, pdev);
2761 if (ret < 0) { 2759 if (ret < 0) {
2762 /* error message already printed */ 2760 /* error message already printed */
@@ -2779,8 +2777,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2779 2777
2780 mac_wol_reset(regs); 2778 mac_wol_reset(regs);
2781 2779
2782 dev->base_addr = vptr->ioaddr;
2783
2784 for (i = 0; i < 6; i++) 2780 for (i = 0; i < 6; i++)
2785 dev->dev_addr[i] = readb(&regs->PAR[i]); 2781 dev->dev_addr[i] = readb(&regs->PAR[i]);
2786 2782
@@ -2806,7 +2802,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2806 2802
2807 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); 2803 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2808 2804
2809 dev->irq = pdev->irq;
2810 dev->netdev_ops = &velocity_netdev_ops; 2805 dev->netdev_ops = &velocity_netdev_ops;
2811 dev->ethtool_ops = &velocity_ethtool_ops; 2806 dev->ethtool_ops = &velocity_ethtool_ops;
2812 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2807 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
new file mode 100644
index 000000000000..cb18043f5830
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -0,0 +1,73 @@
1#
2# WIZnet devices configuration
3#
4
5config NET_VENDOR_WIZNET
6 bool "WIZnet devices"
7 default y
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about WIZnet devices. If you say Y, you will be asked
16 for your specific card in the following questions.
17
18if NET_VENDOR_WIZNET
19
20config WIZNET_W5100
21 tristate "WIZnet W5100 Ethernet support"
22 depends on HAS_IOMEM
23 ---help---
24 Support for WIZnet W5100 chips.
25
26 W5100 is a single chip with integrated 10/100 Ethernet MAC,
27 PHY and hardware TCP/IP stack, but this driver is limited to
28 the MAC and PHY functions only, onchip TCP/IP is unused.
29
30 To compile this driver as a module, choose M here: the module
31 will be called w5100.
32
33config WIZNET_W5300
34 tristate "WIZnet W5300 Ethernet support"
35 depends on HAS_IOMEM
36 ---help---
37 Support for WIZnet W5300 chips.
38
39 W5300 is a single chip with integrated 10/100 Ethernet MAC,
40 PHY and hardware TCP/IP stack, but this driver is limited to
41 the MAC and PHY functions only, onchip TCP/IP is unused.
42
43 To compile this driver as a module, choose M here: the module
44 will be called w5300.
45
46choice
47 prompt "WIZnet interface mode"
48 depends on WIZNET_W5100 || WIZNET_W5300
49 default WIZNET_BUS_ANY
50
51config WIZNET_BUS_DIRECT
52 bool "Direct address bus mode"
53 ---help---
54 In direct address mode host system can directly access all registers
55 after mapping to Memory-Mapped I/O space.
56
57config WIZNET_BUS_INDIRECT
58 bool "Indirect address bus mode"
59 ---help---
60 In indirect address mode host system indirectly accesses registers
61 using Indirect Mode Address Register and Indirect Mode Data Register,
62 which are directly mapped to Memory-Mapped I/O space.
63
64config WIZNET_BUS_ANY
65 bool "Select interface mode in runtime"
66 ---help---
67 If interface mode is unknown in compile time, it can be selected
68 in runtime from board/platform resources configuration.
69
70 Performance may decrease compared to explicitly selected bus mode.
71endchoice
72
73endif # NET_VENDOR_WIZNET
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
new file mode 100644
index 000000000000..c614535227e8
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_WIZNET_W5100) += w5100.o
2obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
new file mode 100644
index 000000000000..a75e9ef5a4ce
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -0,0 +1,808 @@
1/*
2 * Ethernet driver for the WIZnet W5100 chip.
3 *
4 * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
5 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/kconfig.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/platform_device.h>
16#include <linux/platform_data/wiznet.h>
17#include <linux/ethtool.h>
18#include <linux/skbuff.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/delay.h>
22#include <linux/slab.h>
23#include <linux/spinlock.h>
24#include <linux/io.h>
25#include <linux/ioport.h>
26#include <linux/interrupt.h>
27#include <linux/irq.h>
28#include <linux/gpio.h>
29
30#define DRV_NAME "w5100"
31#define DRV_VERSION "2012-04-04"
32
33MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
34MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
35MODULE_ALIAS("platform:"DRV_NAME);
36MODULE_LICENSE("GPL");
37
38/*
39 * Registers
40 */
41#define W5100_COMMON_REGS 0x0000
42#define W5100_MR 0x0000 /* Mode Register */
43#define MR_RST 0x80 /* S/W reset */
44#define MR_PB 0x10 /* Ping block */
45#define MR_AI 0x02 /* Address Auto-Increment */
46#define MR_IND 0x01 /* Indirect mode */
47#define W5100_SHAR 0x0009 /* Source MAC address */
48#define W5100_IR 0x0015 /* Interrupt Register */
49#define W5100_IMR 0x0016 /* Interrupt Mask Register */
50#define IR_S0 0x01 /* S0 interrupt */
51#define W5100_RTR 0x0017 /* Retry Time-value Register */
52#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
53#define W5100_RMSR 0x001a /* Receive Memory Size */
54#define W5100_TMSR 0x001b /* Transmit Memory Size */
55#define W5100_COMMON_REGS_LEN 0x0040
56
57#define W5100_S0_REGS 0x0400
58#define W5100_S0_MR 0x0400 /* S0 Mode Register */
59#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscous) */
60#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */
61#define W5100_S0_CR 0x0401 /* S0 Command Register */
62#define S0_CR_OPEN 0x01 /* OPEN command */
63#define S0_CR_CLOSE 0x10 /* CLOSE command */
64#define S0_CR_SEND 0x20 /* SEND command */
65#define S0_CR_RECV 0x40 /* RECV command */
66#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */
67#define S0_IR_SENDOK 0x10 /* complete sending */
68#define S0_IR_RECV 0x04 /* receiving data */
69#define W5100_S0_SR 0x0403 /* S0 Status Register */
70#define S0_SR_MACRAW 0x42 /* mac raw mode */
71#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */
72#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */
73#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */
74#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */
75#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */
76#define W5100_S0_REGS_LEN 0x0040
77
78#define W5100_TX_MEM_START 0x4000
79#define W5100_TX_MEM_END 0x5fff
80#define W5100_TX_MEM_MASK 0x1fff
81#define W5100_RX_MEM_START 0x6000
82#define W5100_RX_MEM_END 0x7fff
83#define W5100_RX_MEM_MASK 0x1fff
84
85/*
86 * Device driver private data structure
87 */
88struct w5100_priv {
89 void __iomem *base;
90 spinlock_t reg_lock;
91 bool indirect;
92 u8 (*read)(struct w5100_priv *priv, u16 addr);
93 void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
94 u16 (*read16)(struct w5100_priv *priv, u16 addr);
95 void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
96 void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
97 void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
98 int irq;
99 int link_irq;
100 int link_gpio;
101
102 struct napi_struct napi;
103 struct net_device *ndev;
104 bool promisc;
105 u32 msg_enable;
106};
107
108/************************************************************************
109 *
110 * Lowlevel I/O functions
111 *
112 ***********************************************************************/
113
114/*
115 * In direct address mode host system can directly access W5100 registers
116 * after mapping to Memory-Mapped I/O space.
117 *
118 * 0x8000 bytes are required for memory space.
119 */
120static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
121{
122 return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
123}
124
125static inline void w5100_write_direct(struct w5100_priv *priv,
126 u16 addr, u8 data)
127{
128 iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
129}
130
131static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
132{
133 u16 data;
134 data = w5100_read_direct(priv, addr) << 8;
135 data |= w5100_read_direct(priv, addr + 1);
136 return data;
137}
138
139static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
140{
141 w5100_write_direct(priv, addr, data >> 8);
142 w5100_write_direct(priv, addr + 1, data);
143}
144
145static void w5100_readbuf_direct(struct w5100_priv *priv,
146 u16 offset, u8 *buf, int len)
147{
148 u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
149 int i;
150
151 for (i = 0; i < len; i++, addr++) {
152 if (unlikely(addr > W5100_RX_MEM_END))
153 addr = W5100_RX_MEM_START;
154 *buf++ = w5100_read_direct(priv, addr);
155 }
156}
157
158static void w5100_writebuf_direct(struct w5100_priv *priv,
159 u16 offset, u8 *buf, int len)
160{
161 u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
162 int i;
163
164 for (i = 0; i < len; i++, addr++) {
165 if (unlikely(addr > W5100_TX_MEM_END))
166 addr = W5100_TX_MEM_START;
167 w5100_write_direct(priv, addr, *buf++);
168 }
169}
170
171/*
172 * In indirect address mode host system indirectly accesses registers by
173 * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
174 * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space.
175 * Mode Register (MR) is directly accessible.
176 *
177 * Only 0x04 bytes are required for memory space.
178 */
179#define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
180#define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
181
182static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
183{
184 unsigned long flags;
185 u8 data;
186
187 spin_lock_irqsave(&priv->reg_lock, flags);
188 w5100_write16_direct(priv, W5100_IDM_AR, addr);
189 mmiowb();
190 data = w5100_read_direct(priv, W5100_IDM_DR);
191 spin_unlock_irqrestore(&priv->reg_lock, flags);
192
193 return data;
194}
195
196static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
197{
198 unsigned long flags;
199
200 spin_lock_irqsave(&priv->reg_lock, flags);
201 w5100_write16_direct(priv, W5100_IDM_AR, addr);
202 mmiowb();
203 w5100_write_direct(priv, W5100_IDM_DR, data);
204 mmiowb();
205 spin_unlock_irqrestore(&priv->reg_lock, flags);
206}
207
208static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
209{
210 unsigned long flags;
211 u16 data;
212
213 spin_lock_irqsave(&priv->reg_lock, flags);
214 w5100_write16_direct(priv, W5100_IDM_AR, addr);
215 mmiowb();
216 data = w5100_read_direct(priv, W5100_IDM_DR) << 8;
217 data |= w5100_read_direct(priv, W5100_IDM_DR);
218 spin_unlock_irqrestore(&priv->reg_lock, flags);
219
220 return data;
221}
222
223static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
224{
225 unsigned long flags;
226
227 spin_lock_irqsave(&priv->reg_lock, flags);
228 w5100_write16_direct(priv, W5100_IDM_AR, addr);
229 mmiowb();
230 w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
231 w5100_write_direct(priv, W5100_IDM_DR, data);
232 mmiowb();
233 spin_unlock_irqrestore(&priv->reg_lock, flags);
234}
235
236static void w5100_readbuf_indirect(struct w5100_priv *priv,
237 u16 offset, u8 *buf, int len)
238{
239 u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
240 unsigned long flags;
241 int i;
242
243 spin_lock_irqsave(&priv->reg_lock, flags);
244 w5100_write16_direct(priv, W5100_IDM_AR, addr);
245 mmiowb();
246
247 for (i = 0; i < len; i++, addr++) {
248 if (unlikely(addr > W5100_RX_MEM_END)) {
249 addr = W5100_RX_MEM_START;
250 w5100_write16_direct(priv, W5100_IDM_AR, addr);
251 mmiowb();
252 }
253 *buf++ = w5100_read_direct(priv, W5100_IDM_DR);
254 }
255 mmiowb();
256 spin_unlock_irqrestore(&priv->reg_lock, flags);
257}
258
259static void w5100_writebuf_indirect(struct w5100_priv *priv,
260 u16 offset, u8 *buf, int len)
261{
262 u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
263 unsigned long flags;
264 int i;
265
266 spin_lock_irqsave(&priv->reg_lock, flags);
267 w5100_write16_direct(priv, W5100_IDM_AR, addr);
268 mmiowb();
269
270 for (i = 0; i < len; i++, addr++) {
271 if (unlikely(addr > W5100_TX_MEM_END)) {
272 addr = W5100_TX_MEM_START;
273 w5100_write16_direct(priv, W5100_IDM_AR, addr);
274 mmiowb();
275 }
276 w5100_write_direct(priv, W5100_IDM_DR, *buf++);
277 }
278 mmiowb();
279 spin_unlock_irqrestore(&priv->reg_lock, flags);
280}
281
282#if defined(CONFIG_WIZNET_BUS_DIRECT)
283#define w5100_read w5100_read_direct
284#define w5100_write w5100_write_direct
285#define w5100_read16 w5100_read16_direct
286#define w5100_write16 w5100_write16_direct
287#define w5100_readbuf w5100_readbuf_direct
288#define w5100_writebuf w5100_writebuf_direct
289
290#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
291#define w5100_read w5100_read_indirect
292#define w5100_write w5100_write_indirect
293#define w5100_read16 w5100_read16_indirect
294#define w5100_write16 w5100_write16_indirect
295#define w5100_readbuf w5100_readbuf_indirect
296#define w5100_writebuf w5100_writebuf_indirect
297
298#else /* CONFIG_WIZNET_BUS_ANY */
299#define w5100_read priv->read
300#define w5100_write priv->write
301#define w5100_read16 priv->read16
302#define w5100_write16 priv->write16
303#define w5100_readbuf priv->readbuf
304#define w5100_writebuf priv->writebuf
305#endif
306
307static int w5100_command(struct w5100_priv *priv, u16 cmd)
308{
309 unsigned long timeout = jiffies + msecs_to_jiffies(100);
310
311 w5100_write(priv, W5100_S0_CR, cmd);
312 mmiowb();
313
314 while (w5100_read(priv, W5100_S0_CR) != 0) {
315 if (time_after(jiffies, timeout))
316 return -EIO;
317 cpu_relax();
318 }
319
320 return 0;
321}
322
323static void w5100_write_macaddr(struct w5100_priv *priv)
324{
325 struct net_device *ndev = priv->ndev;
326 int i;
327
328 for (i = 0; i < ETH_ALEN; i++)
329 w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
330 mmiowb();
331}
332
333static void w5100_hw_reset(struct w5100_priv *priv)
334{
335 w5100_write_direct(priv, W5100_MR, MR_RST);
336 mmiowb();
337 mdelay(5);
338 w5100_write_direct(priv, W5100_MR, priv->indirect ?
339 MR_PB | MR_AI | MR_IND :
340 MR_PB);
341 mmiowb();
342 w5100_write(priv, W5100_IMR, 0);
343 w5100_write_macaddr(priv);
344
345 /* Configure 16K of internal memory
346 * as 8K RX buffer and 8K TX buffer
347 */
348 w5100_write(priv, W5100_RMSR, 0x03);
349 w5100_write(priv, W5100_TMSR, 0x03);
350 mmiowb();
351}
352
353static void w5100_hw_start(struct w5100_priv *priv)
354{
355 w5100_write(priv, W5100_S0_MR, priv->promisc ?
356 S0_MR_MACRAW : S0_MR_MACRAW_MF);
357 mmiowb();
358 w5100_command(priv, S0_CR_OPEN);
359 w5100_write(priv, W5100_IMR, IR_S0);
360 mmiowb();
361}
362
363static void w5100_hw_close(struct w5100_priv *priv)
364{
365 w5100_write(priv, W5100_IMR, 0);
366 mmiowb();
367 w5100_command(priv, S0_CR_CLOSE);
368}
369
370/***********************************************************************
371 *
372 * Device driver functions / callbacks
373 *
374 ***********************************************************************/
375
376static void w5100_get_drvinfo(struct net_device *ndev,
377 struct ethtool_drvinfo *info)
378{
379 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
380 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
381 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
382 sizeof(info->bus_info));
383}
384
385static u32 w5100_get_link(struct net_device *ndev)
386{
387 struct w5100_priv *priv = netdev_priv(ndev);
388
389 if (gpio_is_valid(priv->link_gpio))
390 return !!gpio_get_value(priv->link_gpio);
391
392 return 1;
393}
394
395static u32 w5100_get_msglevel(struct net_device *ndev)
396{
397 struct w5100_priv *priv = netdev_priv(ndev);
398
399 return priv->msg_enable;
400}
401
402static void w5100_set_msglevel(struct net_device *ndev, u32 value)
403{
404 struct w5100_priv *priv = netdev_priv(ndev);
405
406 priv->msg_enable = value;
407}
408
409static int w5100_get_regs_len(struct net_device *ndev)
410{
411 return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
412}
413
414static void w5100_get_regs(struct net_device *ndev,
415 struct ethtool_regs *regs, void *_buf)
416{
417 struct w5100_priv *priv = netdev_priv(ndev);
418 u8 *buf = _buf;
419 u16 i;
420
421 regs->version = 1;
422 for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
423 *buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
424 for (i = 0; i < W5100_S0_REGS_LEN; i++)
425 *buf++ = w5100_read(priv, W5100_S0_REGS + i);
426}
427
428static void w5100_tx_timeout(struct net_device *ndev)
429{
430 struct w5100_priv *priv = netdev_priv(ndev);
431
432 netif_stop_queue(ndev);
433 w5100_hw_reset(priv);
434 w5100_hw_start(priv);
435 ndev->stats.tx_errors++;
436 ndev->trans_start = jiffies;
437 netif_wake_queue(ndev);
438}
439
440static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
441{
442 struct w5100_priv *priv = netdev_priv(ndev);
443 u16 offset;
444
445 netif_stop_queue(ndev);
446
447 offset = w5100_read16(priv, W5100_S0_TX_WR);
448 w5100_writebuf(priv, offset, skb->data, skb->len);
449 w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
450 mmiowb();
451 ndev->stats.tx_bytes += skb->len;
452 ndev->stats.tx_packets++;
453 dev_kfree_skb(skb);
454
455 w5100_command(priv, S0_CR_SEND);
456
457 return NETDEV_TX_OK;
458}
459
/* NAPI poll: drain received frames from the chip's RX buffer, up to
 * @budget packets per invocation.
 */
static int w5100_napi_poll(struct napi_struct *napi, int budget)
{
	struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int rx_count;
	u16 rx_len;
	u16 offset;
	u8 header[2];

	for (rx_count = 0; rx_count < budget; rx_count++) {
		u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
		if (rx_buf_len == 0)
			break;

		/* Each frame is prefixed with a 2-byte big-endian length
		 * field that includes the prefix itself.
		 */
		offset = w5100_read16(priv, W5100_S0_RX_RD);
		w5100_readbuf(priv, offset, header, 2);
		rx_len = get_unaligned_be16(header) - 2;

		skb = netdev_alloc_skb_ip_align(ndev, rx_len);
		if (unlikely(!skb)) {
			/* Out of memory: discard everything pending.
			 * NOTE(review): returning -ENOMEM from a NAPI poll
			 * is unusual -- callers expect the number of packets
			 * processed; confirm this is intended.
			 */
			w5100_write16(priv, W5100_S0_RX_RD,
					    offset + rx_buf_len);
			w5100_command(priv, S0_CR_RECV);
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		skb_put(skb, rx_len);
		w5100_readbuf(priv, offset + 2, skb->data, rx_len);
		/* Advance the read pointer past header + payload and tell
		 * the chip the data has been consumed.
		 */
		w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
		mmiowb();
		w5100_command(priv, S0_CR_RECV);
		skb->protocol = eth_type_trans(skb, ndev);

		netif_receive_skb(skb);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += rx_len;
	}

	if (rx_count < budget) {
		/* RX drained: unmask the S0 interrupt (masked in the hard
		 * irq handler) and leave polling mode.
		 */
		w5100_write(priv, W5100_IMR, IR_S0);
		mmiowb();
		napi_complete(napi);
	}

	return rx_count;
}
508
/* Hard interrupt handler: acknowledge socket-0 events, wake the TX
 * queue on SENDOK, and hand RX processing to NAPI with the chip-level
 * interrupt masked until the poll completes.
 */
static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5100_priv *priv = netdev_priv(ndev);

	int ir = w5100_read(priv, W5100_S0_IR);
	if (!ir)
		return IRQ_NONE;
	/* Acknowledge the events we are about to handle. */
	w5100_write(priv, W5100_S0_IR, ir);
	mmiowb();

	if (ir & S0_IR_SENDOK) {
		netif_dbg(priv, tx_done, ndev, "tx done\n");
		netif_wake_queue(ndev);
	}

	if (ir & S0_IR_RECV) {
		if (napi_schedule_prep(&priv->napi)) {
			/* Mask further interrupts; w5100_napi_poll()
			 * re-enables them when RX is drained.
			 */
			w5100_write(priv, W5100_IMR, 0);
			mmiowb();
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
535
536static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
537{
538 struct net_device *ndev = ndev_instance;
539 struct w5100_priv *priv = netdev_priv(ndev);
540
541 if (netif_running(ndev)) {
542 if (gpio_get_value(priv->link_gpio) != 0) {
543 netif_info(priv, link, ndev, "link is up\n");
544 netif_carrier_on(ndev);
545 } else {
546 netif_info(priv, link, ndev, "link is down\n");
547 netif_carrier_off(ndev);
548 }
549 }
550
551 return IRQ_HANDLED;
552}
553
554static void w5100_set_rx_mode(struct net_device *ndev)
555{
556 struct w5100_priv *priv = netdev_priv(ndev);
557 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
558
559 if (priv->promisc != set_promisc) {
560 priv->promisc = set_promisc;
561 w5100_hw_start(priv);
562 }
563}
564
565static int w5100_set_macaddr(struct net_device *ndev, void *addr)
566{
567 struct w5100_priv *priv = netdev_priv(ndev);
568 struct sockaddr *sock_addr = addr;
569
570 if (!is_valid_ether_addr(sock_addr->sa_data))
571 return -EADDRNOTAVAIL;
572 memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
573 ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
574 w5100_write_macaddr(priv);
575 return 0;
576}
577
/* ndo_open: start the hardware, enable NAPI and the TX queue, and set
 * the initial carrier state from the link GPIO (assumed up if there is
 * no link GPIO).
 */
static int w5100_open(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifup, ndev, "enabling\n");
	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EINVAL;
	w5100_hw_start(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);
	if (!gpio_is_valid(priv->link_gpio) ||
	    gpio_get_value(priv->link_gpio) != 0)
		netif_carrier_on(ndev);
	return 0;
}
593
/* ndo_stop: close the socket, then quiesce carrier, queue and NAPI. */
static int w5100_stop(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifdown, ndev, "shutting down\n");
	w5100_hw_close(priv);
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	return 0;
}
605
/* ethtool operations: no PHY behind this chip, so only generic hooks. */
static const struct ethtool_ops w5100_ethtool_ops = {
	.get_drvinfo		= w5100_get_drvinfo,
	.get_msglevel		= w5100_get_msglevel,
	.set_msglevel		= w5100_set_msglevel,
	.get_link		= w5100_get_link,
	.get_regs_len		= w5100_get_regs_len,
	.get_regs		= w5100_get_regs,
};
614
/* Netdev operations for the W5100 in MAC-RAW (raw Ethernet) mode. */
static const struct net_device_ops w5100_netdev_ops = {
	.ndo_open		= w5100_open,
	.ndo_stop		= w5100_stop,
	.ndo_start_xmit		= w5100_start_tx,
	.ndo_tx_timeout		= w5100_tx_timeout,
	.ndo_set_rx_mode	= w5100_set_rx_mode,
	.ndo_set_mac_address	= w5100_set_macaddr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
625
/* Map the chip's register window, pick direct vs indirect bus access
 * based on the resource size, verify the chip is present, and wire up
 * the data and (optional) link-detect interrupts.
 */
static int __devinit w5100_hw_probe(struct platform_device *pdev)
{
	struct wiznet_platform_data *data = pdev->dev.platform_data;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);
	const char *name = netdev_name(ndev);
	struct resource *mem;
	int mem_size;
	int irq;
	int ret;

	/* Use the platform-supplied MAC address if valid, otherwise
	 * generate a random one.
	 */
	if (data && is_valid_ether_addr(data->mac_addr)) {
		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
	} else {
		random_ether_addr(ndev->dev_addr);
		ndev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENXIO;
	mem_size = resource_size(mem);
	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
		return -EBUSY;
	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!priv->base)
		return -EBUSY;

	spin_lock_init(&priv->reg_lock);
	/* A window smaller than the full register map forces indirect
	 * (address/data register) access.
	 */
	priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
	if (priv->indirect) {
		priv->read     = w5100_read_indirect;
		priv->write    = w5100_write_indirect;
		priv->read16   = w5100_read16_indirect;
		priv->write16  = w5100_write16_indirect;
		priv->readbuf  = w5100_readbuf_indirect;
		priv->writebuf = w5100_writebuf_indirect;
	} else {
		priv->read     = w5100_read_direct;
		priv->write    = w5100_write_direct;
		priv->read16   = w5100_read16_direct;
		priv->write16  = w5100_write16_direct;
		priv->readbuf  = w5100_readbuf_direct;
		priv->writebuf = w5100_writebuf_direct;
	}

	/* Reset, then sanity-check the retry-time register's reset value
	 * to confirm a W5100 is actually there.
	 */
	w5100_hw_reset(priv);
	if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	/* NOTE(review): IRQ_TYPE_LEVEL_LOW is an irq trigger type, not an
	 * IRQF_* flag -- confirm the intended request_irq() flags.
	 */
	ret = request_irq(irq, w5100_interrupt,
			  IRQ_TYPE_LEVEL_LOW, name, ndev);
	if (ret < 0)
		return ret;
	priv->irq = irq;

	/* Optional link-detect GPIO; on failure fall back to "no link
	 * GPIO" (carrier assumed up).
	 */
	priv->link_gpio = data ? data->link_gpio : -EINVAL;
	if (gpio_is_valid(priv->link_gpio)) {
		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
		if (!link_name)
			return -ENOMEM;
		snprintf(link_name, 16, "%s-link", name);
		priv->link_irq = gpio_to_irq(priv->link_gpio);
		if (request_any_context_irq(priv->link_irq, w5100_detect_link,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				link_name, priv->ndev) < 0)
			priv->link_gpio = -EINVAL;
	}

	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
	return 0;
}
701
/* Platform probe: allocate and register the netdev, then bring up the
 * hardware via w5100_hw_probe().
 */
static int __devinit w5100_probe(struct platform_device *pdev)
{
	struct w5100_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	ether_setup(ndev);
	ndev->netdev_ops = &w5100_netdev_ops;
	ndev->ethtool_ops = &w5100_ethtool_ops;
	ndev->watchdog_timeo = HZ;
	netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);

	/* This chip doesn't support VLAN packets with normal MTU,
	 * so disable VLAN for this device.
	 */
	ndev->features |= NETIF_F_VLAN_CHALLENGED;

	err = register_netdev(ndev);
	if (err < 0)
		goto err_register;

	err = w5100_hw_probe(pdev);
	if (err < 0)
		goto err_hw_probe;

	return 0;

err_hw_probe:
	unregister_netdev(ndev);
err_register:
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return err;
}
744
/* Platform remove: quiesce the chip and release irqs before tearing
 * down the netdev.
 */
static int __devexit w5100_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	w5100_hw_reset(priv);
	free_irq(priv->irq, ndev);
	if (gpio_is_valid(priv->link_gpio))
		free_irq(priv->link_irq, ndev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
760
761#ifdef CONFIG_PM
/* PM suspend: if the interface is up, detach it and close the socket
 * so the chip is idle across suspend.
 */
static int w5100_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_carrier_off(ndev);
		netif_device_detach(ndev);

		w5100_hw_close(priv);
	}
	return 0;
}
776
/* PM resume: if the interface was running, reinitialize the chip
 * (registers are lost across suspend) and re-attach the device.
 */
static int w5100_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		w5100_hw_reset(priv);
		w5100_hw_start(priv);

		netif_device_attach(ndev);
		/* Carrier follows the link GPIO, or is assumed up. */
		if (!gpio_is_valid(priv->link_gpio) ||
		    gpio_get_value(priv->link_gpio) != 0)
			netif_carrier_on(ndev);
	}
	return 0;
}
794#endif /* CONFIG_PM */
795
/* PM callbacks compile to no-ops when CONFIG_PM is disabled. */
static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);

static struct platform_driver w5100_driver = {
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &w5100_pm_ops,
	},
	.probe		= w5100_probe,
	.remove		= __devexit_p(w5100_remove),
};

module_platform_driver(w5100_driver);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
new file mode 100644
index 000000000000..3306a20ec211
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -0,0 +1,720 @@
1/*
2 * Ethernet driver for the WIZnet W5300 chip.
3 *
4 * Copyright (C) 2008-2009 WIZnet Co.,Ltd.
5 * Copyright (C) 2011 Taehun Kim <kth3321 <at> gmail.com>
6 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/kconfig.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/platform_data/wiznet.h>
18#include <linux/ethtool.h>
19#include <linux/skbuff.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25#include <linux/io.h>
26#include <linux/ioport.h>
27#include <linux/interrupt.h>
28#include <linux/irq.h>
29#include <linux/gpio.h>
30
31#define DRV_NAME "w5300"
32#define DRV_VERSION "2012-04-04"
33
34MODULE_DESCRIPTION("WIZnet W5300 Ethernet driver v"DRV_VERSION);
35MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
36MODULE_ALIAS("platform:"DRV_NAME);
37MODULE_LICENSE("GPL");
38
39/*
40 * Registers
41 */
#define W5300_MR		0x0000	/* Mode Register */
#define   MR_DBW		(1 << 15) /* Data bus width */
#define   MR_MPF		(1 << 14) /* Mac layer pause frame */
#define   MR_WDF(n)		(((n)&7)<<11) /* Write data fetch time */
#define   MR_RDH		(1 << 10) /* Read data hold time */
#define   MR_FS			(1 << 8)  /* FIFO swap */
#define   MR_RST		(1 << 7)  /* S/W reset */
#define   MR_PB			(1 << 4)  /* Ping block */
#define   MR_DBS		(1 << 2)  /* Data bus swap */
#define   MR_IND		(1 << 0)  /* Indirect mode */
#define W5300_IR		0x0002	/* Interrupt Register */
#define W5300_IMR		0x0004	/* Interrupt Mask Register */
#define   IR_S0			0x0001	/* S0 interrupt */
#define W5300_SHARL		0x0008	/* Source MAC address (0123) */
#define W5300_SHARH		0x000c	/* Source MAC address (45) */
#define W5300_TMSRL		0x0020	/* Transmit Memory Size (0123) */
#define W5300_TMSRH		0x0024	/* Transmit Memory Size (4567) */
#define W5300_RMSRL		0x0028	/* Receive Memory Size (0123) */
#define W5300_RMSRH		0x002c	/* Receive Memory Size (4567) */
#define W5300_MTYPE		0x0030	/* Memory Type */
#define W5300_IDR		0x00fe	/* Chip ID register */
#define   IDR_W5300		0x5300	/* =0x5300 for WIZnet W5300 */
#define W5300_S0_MR		0x0200	/* S0 Mode Register */
#define   S0_MR_CLOSED		0x0000	/* Close mode */
#define   S0_MR_MACRAW		0x0004	/* MAC RAW mode (promiscuous) */
#define   S0_MR_MACRAW_MF	0x0044	/* MAC RAW mode (filtered) */
#define W5300_S0_CR		0x0202	/* S0 Command Register */
#define   S0_CR_OPEN		0x0001	/* OPEN command */
#define   S0_CR_CLOSE		0x0010	/* CLOSE command */
#define   S0_CR_SEND		0x0020	/* SEND command */
#define   S0_CR_RECV		0x0040	/* RECV command */
#define W5300_S0_IMR		0x0204	/* S0 Interrupt Mask Register */
#define W5300_S0_IR		0x0206	/* S0 Interrupt Register */
#define   S0_IR_RECV		0x0004	/* Receive interrupt */
#define   S0_IR_SENDOK		0x0010	/* Send OK interrupt */
#define W5300_S0_SSR		0x0208	/* S0 Socket Status Register */
#define W5300_S0_TX_WRSR	0x0220	/* S0 TX Write Size Register */
#define W5300_S0_TX_FSR		0x0224	/* S0 TX Free Size Register */
#define W5300_S0_RX_RSR		0x0228	/* S0 Received data Size */
#define W5300_S0_TX_FIFO	0x022e	/* S0 Transmit FIFO */
#define W5300_S0_RX_FIFO	0x0230	/* S0 Receive FIFO */
#define W5300_REGS_LEN		0x0400
84
85/*
86 * Device driver private data structure
87 */
struct w5300_priv {
	void __iomem *base;	/* mapped register window */
	spinlock_t reg_lock;	/* serializes indirect-mode register access */
	bool indirect;		/* true: access via IDM_AR/IDM_DR pair */
	/* Bus accessors, resolved at probe time for CONFIG_WIZNET_BUS_ANY */
	u16  (*read) (struct w5300_priv *priv, u16 addr);
	void (*write)(struct w5300_priv *priv, u16 addr, u16 data);
	int irq;		/* data interrupt */
	int link_irq;		/* optional link-GPIO interrupt */
	int link_gpio;		/* link-detect GPIO, or -EINVAL if absent */

	struct napi_struct napi;
	struct net_device *ndev;
	bool promisc;		/* socket 0 opened in promiscuous MAC-RAW */
	u32 msg_enable;		/* netif message-level bitmap */
};
103
104/************************************************************************
105 *
106 * Lowlevel I/O functions
107 *
108 ***********************************************************************/
109
110/*
111 * In direct address mode host system can directly access W5300 registers
112 * after mapping to Memory-Mapped I/O space.
113 *
114 * 0x400 bytes are required for memory space.
115 */
116static inline u16 w5300_read_direct(struct w5300_priv *priv, u16 addr)
117{
118 return ioread16(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
119}
120
121static inline void w5300_write_direct(struct w5300_priv *priv,
122 u16 addr, u16 data)
123{
124 iowrite16(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
125}
126
127/*
128 * In indirect address mode host system indirectly accesses registers by
129 * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
130 * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space.
131 * Mode Register (MR) is directly accessible.
132 *
133 * Only 0x06 bytes are required for memory space.
134 */
135#define W5300_IDM_AR 0x0002 /* Indirect Mode Address */
136#define W5300_IDM_DR 0x0004 /* Indirect Mode Data */
137
/* Indirect-mode read: latch the address into IDM_AR, then read the
 * value from IDM_DR.  The spinlock keeps the AR/DR pair atomic against
 * concurrent accessors; mmiowb() orders the two MMIO writes.
 */
static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
{
	unsigned long flags;
	u16 data;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5300_write_direct(priv, W5300_IDM_AR, addr);
	mmiowb();
	data = w5300_read_direct(priv, W5300_IDM_DR);
	spin_unlock_irqrestore(&priv->reg_lock, flags);

	return data;
}
151
/* Indirect-mode write: latch the address into IDM_AR, then write the
 * value to IDM_DR, under the same lock as w5300_read_indirect().
 */
static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5300_write_direct(priv, W5300_IDM_AR, addr);
	mmiowb();
	w5300_write_direct(priv, W5300_IDM_DR, data);
	mmiowb();
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
163
/* When the bus mode is fixed at build time, resolve the accessors
 * statically; otherwise dispatch through the pointers chosen at probe.
 */
#if defined(CONFIG_WIZNET_BUS_DIRECT)
#define w5300_read	w5300_read_direct
#define w5300_write	w5300_write_direct

#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
#define w5300_read	w5300_read_indirect
#define w5300_write	w5300_write_indirect

#else /* CONFIG_WIZNET_BUS_ANY */
#define w5300_read	priv->read
#define w5300_write	priv->write
#endif
176
177static u32 w5300_read32(struct w5300_priv *priv, u16 addr)
178{
179 u32 data;
180 data = w5300_read(priv, addr) << 16;
181 data |= w5300_read(priv, addr + 2);
182 return data;
183}
184
185static void w5300_write32(struct w5300_priv *priv, u16 addr, u32 data)
186{
187 w5300_write(priv, addr, data >> 16);
188 w5300_write(priv, addr + 2, data);
189}
190
/* Issue a socket-0 command and busy-wait (up to 100 ms) for the chip
 * to clear the command register, which signals completion.
 * Returns 0 on success or -EIO on timeout.
 */
static int w5300_command(struct w5300_priv *priv, u16 cmd)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	w5300_write(priv, W5300_S0_CR, cmd);
	mmiowb();

	while (w5300_read(priv, W5300_S0_CR) != 0) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
206
/* Copy @len bytes of one received frame out of the 16-bit-wide socket-0
 * RX FIFO (an odd @len stores one extra byte, which the caller's buffer
 * must accommodate).  The two trailing reads discard 4 extra bytes that
 * follow each frame in the FIFO -- presumably the frame's CRC; confirm
 * against the W5300 datasheet.
 */
static void w5300_read_frame(struct w5300_priv *priv, u8 *buf, int len)
{
	u16 fifo;
	int i;

	for (i = 0; i < len; i += 2) {
		fifo = w5300_read(priv, W5300_S0_RX_FIFO);
		*buf++ = fifo >> 8;
		*buf++ = fifo;
	}
	fifo = w5300_read(priv, W5300_S0_RX_FIFO);
	fifo = w5300_read(priv, W5300_S0_RX_FIFO);
}
220
/* Push @len bytes into the 16-bit-wide socket-0 TX FIFO (big-endian
 * byte order within each halfword), then program the write-size
 * register so a subsequent SEND command transmits the frame.
 */
static void w5300_write_frame(struct w5300_priv *priv, u8 *buf, int len)
{
	u16 fifo;
	int i;

	for (i = 0; i < len; i += 2) {
		fifo = *buf++ << 8;
		fifo |= *buf++;
		w5300_write(priv, W5300_S0_TX_FIFO, fifo);
	}
	w5300_write32(priv, W5300_S0_TX_WRSR, len);
}
233
/* Program the netdev's station address into the SHARL/SHARH pair. */
static void w5300_write_macaddr(struct w5300_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	w5300_write32(priv, W5300_SHARL,
		      ndev->dev_addr[0] << 24 |
		      ndev->dev_addr[1] << 16 |
		      ndev->dev_addr[2] << 8 |
		      ndev->dev_addr[3]);
	w5300_write(priv, W5300_SHARH,
		    ndev->dev_addr[4] << 8 |
		    ndev->dev_addr[5]);
	mmiowb();
}
247
/* Software-reset the chip, reprogram the mode register (indirect-mode
 * bit depends on how the bus is wired), restore the MAC address, and
 * dedicate all internal memory to socket 0.
 */
static void w5300_hw_reset(struct w5300_priv *priv)
{
	/* MR is always directly accessible, even in indirect mode. */
	w5300_write_direct(priv, W5300_MR, MR_RST);
	mmiowb();
	mdelay(5);
	w5300_write_direct(priv, W5300_MR, priv->indirect ?
				 MR_WDF(7) | MR_PB | MR_IND :
				 MR_WDF(7) | MR_PB);
	mmiowb();
	w5300_write(priv, W5300_IMR, 0);
	w5300_write_macaddr(priv);

	/* Configure 128K of internal memory
	 * as 64K RX fifo and 64K TX fifo
	 */
	w5300_write32(priv, W5300_RMSRL, 64 << 24);
	w5300_write32(priv, W5300_RMSRH, 0);
	w5300_write32(priv, W5300_TMSRL, 64 << 24);
	w5300_write32(priv, W5300_TMSRH, 0);
	w5300_write(priv, W5300_MTYPE, 0x00ff);
	mmiowb();
}
270
/* Open socket 0 in MAC-RAW mode (filtered unless promiscuous was
 * requested) and unmask its RX/TX-done interrupts.
 */
static void w5300_hw_start(struct w5300_priv *priv)
{
	w5300_write(priv, W5300_S0_MR, priv->promisc ?
			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
	mmiowb();
	w5300_command(priv, S0_CR_OPEN);
	w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
	w5300_write(priv, W5300_IMR, IR_S0);
	mmiowb();
}
281
/* Mask all chip interrupts and close socket 0. */
static void w5300_hw_close(struct w5300_priv *priv)
{
	w5300_write(priv, W5300_IMR, 0);
	mmiowb();
	w5300_command(priv, S0_CR_CLOSE);
}
288
289/***********************************************************************
290 *
291 * Device driver functions / callbacks
292 *
293 ***********************************************************************/
294
295static void w5300_get_drvinfo(struct net_device *ndev,
296 struct ethtool_drvinfo *info)
297{
298 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
299 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
300 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
301 sizeof(info->bus_info));
302}
303
304static u32 w5300_get_link(struct net_device *ndev)
305{
306 struct w5300_priv *priv = netdev_priv(ndev);
307
308 if (gpio_is_valid(priv->link_gpio))
309 return !!gpio_get_value(priv->link_gpio);
310
311 return 1;
312}
313
314static u32 w5300_get_msglevel(struct net_device *ndev)
315{
316 struct w5300_priv *priv = netdev_priv(ndev);
317
318 return priv->msg_enable;
319}
320
321static void w5300_set_msglevel(struct net_device *ndev, u32 value)
322{
323 struct w5300_priv *priv = netdev_priv(ndev);
324
325 priv->msg_enable = value;
326}
327
328static int w5300_get_regs_len(struct net_device *ndev)
329{
330 return W5300_REGS_LEN;
331}
332
/* ethtool: dump the full register map as big-endian halfwords.  The
 * TX/RX FIFO registers are skipped (reading them would consume FIFO
 * data) and reported as 0xffff.  NOTE(review): the 0x23f mask appears
 * to fold every per-socket register bank onto socket 0's offsets so
 * the FIFO addresses of all banks are caught -- confirm against the
 * W5300 register map.
 */
static void w5300_get_regs(struct net_device *ndev,
			   struct ethtool_regs *regs, void *_buf)
{
	struct w5300_priv *priv = netdev_priv(ndev);
	u8 *buf = _buf;
	u16 addr;
	u16 data;

	regs->version = 1;
	for (addr = 0; addr < W5300_REGS_LEN; addr += 2) {
		switch (addr & 0x23f) {
		case W5300_S0_TX_FIFO: /* cannot read TX_FIFO */
		case W5300_S0_RX_FIFO: /* cannot read RX_FIFO */
			data = 0xffff;
			break;
		default:
			data = w5300_read(priv, addr);
			break;
		}
		*buf++ = data >> 8;
		*buf++ = data;
	}
}
356
/* Netdev watchdog callback: the TX path stalled, so reset the chip and
 * reopen socket 0, counting the lost frame as a TX error.
 */
static void w5300_tx_timeout(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	/* Full hardware reset, then restart in MAC-RAW mode. */
	w5300_hw_reset(priv);
	w5300_hw_start(priv);
	ndev->stats.tx_errors++;
	ndev->trans_start = jiffies; /* restart the watchdog interval */
	netif_wake_queue(ndev);
}
368
/* ndo_start_xmit: copy one frame into the TX FIFO and issue the SEND
 * command.  Only one frame is in flight at a time: the queue is
 * stopped here and re-woken from the SENDOK interrupt.
 */
static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);

	w5300_write_frame(priv, skb->data, skb->len);
	mmiowb();
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	netif_dbg(priv, tx_queued, ndev, "tx queued\n");

	w5300_command(priv, S0_CR_SEND);

	return NETDEV_TX_OK;
}
386
/* NAPI poll: drain received frames from the socket-0 RX FIFO, up to
 * @budget packets per invocation.
 */
static int w5300_napi_poll(struct napi_struct *napi, int budget)
{
	struct w5300_priv *priv = container_of(napi, struct w5300_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	int rx_count;
	u16 rx_len;

	for (rx_count = 0; rx_count < budget; rx_count++) {
		u32 rx_fifo_len = w5300_read32(priv, W5300_S0_RX_RSR);
		if (rx_fifo_len == 0)
			break;

		/* First FIFO halfword of each frame is its length. */
		rx_len = w5300_read(priv, W5300_S0_RX_FIFO);

		/* The FIFO is drained in halfwords, so the skb must hold
		 * an even number of bytes.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2));
		if (unlikely(!skb)) {
			/* Out of memory: drain and drop everything pending.
			 * NOTE(review): returning -ENOMEM from a NAPI poll
			 * is unusual -- callers expect the number of packets
			 * processed; confirm this is intended.
			 */
			u32 i;
			for (i = 0; i < rx_fifo_len; i += 2)
				w5300_read(priv, W5300_S0_RX_FIFO);
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		skb_put(skb, rx_len);
		w5300_read_frame(priv, skb->data, rx_len);
		skb->protocol = eth_type_trans(skb, ndev);

		netif_receive_skb(skb);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += rx_len;
	}

	if (rx_count < budget) {
		/* RX drained: unmask the S0 interrupt (masked in the hard
		 * irq handler) and leave polling mode.
		 */
		w5300_write(priv, W5300_IMR, IR_S0);
		mmiowb();
		napi_complete(napi);
	}

	return rx_count;
}
428
/* Hard interrupt handler: acknowledge socket-0 events, wake the TX
 * queue on SENDOK, and hand RX processing to NAPI with the chip-level
 * interrupt masked until the poll completes.
 */
static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5300_priv *priv = netdev_priv(ndev);

	int ir = w5300_read(priv, W5300_S0_IR);
	if (!ir)
		return IRQ_NONE;
	/* Acknowledge the events we are about to handle. */
	w5300_write(priv, W5300_S0_IR, ir);
	mmiowb();

	if (ir & S0_IR_SENDOK) {
		netif_dbg(priv, tx_done, ndev, "tx done\n");
		netif_wake_queue(ndev);
	}

	if (ir & S0_IR_RECV) {
		if (napi_schedule_prep(&priv->napi)) {
			/* Mask further interrupts; w5300_napi_poll()
			 * re-enables them when RX is drained.
			 */
			w5300_write(priv, W5300_IMR, 0);
			mmiowb();
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
455
456static irqreturn_t w5300_detect_link(int irq, void *ndev_instance)
457{
458 struct net_device *ndev = ndev_instance;
459 struct w5300_priv *priv = netdev_priv(ndev);
460
461 if (netif_running(ndev)) {
462 if (gpio_get_value(priv->link_gpio) != 0) {
463 netif_info(priv, link, ndev, "link is up\n");
464 netif_carrier_on(ndev);
465 } else {
466 netif_info(priv, link, ndev, "link is down\n");
467 netif_carrier_off(ndev);
468 }
469 }
470
471 return IRQ_HANDLED;
472}
473
474static void w5300_set_rx_mode(struct net_device *ndev)
475{
476 struct w5300_priv *priv = netdev_priv(ndev);
477 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
478
479 if (priv->promisc != set_promisc) {
480 priv->promisc = set_promisc;
481 w5300_hw_start(priv);
482 }
483}
484
485static int w5300_set_macaddr(struct net_device *ndev, void *addr)
486{
487 struct w5300_priv *priv = netdev_priv(ndev);
488 struct sockaddr *sock_addr = addr;
489
490 if (!is_valid_ether_addr(sock_addr->sa_data))
491 return -EADDRNOTAVAIL;
492 memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
493 ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
494 w5300_write_macaddr(priv);
495 return 0;
496}
497
/* ndo_open: start the hardware, enable NAPI and the TX queue, and set
 * the initial carrier state from the link GPIO (assumed up if there is
 * no link GPIO).
 */
static int w5300_open(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifup, ndev, "enabling\n");
	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EINVAL;
	w5300_hw_start(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);
	if (!gpio_is_valid(priv->link_gpio) ||
	    gpio_get_value(priv->link_gpio) != 0)
		netif_carrier_on(ndev);
	return 0;
}
513
/* ndo_stop: close the socket, then quiesce carrier, queue and NAPI. */
static int w5300_stop(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifdown, ndev, "shutting down\n");
	w5300_hw_close(priv);
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	return 0;
}
525
/* ethtool operations: no PHY behind this chip, so only generic hooks. */
static const struct ethtool_ops w5300_ethtool_ops = {
	.get_drvinfo		= w5300_get_drvinfo,
	.get_msglevel		= w5300_get_msglevel,
	.set_msglevel		= w5300_set_msglevel,
	.get_link		= w5300_get_link,
	.get_regs_len		= w5300_get_regs_len,
	.get_regs		= w5300_get_regs,
};
534
/* Netdev operations for the W5300 in MAC-RAW (raw Ethernet) mode. */
static const struct net_device_ops w5300_netdev_ops = {
	.ndo_open		= w5300_open,
	.ndo_stop		= w5300_stop,
	.ndo_start_xmit		= w5300_start_tx,
	.ndo_tx_timeout		= w5300_tx_timeout,
	.ndo_set_rx_mode	= w5300_set_rx_mode,
	.ndo_set_mac_address	= w5300_set_macaddr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
545
/* Map the chip's register window, pick direct vs indirect bus access
 * based on the resource size, verify the chip ID, and wire up the data
 * and (optional) link-detect interrupts.
 */
static int __devinit w5300_hw_probe(struct platform_device *pdev)
{
	struct wiznet_platform_data *data = pdev->dev.platform_data;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5300_priv *priv = netdev_priv(ndev);
	const char *name = netdev_name(ndev);
	struct resource *mem;
	int mem_size;
	int irq;
	int ret;

	/* Use the platform-supplied MAC address if valid, otherwise
	 * generate a random one.
	 */
	if (data && is_valid_ether_addr(data->mac_addr)) {
		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
	} else {
		random_ether_addr(ndev->dev_addr);
		ndev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENXIO;
	mem_size = resource_size(mem);
	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
		return -EBUSY;
	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!priv->base)
		return -EBUSY;

	spin_lock_init(&priv->reg_lock);
	/* A window smaller than the full register map forces indirect
	 * (IDM_AR/IDM_DR) access.
	 */
	priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
	if (priv->indirect) {
		priv->read  = w5300_read_indirect;
		priv->write = w5300_write_indirect;
	} else {
		priv->read  = w5300_read_direct;
		priv->write = w5300_write_direct;
	}

	/* Reset, then confirm the chip identifies itself as a W5300. */
	w5300_hw_reset(priv);
	if (w5300_read(priv, W5300_IDR) != IDR_W5300)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	/* NOTE(review): IRQ_TYPE_LEVEL_LOW is an irq trigger type, not an
	 * IRQF_* flag -- confirm the intended request_irq() flags.
	 */
	ret = request_irq(irq, w5300_interrupt,
			  IRQ_TYPE_LEVEL_LOW, name, ndev);
	if (ret < 0)
		return ret;
	priv->irq = irq;

	/* Optional link-detect GPIO; on failure fall back to "no link
	 * GPIO" (carrier assumed up).
	 */
	priv->link_gpio = data ? data->link_gpio : -EINVAL;
	if (gpio_is_valid(priv->link_gpio)) {
		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
		if (!link_name)
			return -ENOMEM;
		snprintf(link_name, 16, "%s-link", name);
		priv->link_irq = gpio_to_irq(priv->link_gpio);
		if (request_any_context_irq(priv->link_irq, w5300_detect_link,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				link_name, priv->ndev) < 0)
			priv->link_gpio = -EINVAL;
	}

	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
	return 0;
}
613
/* Platform probe: allocate and register the netdev, then bring up the
 * hardware via w5300_hw_probe().
 */
static int __devinit w5300_probe(struct platform_device *pdev)
{
	struct w5300_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	ether_setup(ndev);
	ndev->netdev_ops = &w5300_netdev_ops;
	ndev->ethtool_ops = &w5300_ethtool_ops;
	ndev->watchdog_timeo = HZ;
	netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16);

	/* This chip doesn't support VLAN packets with normal MTU,
	 * so disable VLAN for this device.
	 */
	ndev->features |= NETIF_F_VLAN_CHALLENGED;

	err = register_netdev(ndev);
	if (err < 0)
		goto err_register;

	err = w5300_hw_probe(pdev);
	if (err < 0)
		goto err_hw_probe;

	return 0;

err_hw_probe:
	unregister_netdev(ndev);
err_register:
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return err;
}
656
/* Platform remove: quiesce the chip and release irqs before tearing
 * down the netdev.
 */
static int __devexit w5300_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5300_priv *priv = netdev_priv(ndev);

	w5300_hw_reset(priv);
	free_irq(priv->irq, ndev);
	if (gpio_is_valid(priv->link_gpio))
		free_irq(priv->link_irq, ndev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
672
673#ifdef CONFIG_PM
/* PM suspend: if the interface is up, detach it and close the socket
 * so the chip is idle across suspend.
 */
static int w5300_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5300_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_carrier_off(ndev);
		netif_device_detach(ndev);

		w5300_hw_close(priv);
	}
	return 0;
}
688
689static int w5300_resume(struct device *dev)
690{
691 struct platform_device *pdev = to_platform_device(dev);
692 struct net_device *ndev = platform_get_drvdata(pdev);
693 struct w5300_priv *priv = netdev_priv(ndev);
694
695 if (!netif_running(ndev)) {
696 w5300_hw_reset(priv);
697 w5300_hw_start(priv);
698
699 netif_device_attach(ndev);
700 if (!gpio_is_valid(priv->link_gpio) ||
701 gpio_get_value(priv->link_gpio) != 0)
702 netif_carrier_on(ndev);
703 }
704 return 0;
705}
706#endif /* CONFIG_PM */
707
/* PM callbacks compile to no-ops when CONFIG_PM is disabled. */
static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);

static struct platform_driver w5300_driver = {
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &w5300_pm_ops,
	},
	.probe		= w5300_probe,
	.remove		= __devexit_p(w5300_remove),
};

module_platform_driver(w5300_driver);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index d21591a2c593..1eaf7128afee 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1000,6 +1000,7 @@ static const struct ethtool_ops temac_ethtool_ops = {
1000 .set_settings = temac_set_settings, 1000 .set_settings = temac_set_settings,
1001 .nway_reset = temac_nway_reset, 1001 .nway_reset = temac_nway_reset,
1002 .get_link = ethtool_op_get_link, 1002 .get_link = ethtool_op_get_link,
1003 .get_ts_info = ethtool_op_get_ts_info,
1003}; 1004};
1004 1005
1005static int __devinit temac_of_probe(struct platform_device *op) 1006static int __devinit temac_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index cc83af083fd7..44b8d2bad8c3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -2,9 +2,7 @@
2 * Definitions for Xilinx Axi Ethernet device driver. 2 * Definitions for Xilinx Axi Ethernet device driver.
3 * 3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 5 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
8 */ 6 */
9 7
10#ifndef XILINX_AXIENET_H 8#ifndef XILINX_AXIENET_H
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 2fcbeba6814b..9c365e192a31 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -4,9 +4,9 @@
4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
7 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 7 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
8 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 8 * Copyright (c) 2010 - 2011 PetaLogix
9 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 9 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
10 * 10 *
11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
12 * and Spartan6. 12 * and Spartan6.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index d70b6e79f6c0..e90e1f46121e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -2,9 +2,9 @@
2 * MDIO bus driver for the Xilinx Axi Ethernet device 2 * MDIO bus driver for the Xilinx Axi Ethernet device
3 * 3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 5 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
6 * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 6 * Copyright (c) 2010 - 2011 PetaLogix
7 * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 7 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
8 */ 8 */
9 9
10#include <linux/of_address.h> 10#include <linux/of_address.h>
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index cf67352cea14..3f431019e615 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -5,8 +5,8 @@
5config NET_VENDOR_XSCALE 5config NET_VENDOR_XSCALE
6 bool "Intel XScale IXP devices" 6 bool "Intel XScale IXP devices"
7 default y 7 default y
8 depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \ 8 depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \
9 IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611) 9 IXP4XX_NPE && IXP4XX_QMGR)
10 ---help--- 10 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y 11 If you have a network (Ethernet) card belonging to this class, say Y
12 and read the Ethernet-HOWTO, available from 12 and read the Ethernet-HOWTO, available from
@@ -27,6 +27,4 @@ config IXP4XX_ETH
27 Say Y here if you want to use built-in Ethernet ports 27 Say Y here if you want to use built-in Ethernet ports
28 on IXP4xx processor. 28 on IXP4xx processor.
29 29
30source "drivers/net/ethernet/xscale/ixp2000/Kconfig"
31
32endif # NET_VENDOR_XSCALE 30endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
index b195b9d7fe81..abc3b031fba7 100644
--- a/drivers/net/ethernet/xscale/Makefile
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -2,5 +2,4 @@
2# Makefile for the Intel XScale IXP device drivers. 2# Makefile for the Intel XScale IXP device drivers.
3# 3#
4 4
5obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
6obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o 5obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig
deleted file mode 100644
index 58dbc5b876bc..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
1config ENP2611_MSF_NET
2 tristate "Radisys ENP2611 MSF network interface support"
3 depends on ARCH_ENP2611
4 ---help---
5 This is a driver for the MSF network interface unit in
6 the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ethernet/xscale/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile
deleted file mode 100644
index fd38351ceaa7..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
2
3enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c
deleted file mode 100644
index 7dea5b95012c..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <asm/io.h>
15#include "caleb.h"
16
17#define CALEB_IDLO 0x00
18#define CALEB_IDHI 0x01
19#define CALEB_RID 0x02
20#define CALEB_RESET 0x03
21#define CALEB_INTREN0 0x04
22#define CALEB_INTREN1 0x05
23#define CALEB_INTRSTAT0 0x06
24#define CALEB_INTRSTAT1 0x07
25#define CALEB_PORTEN 0x08
26#define CALEB_BURST 0x09
27#define CALEB_PORTPAUS 0x0A
28#define CALEB_PORTPAUSD 0x0B
29#define CALEB_PHY0RX 0x10
30#define CALEB_PHY1RX 0x11
31#define CALEB_PHY0TX 0x12
32#define CALEB_PHY1TX 0x13
33#define CALEB_IXPRX_HI_CNTR 0x15
34#define CALEB_PHY0RX_HI_CNTR 0x16
35#define CALEB_PHY1RX_HI_CNTR 0x17
36#define CALEB_IXPRX_CNTR 0x18
37#define CALEB_PHY0RX_CNTR 0x19
38#define CALEB_PHY1RX_CNTR 0x1A
39#define CALEB_IXPTX_CNTR 0x1B
40#define CALEB_PHY0TX_CNTR 0x1C
41#define CALEB_PHY1TX_CNTR 0x1D
42#define CALEB_DEBUG0 0x1E
43#define CALEB_DEBUG1 0x1F
44
45
46static u8 caleb_reg_read(int reg)
47{
48 u8 value;
49
50 value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
51
52// printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
53
54 return value;
55}
56
57static void caleb_reg_write(int reg, u8 value)
58{
59 u8 dummy;
60
61// printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
62
63 *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
64
65 dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
66 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
67}
68
69
70void caleb_reset(void)
71{
72 /*
73 * Perform a chip reset.
74 */
75 caleb_reg_write(CALEB_RESET, 0x02);
76 udelay(1);
77
78 /*
79 * Enable all interrupt sources. This is needed to get
80 * meaningful results out of the status bits (register 6
81 * and 7.)
82 */
83 caleb_reg_write(CALEB_INTREN0, 0xff);
84 caleb_reg_write(CALEB_INTREN1, 0x07);
85
86 /*
87 * Set RX and TX FIFO thresholds to 1.5kb.
88 */
89 caleb_reg_write(CALEB_PHY0RX, 0x11);
90 caleb_reg_write(CALEB_PHY1RX, 0x11);
91 caleb_reg_write(CALEB_PHY0TX, 0x11);
92 caleb_reg_write(CALEB_PHY1TX, 0x11);
93
94 /*
95 * Program SPI-3 burst size.
96 */
97 caleb_reg_write(CALEB_BURST, 0); // 64-byte RBUF mpackets
98// caleb_reg_write(CALEB_BURST, 1); // 128-byte RBUF mpackets
99// caleb_reg_write(CALEB_BURST, 2); // 256-byte RBUF mpackets
100}
101
102void caleb_enable_rx(int port)
103{
104 u8 temp;
105
106 temp = caleb_reg_read(CALEB_PORTEN);
107 temp |= 1 << port;
108 caleb_reg_write(CALEB_PORTEN, temp);
109}
110
111void caleb_disable_rx(int port)
112{
113 u8 temp;
114
115 temp = caleb_reg_read(CALEB_PORTEN);
116 temp &= ~(1 << port);
117 caleb_reg_write(CALEB_PORTEN, temp);
118}
119
120void caleb_enable_tx(int port)
121{
122 u8 temp;
123
124 temp = caleb_reg_read(CALEB_PORTEN);
125 temp |= 1 << (port + 4);
126 caleb_reg_write(CALEB_PORTEN, temp);
127}
128
129void caleb_disable_tx(int port)
130{
131 u8 temp;
132
133 temp = caleb_reg_read(CALEB_PORTEN);
134 temp &= ~(1 << (port + 4));
135 caleb_reg_write(CALEB_PORTEN, temp);
136}
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h
deleted file mode 100644
index e93a1ef5b8a3..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __CALEB_H
13#define __CALEB_H
14
15void caleb_reset(void);
16void caleb_enable_rx(int port);
17void caleb_disable_rx(int port);
18void caleb_enable_tx(int port);
19void caleb_disable_tx(int port);
20
21
22#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
deleted file mode 100644
index 34a6cfd17930..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/enp2611.c
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 * IXP2400 MSF network device driver for the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/init.h>
17#include <linux/moduleparam.h>
18#include <asm/hardware/uengine.h>
19#include <asm/mach-types.h>
20#include <asm/io.h>
21#include "ixpdev.h"
22#include "caleb.h"
23#include "ixp2400-msf.h"
24#include "pm3386.h"
25
26/***********************************************************************
27 * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
28 * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
29 * to the IXP2400.
30 *
31 * +-------------+
32 * SFP GBIC #0 ---+ | +---------+
33 * | PM3386 #0 +-------+ |
34 * SFP GBIC #1 ---+ | | "Caleb" | +---------+
35 * +-------------+ | | | |
36 * | SPI-3 +---------+ IXP2400 |
37 * +-------------+ | bridge | | |
38 * SFP GBIC #2 ---+ | | FPGA | +---------+
39 * | PM3386 #1 +-------+ |
40 * | | +---------+
41 * +-------------+
42 * ^ ^ ^
43 * | 1.25Gbaud | 104MHz | 104MHz
44 * | SERDES ea. | SPI-3 ea. | SPI-3
45 *
46 ***********************************************************************/
47static struct ixp2400_msf_parameters enp2611_msf_parameters =
48{
49 .rx_mode = IXP2400_RX_MODE_UTOPIA_POS |
50 IXP2400_RX_MODE_1x32 |
51 IXP2400_RX_MODE_MPHY |
52 IXP2400_RX_MODE_MPHY_32 |
53 IXP2400_RX_MODE_MPHY_POLLED_STATUS |
54 IXP2400_RX_MODE_MPHY_LEVEL3 |
55 IXP2400_RX_MODE_RBUF_SIZE_64,
56
57 .rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
58
59 .rx_poll_ports = 3,
60
61 .rx_channel_mode = {
62 IXP2400_PORT_RX_MODE_MASTER |
63 IXP2400_PORT_RX_MODE_POS_PHY |
64 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
65 IXP2400_PORT_RX_MODE_ODD_PARITY |
66 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
67
68 IXP2400_PORT_RX_MODE_MASTER |
69 IXP2400_PORT_RX_MODE_POS_PHY |
70 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
71 IXP2400_PORT_RX_MODE_ODD_PARITY |
72 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
73
74 IXP2400_PORT_RX_MODE_MASTER |
75 IXP2400_PORT_RX_MODE_POS_PHY |
76 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
77 IXP2400_PORT_RX_MODE_ODD_PARITY |
78 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
79
80 IXP2400_PORT_RX_MODE_MASTER |
81 IXP2400_PORT_RX_MODE_POS_PHY |
82 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
83 IXP2400_PORT_RX_MODE_ODD_PARITY |
84 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
85 },
86
87 .tx_mode = IXP2400_TX_MODE_UTOPIA_POS |
88 IXP2400_TX_MODE_1x32 |
89 IXP2400_TX_MODE_MPHY |
90 IXP2400_TX_MODE_MPHY_32 |
91 IXP2400_TX_MODE_MPHY_POLLED_STATUS |
92 IXP2400_TX_MODE_MPHY_LEVEL3 |
93 IXP2400_TX_MODE_TBUF_SIZE_64,
94
95 .txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
96
97 .tx_poll_ports = 3,
98
99 .tx_channel_mode = {
100 IXP2400_PORT_TX_MODE_MASTER |
101 IXP2400_PORT_TX_MODE_POS_PHY |
102 IXP2400_PORT_TX_MODE_ODD_PARITY |
103 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
104
105 IXP2400_PORT_TX_MODE_MASTER |
106 IXP2400_PORT_TX_MODE_POS_PHY |
107 IXP2400_PORT_TX_MODE_ODD_PARITY |
108 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
109
110 IXP2400_PORT_TX_MODE_MASTER |
111 IXP2400_PORT_TX_MODE_POS_PHY |
112 IXP2400_PORT_TX_MODE_ODD_PARITY |
113 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
114
115 IXP2400_PORT_TX_MODE_MASTER |
116 IXP2400_PORT_TX_MODE_POS_PHY |
117 IXP2400_PORT_TX_MODE_ODD_PARITY |
118 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
119 }
120};
121
122static struct net_device *nds[3];
123static struct timer_list link_check_timer;
124
125/* @@@ Poll the SFP moddef0 line too. */
126/* @@@ Try to use the pm3386 DOOL interrupt as well. */
127static void enp2611_check_link_status(unsigned long __dummy)
128{
129 int i;
130
131 for (i = 0; i < 3; i++) {
132 struct net_device *dev;
133 int status;
134
135 dev = nds[i];
136 if (dev == NULL)
137 continue;
138
139 status = pm3386_is_link_up(i);
140 if (status && !netif_carrier_ok(dev)) {
141 /* @@@ Should report autonegotiation status. */
142 printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
143
144 pm3386_enable_tx(i);
145 caleb_enable_tx(i);
146 netif_carrier_on(dev);
147 } else if (!status && netif_carrier_ok(dev)) {
148 printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
149
150 netif_carrier_off(dev);
151 caleb_disable_tx(i);
152 pm3386_disable_tx(i);
153 }
154 }
155
156 link_check_timer.expires = jiffies + HZ / 10;
157 add_timer(&link_check_timer);
158}
159
160static void enp2611_set_port_admin_status(int port, int up)
161{
162 if (up) {
163 caleb_enable_rx(port);
164
165 pm3386_set_carrier(port, 1);
166 pm3386_enable_rx(port);
167 } else {
168 caleb_disable_tx(port);
169 pm3386_disable_tx(port);
170 /* @@@ Flush out pending packets. */
171 pm3386_set_carrier(port, 0);
172
173 pm3386_disable_rx(port);
174 caleb_disable_rx(port);
175 }
176}
177
178static int __init enp2611_init_module(void)
179{
180 int ports;
181 int i;
182
183 if (!machine_is_enp2611())
184 return -ENODEV;
185
186 caleb_reset();
187 pm3386_reset();
188
189 ports = pm3386_port_count();
190 for (i = 0; i < ports; i++) {
191 nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
192 if (nds[i] == NULL) {
193 while (--i >= 0)
194 free_netdev(nds[i]);
195 return -ENOMEM;
196 }
197
198 pm3386_init_port(i);
199 pm3386_get_mac(i, nds[i]->dev_addr);
200 }
201
202 ixp2400_msf_init(&enp2611_msf_parameters);
203
204 if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
205 for (i = 0; i < ports; i++)
206 if (nds[i])
207 free_netdev(nds[i]);
208 return -EINVAL;
209 }
210
211 init_timer(&link_check_timer);
212 link_check_timer.function = enp2611_check_link_status;
213 link_check_timer.expires = jiffies;
214 add_timer(&link_check_timer);
215
216 return 0;
217}
218
219static void __exit enp2611_cleanup_module(void)
220{
221 int i;
222
223 del_timer_sync(&link_check_timer);
224
225 ixpdev_deinit();
226 for (i = 0; i < 3; i++)
227 free_netdev(nds[i]);
228}
229
230module_init(enp2611_init_module);
231module_exit(enp2611_cleanup_module);
232MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
deleted file mode 100644
index f5ffd7e05d26..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
+++ /dev/null
@@ -1,212 +0,0 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <mach/hardware.h>
17#include <mach/ixp2000-regs.h>
18#include <asm/delay.h>
19#include <asm/io.h>
20#include "ixp2400-msf.h"
21
22/*
23 * This is the Intel recommended PLL init procedure as described on
24 * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
25 */
26static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
27{
28 int rx_dual_clock;
29 int tx_dual_clock;
30 u32 value;
31
32 /*
33 * If the RX mode is not 1x32, we have to enable both RX PLLs
34 * (#0 and #1.) The same thing for the TX direction.
35 */
36 rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
37 tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
38
39 /*
40 * Read initial value.
41 */
42 value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
43
44 /*
45 * Put PLLs in powerdown and bypass mode.
46 */
47 value |= 0x0000f0f0;
48 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
49
50 /*
51 * Set single or dual clock mode bits.
52 */
53 value &= ~0x03000000;
54 value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
55
56 /*
57 * Set multipliers.
58 */
59 value &= ~0x00ff0000;
60 value |= mp->rxclk01_multiplier << 16;
61 value |= mp->rxclk23_multiplier << 18;
62 value |= mp->txclk01_multiplier << 20;
63 value |= mp->txclk23_multiplier << 22;
64
65 /*
66 * And write value.
67 */
68 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
69
70 /*
71 * Disable PLL bypass mode.
72 */
73 value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
74 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
75
76 /*
77 * Turn on PLLs.
78 */
79 value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
80 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
81
82 /*
83 * Wait for PLLs to lock. There are lock status bits, but IXP2400
84 * erratum #65 says that these lock bits should not be relied upon
85 * as they might not accurately reflect the true state of the PLLs.
86 */
87 udelay(100);
88}
89
90/*
91 * Needed according to p480 of Programmer's Reference Manual.
92 */
93static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
94{
95 int size_bits;
96 int i;
97
98 /*
99 * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
100 * corruption) in the Intel-recommended way: do not add the RBUF
101 * elements susceptible to corruption to the freelist.
102 */
103 size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
104 if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
105 for (i = 1; i < 128; i++) {
106 if (i == 9 || i == 18 || i == 27)
107 continue;
108 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
109 }
110 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
111 for (i = 1; i < 64; i++) {
112 if (i == 4 || i == 9 || i == 13)
113 continue;
114 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
115 }
116 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
117 for (i = 1; i < 32; i++) {
118 if (i == 2 || i == 4 || i == 6)
119 continue;
120 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
121 }
122 }
123}
124
125static u32 ixp2400_msf_valid_channels(u32 reg)
126{
127 u32 channels;
128
129 channels = 0;
130 switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
131 case IXP2400_RX_MODE_1x32:
132 channels = 0x1;
133 if (reg & IXP2400_RX_MODE_MPHY &&
134 !(reg & IXP2400_RX_MODE_MPHY_32))
135 channels = 0xf;
136 break;
137
138 case IXP2400_RX_MODE_2x16:
139 channels = 0x5;
140 break;
141
142 case IXP2400_RX_MODE_4x8:
143 channels = 0xf;
144 break;
145
146 case IXP2400_RX_MODE_1x16_2x8:
147 channels = 0xd;
148 break;
149 }
150
151 return channels;
152}
153
154static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
155{
156 u32 value;
157
158 value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
159 value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
160 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
161}
162
163static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
164{
165 u32 value;
166
167 value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
168 value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
169 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
170}
171
172
173void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
174{
175 u32 value;
176 int i;
177
178 /*
179 * Init the RX/TX PLLs based on the passed parameter block.
180 */
181 ixp2400_pll_init(mp);
182
183 /*
184 * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF.
185 */
186 value = ixp2000_reg_read(IXP2000_RESET0);
187 ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
188 ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
189
190 /*
191 * Initialise the RX section.
192 */
193 ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
194 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
195 for (i = 0; i < 4; i++) {
196 ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
197 mp->rx_channel_mode[i]);
198 }
199 ixp2400_msf_free_rbuf_entries(mp);
200 ixp2400_msf_enable_rx(mp);
201
202 /*
203 * Initialise the TX section.
204 */
205 ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
206 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
207 for (i = 0; i < 4; i++) {
208 ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
209 mp->tx_channel_mode[i]);
210 }
211 ixp2400_msf_enable_tx(mp);
212}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
deleted file mode 100644
index 3ac1af2771da..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#ifndef __IXP2400_MSF_H
15#define __IXP2400_MSF_H
16
17struct ixp2400_msf_parameters
18{
19 u32 rx_mode;
20 unsigned rxclk01_multiplier:2;
21 unsigned rxclk23_multiplier:2;
22 unsigned rx_poll_ports:6;
23 u32 rx_channel_mode[4];
24
25 u32 tx_mode;
26 unsigned txclk01_multiplier:2;
27 unsigned txclk23_multiplier:2;
28 unsigned tx_poll_ports:6;
29 u32 tx_channel_mode[4];
30};
31
32void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
33
34#define IXP2400_PLL_MULTIPLIER_48 0x00
35#define IXP2400_PLL_MULTIPLIER_24 0x01
36#define IXP2400_PLL_MULTIPLIER_16 0x02
37#define IXP2400_PLL_MULTIPLIER_12 0x03
38
39#define IXP2400_RX_MODE_CSIX 0x00400000
40#define IXP2400_RX_MODE_UTOPIA_POS 0x00000000
41#define IXP2400_RX_MODE_WIDTH_MASK 0x00300000
42#define IXP2400_RX_MODE_1x16_2x8 0x00300000
43#define IXP2400_RX_MODE_4x8 0x00200000
44#define IXP2400_RX_MODE_2x16 0x00100000
45#define IXP2400_RX_MODE_1x32 0x00000000
46#define IXP2400_RX_MODE_MPHY 0x00080000
47#define IXP2400_RX_MODE_SPHY 0x00000000
48#define IXP2400_RX_MODE_MPHY_32 0x00040000
49#define IXP2400_RX_MODE_MPHY_4 0x00000000
50#define IXP2400_RX_MODE_MPHY_POLLED_STATUS 0x00020000
51#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS 0x00000000
52#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX 0x00010000
53#define IXP2400_RX_MODE_CBUS_SIMPLEX 0x00000000
54#define IXP2400_RX_MODE_MPHY_LEVEL2 0x00004000
55#define IXP2400_RX_MODE_MPHY_LEVEL3 0x00000000
56#define IXP2400_RX_MODE_CBUS_8BIT 0x00002000
57#define IXP2400_RX_MODE_CBUS_4BIT 0x00000000
58#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST 0x00000200
59#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS 0x00000000
60#define IXP2400_RX_MODE_RBUF_SIZE_MASK 0x0000000c
61#define IXP2400_RX_MODE_RBUF_SIZE_256 0x00000008
62#define IXP2400_RX_MODE_RBUF_SIZE_128 0x00000004
63#define IXP2400_RX_MODE_RBUF_SIZE_64 0x00000000
64
65#define IXP2400_PORT_RX_MODE_SLAVE 0x00000040
66#define IXP2400_PORT_RX_MODE_MASTER 0x00000000
67#define IXP2400_PORT_RX_MODE_POS_PHY_L3 0x00000020
68#define IXP2400_PORT_RX_MODE_POS_PHY_L2 0x00000000
69#define IXP2400_PORT_RX_MODE_POS_PHY 0x00000010
70#define IXP2400_PORT_RX_MODE_UTOPIA 0x00000000
71#define IXP2400_PORT_RX_MODE_EVEN_PARITY 0x0000000c
72#define IXP2400_PORT_RX_MODE_ODD_PARITY 0x00000008
73#define IXP2400_PORT_RX_MODE_NO_PARITY 0x00000000
74#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS 0x00000002
75#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS 0x00000000
76#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE 0x00000001
77#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE 0x00000000
78
79#define IXP2400_TX_MODE_CSIX 0x00400000
80#define IXP2400_TX_MODE_UTOPIA_POS 0x00000000
81#define IXP2400_TX_MODE_WIDTH_MASK 0x00300000
82#define IXP2400_TX_MODE_1x16_2x8 0x00300000
83#define IXP2400_TX_MODE_4x8 0x00200000
84#define IXP2400_TX_MODE_2x16 0x00100000
85#define IXP2400_TX_MODE_1x32 0x00000000
86#define IXP2400_TX_MODE_MPHY 0x00080000
87#define IXP2400_TX_MODE_SPHY 0x00000000
88#define IXP2400_TX_MODE_MPHY_32 0x00040000
89#define IXP2400_TX_MODE_MPHY_4 0x00000000
90#define IXP2400_TX_MODE_MPHY_POLLED_STATUS 0x00020000
91#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS 0x00000000
92#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX 0x00010000
93#define IXP2400_TX_MODE_CBUS_SIMPLEX 0x00000000
94#define IXP2400_TX_MODE_MPHY_LEVEL2 0x00004000
95#define IXP2400_TX_MODE_MPHY_LEVEL3 0x00000000
96#define IXP2400_TX_MODE_CBUS_8BIT 0x00002000
97#define IXP2400_TX_MODE_CBUS_4BIT 0x00000000
98#define IXP2400_TX_MODE_TBUF_SIZE_MASK 0x0000000c
99#define IXP2400_TX_MODE_TBUF_SIZE_256 0x00000008
100#define IXP2400_TX_MODE_TBUF_SIZE_128 0x00000004
101#define IXP2400_TX_MODE_TBUF_SIZE_64 0x00000000
102
103#define IXP2400_PORT_TX_MODE_SLAVE 0x00000040
104#define IXP2400_PORT_TX_MODE_MASTER 0x00000000
105#define IXP2400_PORT_TX_MODE_POS_PHY 0x00000010
106#define IXP2400_PORT_TX_MODE_UTOPIA 0x00000000
107#define IXP2400_PORT_TX_MODE_EVEN_PARITY 0x0000000c
108#define IXP2400_PORT_TX_MODE_ODD_PARITY 0x00000008
109#define IXP2400_PORT_TX_MODE_NO_PARITY 0x00000000
110#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS 0x00000002
111#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE 0x00000001
112#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE 0x00000000
113
114
115#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
deleted file mode 100644
index 42a73e357afa..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
+++ /dev/null
@@ -1,408 +0,0 @@
1/*
2 * RX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one full element list is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The RBUF uses 64-byte mpackets.
17 * - RX descriptors reside in SRAM, and have the following format:
18 * struct rx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 buf_length;
23 *
24 * // from uengine
25 * u32 channel;
26 * u32 pkt_length;
27 * };
28 * - Packet data resides in DRAM.
29 * - Packet buffer addresses are 8-byte aligned.
30 * - Scratch ring 0 is rx_pending.
31 * - Scratch ring 1 is rx_done, and has status condition 'full'.
32 * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
33 * - This code is run on all eight threads of the microengine it runs on.
34 *
35 * Local memory is used for per-channel RX state.
36 */
37
38#define RX_THREAD_FREELIST_0 0x0030
39#define RBUF_ELEMENT_DONE 0x0044
40
41#define CHANNEL_FLAGS *l$index0[0]
42#define CHANNEL_FLAG_RECEIVING 1
43#define PACKET_LENGTH *l$index0[1]
44#define PACKET_CHECKSUM *l$index0[2]
45#define BUFFER_HANDLE *l$index0[3]
46#define BUFFER_START *l$index0[4]
47#define BUFFER_LENGTH *l$index0[5]
48
49#define CHANNEL_STATE_SIZE 24 // in bytes
50#define CHANNEL_STATE_SHIFT 5 // ceil(log2(state size))
51
52
53 .sig volatile sig1
54 .sig volatile sig2
55 .sig volatile sig3
56
57 .sig mpacket_arrived
58 .reg add_to_rx_freelist
59 .reg read $rsw0, $rsw1
60 .xfer_order $rsw0 $rsw1
61
62 .reg zero
63
64 /*
65 * Initialise add_to_rx_freelist.
66 */
67 .begin
68 .reg temp
69 .reg temp2
70
71 immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
72 immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
73
74 local_csr_rd[ACTIVE_CTX_STS]
75 immed[temp, 0]
76 alu[temp2, temp, and, 0x1f]
77 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
78 alu[temp2, temp, and, 0x80]
79 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
80 .end
81
82 immed[zero, 0]
83
84 /*
85 * Skip context 0 initialisation?
86 */
87 .begin
88 br!=ctx[0, mpacket_receive_loop#]
89 .end
90
91 /*
92 * Initialise local memory.
93 */
94 .begin
95 .reg addr
96 .reg temp
97
98 immed[temp, 0]
99 init_local_mem_loop#:
100 alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
101 local_csr_wr[ACTIVE_LM_ADDR_0, addr]
102 nop
103 nop
104 nop
105
106 immed[CHANNEL_FLAGS, 0]
107
108 alu[temp, temp, +, 1]
109 alu[--, temp, and, 0x20]
110 beq[init_local_mem_loop#]
111 .end
112
113 /*
114 * Initialise signal pipeline.
115 */
116 .begin
117 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
118 .set_sig sig1
119
120 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
121 .set_sig sig2
122
123 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
124 .set_sig sig3
125 .end
126
127mpacket_receive_loop#:
128 /*
129 * Synchronise and wait for mpacket.
130 */
131 .begin
132 ctx_arb[sig1]
133 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
134
135 msf[fast_wr, --, add_to_rx_freelist, 0]
136 .set_sig mpacket_arrived
137 ctx_arb[mpacket_arrived]
138 .set $rsw0 $rsw1
139 .end
140
141 /*
142 * We halt if we see {inbparerr,parerr,null,soperror}.
143 */
144 .begin
145 alu_shf[--, 0x1b, and, $rsw0, >>8]
146 bne[abort_rswerr#]
147 .end
148
149 /*
150 * Point local memory pointer to this channel's state area.
151 */
152 .begin
153 .reg chanaddr
154
155 alu[chanaddr, $rsw0, and, 0x1f]
156 alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
157 local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
158 nop
159 nop
160 nop
161 .end
162
163 /*
164 * Check whether we received a SOP mpacket while we were already
165 * working on a packet, or a non-SOP mpacket while there was no
166 * packet pending. (SOP == RECEIVING -> abort) If everything's
167 * okay, update the RECEIVING flag to reflect our new state.
168 */
169 .begin
170 .reg temp
171 .reg eop
172
173 #if CHANNEL_FLAG_RECEIVING != 1
174 #error CHANNEL_FLAG_RECEIVING is not 1
175 #endif
176
177 alu_shf[temp, 1, and, $rsw0, >>15]
178 alu[temp, temp, xor, CHANNEL_FLAGS]
179 alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
180 beq[abort_proterr#]
181
182 alu_shf[eop, 1, and, $rsw0, >>14]
183 alu[CHANNEL_FLAGS, temp, xor, eop]
184 .end
185
186 /*
187 * Copy the mpacket into the right spot, and in case of EOP,
188 * write back the descriptor and pass the packet on.
189 */
190 .begin
191 .reg buffer_offset
192 .reg _packet_length
193 .reg _packet_checksum
194 .reg _buffer_handle
195 .reg _buffer_start
196 .reg _buffer_length
197
198 /*
199 * Determine buffer_offset, _packet_length and
200 * _packet_checksum.
201 */
202 .begin
203 .reg temp
204
205 alu[--, 1, and, $rsw0, >>15]
206 beq[not_sop#]
207
208 immed[PACKET_LENGTH, 0]
209 immed[PACKET_CHECKSUM, 0]
210
211 not_sop#:
212 alu[buffer_offset, --, b, PACKET_LENGTH]
213 alu_shf[temp, 0xff, and, $rsw0, >>16]
214 alu[_packet_length, buffer_offset, +, temp]
215 alu[PACKET_LENGTH, --, b, _packet_length]
216
217 immed[temp, 0xffff]
218 alu[temp, $rsw1, and, temp]
219 alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
220 alu[PACKET_CHECKSUM, --, b, _packet_checksum]
221 .end
222
223 /*
224 * Allocate buffer in case of SOP.
225 */
226 .begin
227 .reg temp
228
229 alu[temp, 1, and, $rsw0, >>15]
230 beq[skip_buffer_alloc#]
231
232 .begin
233 .sig zzz
234 .reg read $stemp $stemp2
235 .xfer_order $stemp $stemp2
236
237 rx_nobufs#:
238 scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
239 alu[_buffer_handle, --, b, $stemp]
240 beq[rx_nobufs#]
241
242 sram[read, $stemp, _buffer_handle, 0, 2],
243 ctx_swap[zzz]
244 alu[_buffer_start, --, b, $stemp]
245 alu[_buffer_length, --, b, $stemp2]
246 .end
247
248 skip_buffer_alloc#:
249 .end
250
251 /*
252 * Resynchronise.
253 */
254 .begin
255 ctx_arb[sig2]
256 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
257 .end
258
259 /*
260 * Synchronise buffer state.
261 */
262 .begin
263 .reg temp
264
265 alu[temp, 1, and, $rsw0, >>15]
266 beq[copy_from_local_mem#]
267
268 alu[BUFFER_HANDLE, --, b, _buffer_handle]
269 alu[BUFFER_START, --, b, _buffer_start]
270 alu[BUFFER_LENGTH, --, b, _buffer_length]
271 br[sync_state_done#]
272
273 copy_from_local_mem#:
274 alu[_buffer_handle, --, b, BUFFER_HANDLE]
275 alu[_buffer_start, --, b, BUFFER_START]
276 alu[_buffer_length, --, b, BUFFER_LENGTH]
277
278 sync_state_done#:
279 .end
280
281#if 0
282 /*
283 * Debug buffer state management.
284 */
285 .begin
286 .reg temp
287
288 alu[temp, 1, and, $rsw0, >>14]
289 beq[no_poison#]
290 immed[BUFFER_HANDLE, 0xdead]
291 immed[BUFFER_START, 0xdead]
292 immed[BUFFER_LENGTH, 0xdead]
293 no_poison#:
294
295 immed[temp, 0xdead]
296 alu[--, _buffer_handle, -, temp]
297 beq[state_corrupted#]
298 alu[--, _buffer_start, -, temp]
299 beq[state_corrupted#]
300 alu[--, _buffer_length, -, temp]
301 beq[state_corrupted#]
302 .end
303#endif
304
305 /*
306 * Check buffer length.
307 */
308 .begin
309 alu[--, _buffer_length, -, _packet_length]
310 blo[buffer_overflow#]
311 .end
312
313 /*
314 * Copy the mpacket and give back the RBUF element.
315 */
316 .begin
317 .reg element
318 .reg xfer_size
319 .reg temp
320 .sig copy_sig
321
322 alu_shf[element, 0x7f, and, $rsw0, >>24]
323 alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
324
325 alu[xfer_size, xfer_size, -, 1]
326 alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
327 alu_shf[temp, 0x10, or, xfer_size, <<21]
328 alu_shf[temp, temp, or, element, <<11]
329 alu_shf[--, temp, or, 1, <<18]
330
331 dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
332 indirect_ref, sig_done[copy_sig]
333 ctx_arb[copy_sig]
334
335 alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
336 msf[fast_wr, --, temp, 0]
337 .end
338
339 /*
340 * If EOP, write back the packet descriptor.
341 */
342 .begin
343 .reg write $stemp $stemp2
344 .xfer_order $stemp $stemp2
345 .sig zzz
346
347 alu_shf[--, 1, and, $rsw0, >>14]
348 beq[no_writeback#]
349
350 alu[$stemp, $rsw0, and, 0x1f]
351 alu[$stemp2, --, b, _packet_length]
352 sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
353
354 no_writeback#:
355 .end
356
357 /*
358 * Resynchronise.
359 */
360 .begin
361 ctx_arb[sig3]
362 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
363 .end
364
365 /*
366 * If EOP, put the buffer back onto the scratch ring.
367 */
368 .begin
369 .reg write $stemp
370 .sig zzz
371
372 br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
373
374 alu_shf[--, 1, and, $rsw0, >>14]
375 beq[mpacket_receive_loop#]
376
377 alu[--, 1, and, $rsw0, >>10]
378 bne[rxerr#]
379
380 alu[$stemp, --, b, _buffer_handle]
381 scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
382 cap[fast_wr, 0, XSCALE_INT_A]
383 br[mpacket_receive_loop#]
384
385 rxerr#:
386 alu[$stemp, --, b, _buffer_handle]
387 scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
388 br[mpacket_receive_loop#]
389 .end
390 .end
391
392
393abort_rswerr#:
394 halt
395
396abort_proterr#:
397 halt
398
399state_corrupted#:
400 halt
401
402buffer_overflow#:
403 halt
404
405rx_done_ring_overflow#:
406 halt
407
408
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
deleted file mode 100644
index e8aee2f81aad..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
+++ /dev/null
@@ -1,130 +0,0 @@
1static struct ixp2000_uengine_code ixp2400_rx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 109,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x0c, 0xc0, 0x05,
21 0xf4, 0x44, 0x0c, 0x00, 0x05,
22 0xfc, 0x04, 0x4c, 0x00, 0x00,
23 0xf0, 0x00, 0x00, 0x3b, 0x00,
24 0xb4, 0x40, 0xf0, 0x3b, 0x1f,
25 0x8a, 0xc0, 0x50, 0x3e, 0x05,
26 0xb4, 0x40, 0xf0, 0x3b, 0x80,
27 0x9a, 0xe0, 0x00, 0x3e, 0x05,
28 0xf0, 0x00, 0x00, 0x07, 0x00,
29 0xd8, 0x05, 0xc0, 0x00, 0x11,
30 0xf0, 0x00, 0x00, 0x0f, 0x00,
31 0x91, 0xb0, 0x20, 0x0e, 0x00,
32 0xfc, 0x06, 0x60, 0x0b, 0x00,
33 0xf0, 0x00, 0x0c, 0x03, 0x00,
34 0xf0, 0x00, 0x0c, 0x03, 0x00,
35 0xf0, 0x00, 0x0c, 0x03, 0x00,
36 0xf0, 0x00, 0x0c, 0x02, 0x00,
37 0xb0, 0xc0, 0x30, 0x0f, 0x01,
38 0xa4, 0x70, 0x00, 0x0f, 0x20,
39 0xd8, 0x02, 0xc0, 0x01, 0x00,
40 0xfc, 0x10, 0xac, 0x23, 0x08,
41 0xfc, 0x10, 0xac, 0x43, 0x10,
42 0xfc, 0x10, 0xac, 0x63, 0x18,
43 0xe0, 0x00, 0x00, 0x00, 0x02,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0x3d, 0x00, 0x04, 0x03, 0x20,
46 0xe0, 0x00, 0x00, 0x00, 0x10,
47 0x84, 0x82, 0x02, 0x01, 0x3b,
48 0xd8, 0x1a, 0x00, 0x01, 0x01,
49 0xb4, 0x00, 0x8c, 0x7d, 0x80,
50 0x91, 0xb0, 0x80, 0x22, 0x00,
51 0xfc, 0x06, 0x60, 0x23, 0x00,
52 0xf0, 0x00, 0x0c, 0x03, 0x00,
53 0xf0, 0x00, 0x0c, 0x03, 0x00,
54 0xf0, 0x00, 0x0c, 0x03, 0x00,
55 0x94, 0xf0, 0x92, 0x01, 0x21,
56 0xac, 0x40, 0x60, 0x26, 0x00,
57 0xa4, 0x30, 0x0c, 0x04, 0x06,
58 0xd8, 0x1a, 0x40, 0x01, 0x00,
59 0x94, 0xe0, 0xa2, 0x01, 0x21,
60 0xac, 0x20, 0x00, 0x28, 0x06,
61 0x84, 0xf2, 0x02, 0x01, 0x21,
62 0xd8, 0x0b, 0x40, 0x01, 0x00,
63 0xf0, 0x00, 0x0c, 0x02, 0x01,
64 0xf0, 0x00, 0x0c, 0x02, 0x02,
65 0xa0, 0x00, 0x08, 0x04, 0x00,
66 0x95, 0x00, 0xc6, 0x01, 0xff,
67 0xa0, 0x80, 0x10, 0x30, 0x00,
68 0xa0, 0x60, 0x1c, 0x00, 0x01,
69 0xf0, 0x0f, 0xf0, 0x33, 0xff,
70 0xb4, 0x00, 0xc0, 0x31, 0x81,
71 0xb0, 0x80, 0xb0, 0x32, 0x02,
72 0xa0, 0x20, 0x20, 0x2c, 0x00,
73 0x94, 0xf0, 0xd2, 0x01, 0x21,
74 0xd8, 0x0f, 0x40, 0x01, 0x00,
75 0x19, 0x40, 0x10, 0x04, 0x20,
76 0xa0, 0x00, 0x26, 0x04, 0x00,
77 0xd8, 0x0d, 0xc0, 0x01, 0x00,
78 0x00, 0x42, 0x10, 0x80, 0x02,
79 0xb0, 0x00, 0x46, 0x04, 0x00,
80 0xb0, 0x00, 0x56, 0x08, 0x00,
81 0xe0, 0x00, 0x00, 0x00, 0x04,
82 0xfc, 0x10, 0xae, 0x43, 0x90,
83 0x84, 0xf0, 0x32, 0x01, 0x21,
84 0xd8, 0x11, 0x40, 0x01, 0x00,
85 0xa0, 0x60, 0x3c, 0x00, 0x02,
86 0xa0, 0x20, 0x40, 0x10, 0x00,
87 0xa0, 0x20, 0x50, 0x14, 0x00,
88 0xd8, 0x12, 0x00, 0x00, 0x18,
89 0xa0, 0x00, 0x28, 0x0c, 0x00,
90 0xb0, 0x00, 0x48, 0x10, 0x00,
91 0xb0, 0x00, 0x58, 0x14, 0x00,
92 0xaa, 0xf0, 0x00, 0x14, 0x01,
93 0xd8, 0x1a, 0xc0, 0x01, 0x05,
94 0x85, 0x80, 0x42, 0x01, 0xff,
95 0x95, 0x00, 0x66, 0x01, 0xff,
96 0xba, 0xc0, 0x60, 0x1b, 0x01,
97 0x9a, 0x30, 0x60, 0x19, 0x30,
98 0x9a, 0xb0, 0x70, 0x1a, 0x30,
99 0x9b, 0x50, 0x78, 0x1e, 0x04,
100 0x8a, 0xe2, 0x08, 0x1e, 0x21,
101 0x6a, 0x4e, 0x00, 0x13, 0x00,
102 0xe0, 0x00, 0x00, 0x00, 0x30,
103 0x9b, 0x00, 0x7a, 0x92, 0x04,
104 0x3d, 0x00, 0x04, 0x1f, 0x20,
105 0x84, 0xe2, 0x02, 0x01, 0x21,
106 0xd8, 0x16, 0x80, 0x01, 0x00,
107 0xa4, 0x18, 0x0c, 0x7d, 0x80,
108 0xa0, 0x58, 0x1c, 0x00, 0x01,
109 0x01, 0x42, 0x00, 0xa0, 0x02,
110 0xe0, 0x00, 0x00, 0x00, 0x08,
111 0xfc, 0x10, 0xae, 0x63, 0x98,
112 0xd8, 0x1b, 0x00, 0xc2, 0x14,
113 0x84, 0xe2, 0x02, 0x01, 0x21,
114 0xd8, 0x05, 0xc0, 0x01, 0x00,
115 0x84, 0xa2, 0x02, 0x01, 0x21,
116 0xd8, 0x19, 0x40, 0x01, 0x01,
117 0xa0, 0x58, 0x0c, 0x00, 0x02,
118 0x1a, 0x40, 0x00, 0x04, 0x24,
119 0x33, 0x00, 0x01, 0x2f, 0x20,
120 0xd8, 0x05, 0xc0, 0x00, 0x18,
121 0xa0, 0x58, 0x0c, 0x00, 0x02,
122 0x1a, 0x40, 0x00, 0x04, 0x20,
123 0xd8, 0x05, 0xc0, 0x00, 0x18,
124 0xe0, 0x00, 0x02, 0x00, 0x00,
125 0xe0, 0x00, 0x02, 0x00, 0x00,
126 0xe0, 0x00, 0x02, 0x00, 0x00,
127 0xe0, 0x00, 0x02, 0x00, 0x00,
128 0xe0, 0x00, 0x02, 0x00, 0x00,
129 }
130};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
deleted file mode 100644
index d090d1884fb7..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
+++ /dev/null
@@ -1,272 +0,0 @@
1/*
2 * TX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one TBUF partition is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The TBUF uses 64-byte mpackets.
17 * - TX descriptors reside in SRAM, and have the following format:
18 * struct tx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 pkt_length;
23 * u32 channel;
24 * };
25 * - Packet data resides in DRAM.
26 * - Packet buffer addresses are 8-byte aligned.
27 * - Scratch ring 2 is tx_pending.
28 * - Scratch ring 3 is tx_done, and has status condition 'full'.
29 * - This code is run on all eight threads of the microengine it runs on.
30 */
31
32#define TX_SEQUENCE_0 0x0060
33#define TBUF_CTRL 0x1800
34
35#define PARTITION_SIZE 128
36#define PARTITION_THRESH 96
37
38
39 .sig volatile sig1
40 .sig volatile sig2
41 .sig volatile sig3
42
43 .reg @old_tx_seq_0
44 .reg @mpkts_in_flight
45 .reg @next_tbuf_mpacket
46
47 .reg @buffer_handle
48 .reg @buffer_start
49 .reg @packet_length
50 .reg @channel
51 .reg @packet_offset
52
53 .reg zero
54
55 immed[zero, 0]
56
57 /*
58 * Skip context 0 initialisation?
59 */
60 .begin
61 br!=ctx[0, mpacket_tx_loop#]
62 .end
63
64 /*
65 * Wait until all pending TBUF elements have been transmitted.
66 */
67 .begin
68 .reg read $tx
69 .sig zzz
70
71 loop_empty#:
72 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
73 alu_shf[--, --, b, $tx, >>31]
74 beq[loop_empty#]
75
76 alu[@old_tx_seq_0, --, b, $tx]
77 .end
78
79 immed[@mpkts_in_flight, 0]
80 alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
81
82 immed[@buffer_handle, 0]
83
84 /*
85 * Initialise signal pipeline.
86 */
87 .begin
88 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
89 .set_sig sig1
90
91 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
92 .set_sig sig2
93
94 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
95 .set_sig sig3
96 .end
97
98mpacket_tx_loop#:
99 .begin
100 .reg tbuf_element_index
101 .reg buffer_handle
102 .reg sop_eop
103 .reg packet_data
104 .reg channel
105 .reg mpacket_size
106
107 /*
108 * If there is no packet currently being transmitted,
109 * dequeue the next TX descriptor, and fetch the buffer
110 * address, packet length and destination channel number.
111 */
112 .begin
113 .reg read $stemp $stemp2 $stemp3
114 .xfer_order $stemp $stemp2 $stemp3
115 .sig zzz
116
117 ctx_arb[sig1]
118
119 alu[--, --, b, @buffer_handle]
120 bne[already_got_packet#]
121
122 tx_nobufs#:
123 scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
124 alu[@buffer_handle, --, b, $stemp]
125 beq[tx_nobufs#]
126
127 sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
128 alu[@buffer_start, --, b, $stemp]
129 alu[@packet_length, --, b, $stemp2]
130 beq[zero_byte_packet#]
131 alu[@channel, --, b, $stemp3]
132 immed[@packet_offset, 0]
133
134 already_got_packet#:
135 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
136 .end
137
138 /*
139 * Determine tbuf element index, SOP/EOP flags, mpacket
140 * offset and mpacket size and cache buffer_handle and
141 * channel number.
142 */
143 .begin
144 alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
145 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
146 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
147 (PARTITION_SIZE - 1)]
148
149 alu[buffer_handle, --, b, @buffer_handle]
150 immed[@buffer_handle, 0]
151
152 immed[sop_eop, 1]
153
154 alu[packet_data, --, b, @packet_offset]
155 bne[no_sop#]
156 alu[sop_eop, sop_eop, or, 2]
157 no_sop#:
158 alu[packet_data, packet_data, +, @buffer_start]
159
160 alu[channel, --, b, @channel]
161
162 alu[mpacket_size, @packet_length, -, @packet_offset]
163 alu[--, 64, -, mpacket_size]
164 bhs[eop#]
165 alu[@buffer_handle, --, b, buffer_handle]
166 immed[mpacket_size, 64]
167 alu[sop_eop, sop_eop, and, 2]
168 eop#:
169
170 alu[@packet_offset, @packet_offset, +, mpacket_size]
171 .end
172
173 /*
174 * Wait until there's enough space in the TBUF.
175 */
176 .begin
177 .reg read $tx
178 .reg temp
179 .sig zzz
180
181 ctx_arb[sig2]
182
183 br[test_space#]
184
185 loop_space#:
186 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
187
188 alu[temp, $tx, -, @old_tx_seq_0]
189 alu[temp, temp, and, 0xff]
190 alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
191
192 alu[@old_tx_seq_0, --, b, $tx]
193
194 test_space#:
195 alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
196 blo[loop_space#]
197
198 alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
199
200 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
201 .end
202
203 /*
204 * Copy the packet data to the TBUF.
205 */
206 .begin
207 .reg temp
208 .sig copy_sig
209
210 alu[temp, mpacket_size, -, 1]
211 alu_shf[temp, 0x10, or, temp, >>3]
212 alu_shf[temp, 0x10, or, temp, <<21]
213 alu_shf[temp, temp, or, tbuf_element_index, <<11]
214 alu_shf[--, temp, or, 1, <<18]
215
216 dram[tbuf_wr, --, packet_data, 0, max_8],
217 indirect_ref, sig_done[copy_sig]
218 ctx_arb[copy_sig]
219 .end
220
221 /*
222 * Mark TBUF element as ready-to-be-transmitted.
223 */
224 .begin
225 .reg write $tsw $tsw2
226 .xfer_order $tsw $tsw2
227 .reg temp
228 .sig zzz
229
230 alu_shf[temp, channel, or, mpacket_size, <<24]
231 alu_shf[$tsw, temp, or, sop_eop, <<8]
232 immed[$tsw2, 0]
233
234 immed[temp, TBUF_CTRL]
235 alu_shf[temp, temp, or, tbuf_element_index, <<3]
236 msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
237 .end
238
239 /*
240 * Resynchronise.
241 */
242 .begin
243 ctx_arb[sig3]
244 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
245 .end
246
247 /*
248 * If this was an EOP mpacket, recycle the TX buffer
249 * and signal the host.
250 */
251 .begin
252 .reg write $stemp
253 .sig zzz
254
255 alu[--, sop_eop, and, 1]
256 beq[mpacket_tx_loop#]
257
258 tx_done_ring_full#:
259 br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
260
261 alu[$stemp, --, b, buffer_handle]
262 scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
263 cap[fast_wr, 0, XSCALE_INT_A]
264 br[mpacket_tx_loop#]
265 .end
266 .end
267
268
269zero_byte_packet#:
270 halt
271
272
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
deleted file mode 100644
index a433e24b0a51..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
+++ /dev/null
@@ -1,98 +0,0 @@
1static struct ixp2000_uengine_code ixp2400_tx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 77,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x00, 0x07, 0x00,
21 0xd8, 0x03, 0x00, 0x00, 0x11,
22 0x3c, 0x40, 0x00, 0x04, 0xe0,
23 0x81, 0xf2, 0x02, 0x01, 0x00,
24 0xd8, 0x00, 0x80, 0x01, 0x00,
25 0xb0, 0x08, 0x06, 0x00, 0x00,
26 0xf0, 0x00, 0x0c, 0x00, 0x80,
27 0xb4, 0x49, 0x02, 0x03, 0x7f,
28 0xf0, 0x00, 0x02, 0x83, 0x00,
29 0xfc, 0x10, 0xac, 0x23, 0x08,
30 0xfc, 0x10, 0xac, 0x43, 0x10,
31 0xfc, 0x10, 0xac, 0x63, 0x18,
32 0xe0, 0x00, 0x00, 0x00, 0x02,
33 0xa0, 0x30, 0x02, 0x80, 0x00,
34 0xd8, 0x06, 0x00, 0x01, 0x01,
35 0x19, 0x40, 0x00, 0x04, 0x28,
36 0xb0, 0x0a, 0x06, 0x00, 0x00,
37 0xd8, 0x03, 0xc0, 0x01, 0x00,
38 0x00, 0x44, 0x00, 0x80, 0x80,
39 0xa0, 0x09, 0x06, 0x00, 0x00,
40 0xb0, 0x0b, 0x06, 0x04, 0x00,
41 0xd8, 0x13, 0x00, 0x01, 0x00,
42 0xb0, 0x0c, 0x06, 0x08, 0x00,
43 0xf0, 0x00, 0x0c, 0x00, 0xa0,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0xa0, 0x00, 0x12, 0x40, 0x00,
46 0xb0, 0xc9, 0x02, 0x43, 0x01,
47 0xb4, 0x49, 0x02, 0x43, 0x7f,
48 0xb0, 0x00, 0x22, 0x80, 0x00,
49 0xf0, 0x00, 0x02, 0x83, 0x00,
50 0xf0, 0x00, 0x0c, 0x04, 0x02,
51 0xb0, 0x40, 0x6c, 0x00, 0xa0,
52 0xd8, 0x08, 0x80, 0x01, 0x01,
53 0xaa, 0x00, 0x2c, 0x08, 0x02,
54 0xa0, 0xc0, 0x30, 0x18, 0x90,
55 0xa0, 0x00, 0x43, 0x00, 0x00,
56 0xba, 0xc0, 0x32, 0xc0, 0xa0,
57 0xaa, 0xb0, 0x00, 0x0f, 0x40,
58 0xd8, 0x0a, 0x80, 0x01, 0x04,
59 0xb0, 0x0a, 0x00, 0x08, 0x00,
60 0xf0, 0x00, 0x00, 0x0f, 0x40,
61 0xa4, 0x00, 0x2c, 0x08, 0x02,
62 0xa0, 0x8a, 0x00, 0x0c, 0xa0,
63 0xe0, 0x00, 0x00, 0x00, 0x04,
64 0xd8, 0x0c, 0x80, 0x00, 0x18,
65 0x3c, 0x40, 0x00, 0x04, 0xe0,
66 0xba, 0x80, 0x42, 0x01, 0x80,
67 0xb4, 0x40, 0x40, 0x13, 0xff,
68 0xaa, 0x88, 0x00, 0x10, 0x80,
69 0xb0, 0x08, 0x06, 0x00, 0x00,
70 0xaa, 0xf0, 0x0d, 0x80, 0x80,
71 0xd8, 0x0b, 0x40, 0x01, 0x05,
72 0xa0, 0x88, 0x0c, 0x04, 0x80,
73 0xfc, 0x10, 0xae, 0x43, 0x90,
74 0xba, 0xc0, 0x50, 0x0f, 0x01,
75 0x9a, 0x30, 0x50, 0x15, 0x30,
76 0x9a, 0xb0, 0x50, 0x16, 0x30,
77 0x9b, 0x50, 0x58, 0x16, 0x01,
78 0x8a, 0xe2, 0x08, 0x16, 0x21,
79 0x6b, 0x4e, 0x00, 0x83, 0x03,
80 0xe0, 0x00, 0x00, 0x00, 0x30,
81 0x9a, 0x80, 0x70, 0x0e, 0x04,
82 0x8b, 0x88, 0x08, 0x1e, 0x02,
83 0xf0, 0x00, 0x0c, 0x01, 0x81,
84 0xf0, 0x01, 0x80, 0x1f, 0x00,
85 0x9b, 0xd0, 0x78, 0x1e, 0x01,
86 0x3d, 0x42, 0x00, 0x1c, 0x20,
87 0xe0, 0x00, 0x00, 0x00, 0x08,
88 0xfc, 0x10, 0xae, 0x63, 0x98,
89 0xa4, 0x30, 0x0c, 0x04, 0x02,
90 0xd8, 0x03, 0x00, 0x01, 0x00,
91 0xd8, 0x11, 0xc1, 0x42, 0x14,
92 0xa0, 0x18, 0x00, 0x08, 0x00,
93 0x1a, 0x40, 0x00, 0x04, 0x2c,
94 0x33, 0x00, 0x01, 0x2f, 0x20,
95 0xd8, 0x03, 0x00, 0x00, 0x18,
96 0xe0, 0x00, 0x02, 0x00, 0x00,
97 }
98};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
deleted file mode 100644
index 45008377c8bf..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
+++ /dev/null
@@ -1,437 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/moduleparam.h>
19#include <linux/gfp.h>
20#include <asm/hardware/uengine.h>
21#include <asm/io.h>
22#include "ixp2400_rx.ucode"
23#include "ixp2400_tx.ucode"
24#include "ixpdev_priv.h"
25#include "ixpdev.h"
26#include "pm3386.h"
27
28#define DRV_MODULE_VERSION "0.2"
29
30static int nds_count;
31static struct net_device **nds;
32static int nds_open;
33static void (*set_port_admin_status)(int port, int up);
34
35static struct ixpdev_rx_desc * const rx_desc =
36 (struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
37static struct ixpdev_tx_desc * const tx_desc =
38 (struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
39static int tx_pointer;
40
41
42static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
43{
44 struct ixpdev_priv *ip = netdev_priv(dev);
45 struct ixpdev_tx_desc *desc;
46 int entry;
47 unsigned long flags;
48
49 if (unlikely(skb->len > PAGE_SIZE)) {
50 /* @@@ Count drops. */
51 dev_kfree_skb(skb);
52 return NETDEV_TX_OK;
53 }
54
55 entry = tx_pointer;
56 tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
57
58 desc = tx_desc + entry;
59 desc->pkt_length = skb->len;
60 desc->channel = ip->channel;
61
62 skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
63 dev_kfree_skb(skb);
64
65 ixp2000_reg_write(RING_TX_PENDING,
66 TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
67
68 local_irq_save(flags);
69 ip->tx_queue_entries++;
70 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
71 netif_stop_queue(dev);
72 local_irq_restore(flags);
73
74 return NETDEV_TX_OK;
75}
76
77
78static int ixpdev_rx(struct net_device *dev, int processed, int budget)
79{
80 while (processed < budget) {
81 struct ixpdev_rx_desc *desc;
82 struct sk_buff *skb;
83 void *buf;
84 u32 _desc;
85
86 _desc = ixp2000_reg_read(RING_RX_DONE);
87 if (_desc == 0)
88 return 0;
89
90 desc = rx_desc +
91 ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
92 buf = phys_to_virt(desc->buf_addr);
93
94 if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
95 printk(KERN_ERR "ixp2000: rx err, length %d\n",
96 desc->pkt_length);
97 goto err;
98 }
99
100 if (desc->channel < 0 || desc->channel >= nds_count) {
101 printk(KERN_ERR "ixp2000: rx err, channel %d\n",
102 desc->channel);
103 goto err;
104 }
105
106 /* @@@ Make FCS stripping configurable. */
107 desc->pkt_length -= 4;
108
109 if (unlikely(!netif_running(nds[desc->channel])))
110 goto err;
111
112 skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
113 if (likely(skb != NULL)) {
114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
115 skb_put(skb, desc->pkt_length);
116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
117
118 netif_receive_skb(skb);
119 }
120
121err:
122 ixp2000_reg_write(RING_RX_PENDING, _desc);
123 processed++;
124 }
125
126 return processed;
127}
128
129/* dev always points to nds[0]. */
130static int ixpdev_poll(struct napi_struct *napi, int budget)
131{
132 struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi);
133 struct net_device *dev = ip->dev;
134 int rx;
135
136 rx = 0;
137 do {
138 ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
139
140 rx = ixpdev_rx(dev, rx, budget);
141 if (rx >= budget)
142 break;
143 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
144
145 napi_complete(napi);
146 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
147
148 return rx;
149}
150
151static void ixpdev_tx_complete(void)
152{
153 int channel;
154 u32 wake;
155
156 wake = 0;
157 while (1) {
158 struct ixpdev_priv *ip;
159 u32 desc;
160 int entry;
161
162 desc = ixp2000_reg_read(RING_TX_DONE);
163 if (desc == 0)
164 break;
165
166 /* @@@ Check whether entries come back in order. */
167 entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
168 channel = tx_desc[entry].channel;
169
170 if (channel < 0 || channel >= nds_count) {
171 printk(KERN_ERR "ixp2000: txcomp channel index "
172 "out of bounds (%d, %.8i, %d)\n",
173 channel, (unsigned int)desc, entry);
174 continue;
175 }
176
177 ip = netdev_priv(nds[channel]);
178 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
179 wake |= 1 << channel;
180 ip->tx_queue_entries--;
181 }
182
183 for (channel = 0; wake != 0; channel++) {
184 if (wake & (1 << channel)) {
185 netif_wake_queue(nds[channel]);
186 wake &= ~(1 << channel);
187 }
188 }
189}
190
191static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
192{
193 u32 status;
194
195 status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
196 if (status == 0)
197 return IRQ_NONE;
198
199 /*
200 * Any of the eight receive units signaled RX?
201 */
202 if (status & 0x00ff) {
203 struct net_device *dev = nds[0];
204 struct ixpdev_priv *ip = netdev_priv(dev);
205
206 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
207 if (likely(napi_schedule_prep(&ip->napi))) {
208 __napi_schedule(&ip->napi);
209 } else {
210 printk(KERN_CRIT "ixp2000: irq while polling!!\n");
211 }
212 }
213
214 /*
215 * Any of the eight transmit units signaled TXdone?
216 */
217 if (status & 0xff00) {
218 ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
219 ixpdev_tx_complete();
220 }
221
222 return IRQ_HANDLED;
223}
224
225#ifdef CONFIG_NET_POLL_CONTROLLER
226static void ixpdev_poll_controller(struct net_device *dev)
227{
228 disable_irq(IRQ_IXP2000_THDA0);
229 ixpdev_interrupt(IRQ_IXP2000_THDA0, dev);
230 enable_irq(IRQ_IXP2000_THDA0);
231}
232#endif
233
234static int ixpdev_open(struct net_device *dev)
235{
236 struct ixpdev_priv *ip = netdev_priv(dev);
237 int err;
238
239 napi_enable(&ip->napi);
240 if (!nds_open++) {
241 err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
242 IRQF_SHARED, "ixp2000_eth", nds);
243 if (err) {
244 nds_open--;
245 napi_disable(&ip->napi);
246 return err;
247 }
248
249 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
250 }
251
252 set_port_admin_status(ip->channel, 1);
253 netif_start_queue(dev);
254
255 return 0;
256}
257
258static int ixpdev_close(struct net_device *dev)
259{
260 struct ixpdev_priv *ip = netdev_priv(dev);
261
262 netif_stop_queue(dev);
263 napi_disable(&ip->napi);
264 set_port_admin_status(ip->channel, 0);
265
266 if (!--nds_open) {
267 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
268 free_irq(IRQ_IXP2000_THDA0, nds);
269 }
270
271 return 0;
272}
273
274static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
275{
276 struct ixpdev_priv *ip = netdev_priv(dev);
277
278 pm3386_get_stats(ip->channel, &(dev->stats));
279
280 return &(dev->stats);
281}
282
283static const struct net_device_ops ixpdev_netdev_ops = {
284 .ndo_open = ixpdev_open,
285 .ndo_stop = ixpdev_close,
286 .ndo_start_xmit = ixpdev_xmit,
287 .ndo_change_mtu = eth_change_mtu,
288 .ndo_validate_addr = eth_validate_addr,
289 .ndo_set_mac_address = eth_mac_addr,
290 .ndo_get_stats = ixpdev_get_stats,
291#ifdef CONFIG_NET_POLL_CONTROLLER
292 .ndo_poll_controller = ixpdev_poll_controller,
293#endif
294};
295
296struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
297{
298 struct net_device *dev;
299 struct ixpdev_priv *ip;
300
301 dev = alloc_etherdev(sizeof_priv);
302 if (dev == NULL)
303 return NULL;
304
305 dev->netdev_ops = &ixpdev_netdev_ops;
306
307 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
308
309 ip = netdev_priv(dev);
310 ip->dev = dev;
311 netif_napi_add(dev, &ip->napi, ixpdev_poll, 64);
312 ip->channel = channel;
313 ip->tx_queue_entries = 0;
314
315 return dev;
316}
317
318int ixpdev_init(int __nds_count, struct net_device **__nds,
319 void (*__set_port_admin_status)(int port, int up))
320{
321 int i;
322 int err;
323
324 BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);
325
326 printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);
327
328 nds_count = __nds_count;
329 nds = __nds;
330 set_port_admin_status = __set_port_admin_status;
331
332 for (i = 0; i < RX_BUF_COUNT; i++) {
333 void *buf;
334
335 buf = (void *)get_zeroed_page(GFP_KERNEL);
336 if (buf == NULL) {
337 err = -ENOMEM;
338 while (--i >= 0)
339 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
340 goto err_out;
341 }
342 rx_desc[i].buf_addr = virt_to_phys(buf);
343 rx_desc[i].buf_length = PAGE_SIZE;
344 }
345
346 /* @@@ Maybe we shouldn't be preallocating TX buffers. */
347 for (i = 0; i < TX_BUF_COUNT; i++) {
348 void *buf;
349
350 buf = (void *)get_zeroed_page(GFP_KERNEL);
351 if (buf == NULL) {
352 err = -ENOMEM;
353 while (--i >= 0)
354 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
355 goto err_free_rx;
356 }
357 tx_desc[i].buf_addr = virt_to_phys(buf);
358 }
359
360 /* 256 entries, ring status set means 'empty', base address 0x0000. */
361 ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
362 ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
363 ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
364
365 /* 256 entries, ring status set means 'full', base address 0x0400. */
366 ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
367 ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
368 ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
369
370 for (i = 0; i < RX_BUF_COUNT; i++) {
371 ixp2000_reg_write(RING_RX_PENDING,
372 RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
373 }
374
375 ixp2000_uengine_load(0, &ixp2400_rx);
376 ixp2000_uengine_start_contexts(0, 0xff);
377
378 /* 256 entries, ring status set means 'empty', base address 0x0800. */
379 ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
380 ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
381 ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
382
383 /* 256 entries, ring status set means 'full', base address 0x0c00. */
384 ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
385 ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
386 ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
387
388 ixp2000_uengine_load(1, &ixp2400_tx);
389 ixp2000_uengine_start_contexts(1, 0xff);
390
391 for (i = 0; i < nds_count; i++) {
392 err = register_netdev(nds[i]);
393 if (err) {
394 while (--i >= 0)
395 unregister_netdev(nds[i]);
396 goto err_free_tx;
397 }
398 }
399
400 for (i = 0; i < nds_count; i++) {
401 printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), %pM.\n",
402 nds[i]->name, i, nds[i]->dev_addr);
403 }
404
405 return 0;
406
407err_free_tx:
408 for (i = 0; i < TX_BUF_COUNT; i++)
409 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
410
411err_free_rx:
412 for (i = 0; i < RX_BUF_COUNT; i++)
413 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
414
415err_out:
416 return err;
417}
418
419void ixpdev_deinit(void)
420{
421 int i;
422
423 /* @@@ Flush out pending packets. */
424
425 for (i = 0; i < nds_count; i++)
426 unregister_netdev(nds[i]);
427
428 ixp2000_uengine_stop_contexts(1, 0xff);
429 ixp2000_uengine_stop_contexts(0, 0xff);
430 ixp2000_uengine_reset(0x3);
431
432 for (i = 0; i < TX_BUF_COUNT; i++)
433 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
434
435 for (i = 0; i < RX_BUF_COUNT; i++)
436 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
437}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
deleted file mode 100644
index 391ece623243..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_H
13#define __IXPDEV_H
14
15struct ixpdev_priv
16{
17 struct net_device *dev;
18 struct napi_struct napi;
19 int channel;
20 int tx_queue_entries;
21};
22
23struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
24int ixpdev_init(int num_ports, struct net_device **nds,
25 void (*set_port_admin_status)(int port, int up));
26void ixpdev_deinit(void);
27
28
29#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
deleted file mode 100644
index 86aa08ea0c33..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_PRIV_H
13#define __IXPDEV_PRIV_H
14
15#define RX_BUF_DESC_BASE 0x00001000
16#define RX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
17#define TX_BUF_DESC_BASE 0x00002000
18#define TX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
19#define TX_BUF_COUNT_PER_CHAN (TX_BUF_COUNT / 4)
20
21#define RING_RX_PENDING ((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
22#define RING_RX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
23#define RING_TX_PENDING ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
24#define RING_TX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
25
26#define SCRATCH_REG(x) ((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
27#define RING_RX_PENDING_BASE SCRATCH_REG(0x00)
28#define RING_RX_PENDING_HEAD SCRATCH_REG(0x04)
29#define RING_RX_PENDING_TAIL SCRATCH_REG(0x08)
30#define RING_RX_DONE_BASE SCRATCH_REG(0x10)
31#define RING_RX_DONE_HEAD SCRATCH_REG(0x14)
32#define RING_RX_DONE_TAIL SCRATCH_REG(0x18)
33#define RING_TX_PENDING_BASE SCRATCH_REG(0x20)
34#define RING_TX_PENDING_HEAD SCRATCH_REG(0x24)
35#define RING_TX_PENDING_TAIL SCRATCH_REG(0x28)
36#define RING_TX_DONE_BASE SCRATCH_REG(0x30)
37#define RING_TX_DONE_HEAD SCRATCH_REG(0x34)
38#define RING_TX_DONE_TAIL SCRATCH_REG(0x38)
39
40struct ixpdev_rx_desc
41{
42 u32 buf_addr;
43 u32 buf_length;
44 u32 channel;
45 u32 pkt_length;
46};
47
48struct ixpdev_tx_desc
49{
50 u32 buf_addr;
51 u32 pkt_length;
52 u32 channel;
53 u32 unused;
54};
55
56
57#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
deleted file mode 100644
index e08d3f9863b8..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.c
+++ /dev/null
@@ -1,351 +0,0 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <linux/netdevice.h>
15#include <asm/io.h>
16#include "pm3386.h"
17
18/*
19 * Read from register 'reg' of PM3386 device 'pm'.
20 */
21static u16 pm3386_reg_read(int pm, int reg)
22{
23 void *_reg;
24 u16 value;
25
26 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
27 if (pm == 1)
28 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
29
30 value = *((volatile u16 *)(_reg + (reg << 1)));
31
32// printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
33
34 return value;
35}
36
37/*
38 * Write to register 'reg' of PM3386 device 'pm', and perform
39 * a readback from the identification register.
40 */
41static void pm3386_reg_write(int pm, int reg, u16 value)
42{
43 void *_reg;
44 u16 dummy;
45
46// printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
47
48 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
49 if (pm == 1)
50 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
51
52 *((volatile u16 *)(_reg + (reg << 1))) = value;
53
54 dummy = *((volatile u16 *)_reg);
55 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
56}
57
58/*
59 * Read from port 'port' register 'reg', where the registers
60 * for the different ports are 'spacing' registers apart.
61 */
62static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
63{
64 int reg;
65
66 reg = _reg;
67 if (port & 1)
68 reg += spacing;
69
70 return pm3386_reg_read(port >> 1, reg);
71}
72
73/*
74 * Write to port 'port' register 'reg', where the registers
75 * for the different ports are 'spacing' registers apart.
76 */
77static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
78{
79 int reg;
80
81 reg = _reg;
82 if (port & 1)
83 reg += spacing;
84
85 pm3386_reg_write(port >> 1, reg, value);
86}
87
88int pm3386_secondary_present(void)
89{
90 return pm3386_reg_read(1, 0) == 0x3386;
91}
92
93void pm3386_reset(void)
94{
95 u8 mac[3][6];
96 int secondary;
97
98 secondary = pm3386_secondary_present();
99
100 /* Save programmed MAC addresses. */
101 pm3386_get_mac(0, mac[0]);
102 pm3386_get_mac(1, mac[1]);
103 if (secondary)
104 pm3386_get_mac(2, mac[2]);
105
106 /* Assert analog and digital reset. */
107 pm3386_reg_write(0, 0x002, 0x0060);
108 if (secondary)
109 pm3386_reg_write(1, 0x002, 0x0060);
110 mdelay(1);
111
112 /* Deassert analog reset. */
113 pm3386_reg_write(0, 0x002, 0x0062);
114 if (secondary)
115 pm3386_reg_write(1, 0x002, 0x0062);
116 mdelay(10);
117
118 /* Deassert digital reset. */
119 pm3386_reg_write(0, 0x002, 0x0063);
120 if (secondary)
121 pm3386_reg_write(1, 0x002, 0x0063);
122 mdelay(10);
123
124 /* Restore programmed MAC addresses. */
125 pm3386_set_mac(0, mac[0]);
126 pm3386_set_mac(1, mac[1]);
127 if (secondary)
128 pm3386_set_mac(2, mac[2]);
129
130 /* Disable carrier on all ports. */
131 pm3386_set_carrier(0, 0);
132 pm3386_set_carrier(1, 0);
133 if (secondary)
134 pm3386_set_carrier(2, 0);
135}
136
137static u16 swaph(u16 x)
138{
139 return ((x << 8) | (x >> 8)) & 0xffff;
140}
141
142int pm3386_port_count(void)
143{
144 return 2 + pm3386_secondary_present();
145}
146
147void pm3386_init_port(int port)
148{
149 int pm = port >> 1;
150
151 /*
152 * Work around ENP2611 bootloader programming MAC address
153 * in reverse.
154 */
155 if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
156 (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
157 u16 temp[3];
158
159 temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
160 temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
161 temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
162 pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
163 pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
164 pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
165 }
166
167 /*
168 * Initialise narrowbanding mode. See application note 2010486
169 * for more information. (@@@ We also need to issue a reset
170 * when ROOL or DOOL are detected.)
171 */
172 pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
173 udelay(500);
174 pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
175
176 /*
177 * SPI-3 ingress block. Set 64 bytes SPI-3 burst size
178 * towards SPI-3 bridge.
179 */
180 pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
181
182 /*
183 * Enable ingress protocol checking, and soft reset the
184 * SPI-3 ingress block.
185 */
186 pm3386_reg_write(pm, 0x103, 0x0003);
187 while (!(pm3386_reg_read(pm, 0x103) & 0x80))
188 ;
189
190 /*
191 * SPI-3 egress block. Gather 12288 bytes of the current
192 * packet in the TX fifo before initiating transmit on the
193 * SERDES interface. (Prevents TX underflows.)
194 */
195 pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
196
197 /*
198 * Enforce odd parity from the SPI-3 bridge, and soft reset
199 * the SPI-3 egress block.
200 */
201 pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
202 while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
203 ;
204
205 /*
206 * EGMAC block. Set this channels to reject long preambles,
207 * not send or transmit PAUSE frames, enable preamble checking,
208 * disable frame length checking, enable FCS appending, enable
209 * TX frame padding.
210 */
211 pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
212
213 /*
214 * Soft reset the EGMAC block.
215 */
216 pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
217 pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
218
219 /*
220 * Auto-sense autonegotiation status.
221 */
222 pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
223
224 /*
225 * Allow reception of jumbo frames.
226 */
227 pm3386_port_reg_write(port, 0x310, 0x100, 9018);
228
229 /*
230 * Allow transmission of jumbo frames.
231 */
232 pm3386_port_reg_write(port, 0x336, 0x100, 9018);
233
234 /* @@@ Should set 0x337/0x437 (RX forwarding threshold.) */
235
236 /*
237 * Set autonegotiation parameters to 'no PAUSE, full duplex.'
238 */
239 pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
240
241 /*
242 * Enable and restart autonegotiation.
243 */
244 pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
245 pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
246}
247
248void pm3386_get_mac(int port, u8 *mac)
249{
250 u16 temp;
251
252 temp = pm3386_port_reg_read(port, 0x308, 0x100);
253 mac[0] = temp & 0xff;
254 mac[1] = (temp >> 8) & 0xff;
255
256 temp = pm3386_port_reg_read(port, 0x309, 0x100);
257 mac[2] = temp & 0xff;
258 mac[3] = (temp >> 8) & 0xff;
259
260 temp = pm3386_port_reg_read(port, 0x30a, 0x100);
261 mac[4] = temp & 0xff;
262 mac[5] = (temp >> 8) & 0xff;
263}
264
265void pm3386_set_mac(int port, u8 *mac)
266{
267 pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]);
268 pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]);
269 pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]);
270}
271
272static u32 pm3386_get_stat(int port, u16 base)
273{
274 u32 value;
275
276 value = pm3386_port_reg_read(port, base, 0x100);
277 value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
278
279 return value;
280}
281
282void pm3386_get_stats(int port, struct net_device_stats *stats)
283{
284 /*
285 * Snapshot statistics counters.
286 */
287 pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
288 while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
289 ;
290
291 memset(stats, 0, sizeof(*stats));
292
293 stats->rx_packets = pm3386_get_stat(port, 0x510);
294 stats->tx_packets = pm3386_get_stat(port, 0x590);
295 stats->rx_bytes = pm3386_get_stat(port, 0x514);
296 stats->tx_bytes = pm3386_get_stat(port, 0x594);
297 /* @@@ Add other stats. */
298}
299
300void pm3386_set_carrier(int port, int state)
301{
302 pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
303}
304
305int pm3386_is_link_up(int port)
306{
307 u16 temp;
308
309 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
310 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
311
312 return !!(temp & 0x0002);
313}
314
315void pm3386_enable_rx(int port)
316{
317 u16 temp;
318
319 temp = pm3386_port_reg_read(port, 0x303, 0x100);
320 temp |= 0x1000;
321 pm3386_port_reg_write(port, 0x303, 0x100, temp);
322}
323
324void pm3386_disable_rx(int port)
325{
326 u16 temp;
327
328 temp = pm3386_port_reg_read(port, 0x303, 0x100);
329 temp &= 0xefff;
330 pm3386_port_reg_write(port, 0x303, 0x100, temp);
331}
332
333void pm3386_enable_tx(int port)
334{
335 u16 temp;
336
337 temp = pm3386_port_reg_read(port, 0x303, 0x100);
338 temp |= 0x4000;
339 pm3386_port_reg_write(port, 0x303, 0x100, temp);
340}
341
342void pm3386_disable_tx(int port)
343{
344 u16 temp;
345
346 temp = pm3386_port_reg_read(port, 0x303, 0x100);
347 temp &= 0xbfff;
348 pm3386_port_reg_write(port, 0x303, 0x100, temp);
349}
350
351MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
deleted file mode 100644
index cc4183dca911..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __PM3386_H
13#define __PM3386_H
14
15void pm3386_reset(void);
16int pm3386_port_count(void);
17void pm3386_init_port(int port);
18void pm3386_get_mac(int port, u8 *mac);
19void pm3386_set_mac(int port, u8 *mac);
20void pm3386_get_stats(int port, struct net_device_stats *stats);
21void pm3386_set_carrier(int port, int state);
22int pm3386_is_link_up(int port);
23void pm3386_enable_rx(int port);
24void pm3386_disable_rx(int port);
25void pm3386_enable_tx(int port);
26void pm3386_disable_tx(int port);
27
28
29#endif
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 41a8b5a9849e..482648fcf0b6 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1002,12 +1002,41 @@ static int ixp4xx_nway_reset(struct net_device *dev)
1002 return phy_start_aneg(port->phydev); 1002 return phy_start_aneg(port->phydev);
1003} 1003}
1004 1004
1005int ixp46x_phc_index = -1;
1006
1007static int ixp4xx_get_ts_info(struct net_device *dev,
1008 struct ethtool_ts_info *info)
1009{
1010 if (!cpu_is_ixp46x()) {
1011 info->so_timestamping =
1012 SOF_TIMESTAMPING_TX_SOFTWARE |
1013 SOF_TIMESTAMPING_RX_SOFTWARE |
1014 SOF_TIMESTAMPING_SOFTWARE;
1015 info->phc_index = -1;
1016 return 0;
1017 }
1018 info->so_timestamping =
1019 SOF_TIMESTAMPING_TX_HARDWARE |
1020 SOF_TIMESTAMPING_RX_HARDWARE |
1021 SOF_TIMESTAMPING_RAW_HARDWARE;
1022 info->phc_index = ixp46x_phc_index;
1023 info->tx_types =
1024 (1 << HWTSTAMP_TX_OFF) |
1025 (1 << HWTSTAMP_TX_ON);
1026 info->rx_filters =
1027 (1 << HWTSTAMP_FILTER_NONE) |
1028 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1029 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
1030 return 0;
1031}
1032
1005static const struct ethtool_ops ixp4xx_ethtool_ops = { 1033static const struct ethtool_ops ixp4xx_ethtool_ops = {
1006 .get_drvinfo = ixp4xx_get_drvinfo, 1034 .get_drvinfo = ixp4xx_get_drvinfo,
1007 .get_settings = ixp4xx_get_settings, 1035 .get_settings = ixp4xx_get_settings,
1008 .set_settings = ixp4xx_set_settings, 1036 .set_settings = ixp4xx_set_settings,
1009 .nway_reset = ixp4xx_nway_reset, 1037 .nway_reset = ixp4xx_nway_reset,
1010 .get_link = ethtool_op_get_link, 1038 .get_link = ethtool_op_get_link,
1039 .get_ts_info = ixp4xx_get_ts_info,
1011}; 1040};
1012 1041
1013 1042
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 168c8f41d09f..d4719632ffc6 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -113,10 +113,9 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
113 113
114 SET_NETDEV_DEV(dev, &pdev->dev); 114 SET_NETDEV_DEV(dev, &pdev->dev);
115 115
116 if (pci_request_regions(pdev, "rrunner")) { 116 ret = pci_request_regions(pdev, "rrunner");
117 ret = -EIO; 117 if (ret < 0)
118 goto out; 118 goto out;
119 }
120 119
121 pci_set_drvdata(pdev, dev); 120 pci_set_drvdata(pdev, dev);
122 121
@@ -124,11 +123,8 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
124 123
125 spin_lock_init(&rrpriv->lock); 124 spin_lock_init(&rrpriv->lock);
126 125
127 dev->irq = pdev->irq;
128 dev->netdev_ops = &rr_netdev_ops; 126 dev->netdev_ops = &rr_netdev_ops;
129 127
130 dev->base_addr = pci_resource_start(pdev, 0);
131
132 /* display version info if adapter is found */ 128 /* display version info if adapter is found */
133 if (!version_disp) { 129 if (!version_disp) {
134 /* set display flag to TRUE so that */ 130 /* set display flag to TRUE so that */
@@ -146,16 +142,15 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
146 pci_set_master(pdev); 142 pci_set_master(pdev);
147 143
148 printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI " 144 printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
149 "at 0x%08lx, irq %i, PCI latency %i\n", dev->name, 145 "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
150 dev->base_addr, dev->irq, pci_latency); 146 (unsigned long long)pci_resource_start(pdev, 0),
147 pdev->irq, pci_latency);
151 148
152 /* 149 /*
153 * Remap the regs into kernel space. 150 * Remap the MMIO regs into kernel space.
154 */ 151 */
155 152 rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
156 rrpriv->regs = ioremap(dev->base_addr, 0x1000); 153 if (!rrpriv->regs) {
157
158 if (!rrpriv->regs){
159 printk(KERN_ERR "%s: Unable to map I/O register, " 154 printk(KERN_ERR "%s: Unable to map I/O register, "
160 "RoadRunner will be disabled.\n", dev->name); 155 "RoadRunner will be disabled.\n", dev->name);
161 ret = -EIO; 156 ret = -EIO;
@@ -202,8 +197,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
202 197
203 rr_init(dev); 198 rr_init(dev);
204 199
205 dev->base_addr = 0;
206
207 ret = register_netdev(dev); 200 ret = register_netdev(dev);
208 if (ret) 201 if (ret)
209 goto out; 202 goto out;
@@ -217,7 +210,7 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
217 pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring, 210 pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
218 rrpriv->tx_ring_dma); 211 rrpriv->tx_ring_dma);
219 if (rrpriv->regs) 212 if (rrpriv->regs)
220 iounmap(rrpriv->regs); 213 pci_iounmap(pdev, rrpriv->regs);
221 if (pdev) { 214 if (pdev) {
222 pci_release_regions(pdev); 215 pci_release_regions(pdev);
223 pci_set_drvdata(pdev, NULL); 216 pci_set_drvdata(pdev, NULL);
@@ -231,29 +224,26 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
231static void __devexit rr_remove_one (struct pci_dev *pdev) 224static void __devexit rr_remove_one (struct pci_dev *pdev)
232{ 225{
233 struct net_device *dev = pci_get_drvdata(pdev); 226 struct net_device *dev = pci_get_drvdata(pdev);
227 struct rr_private *rr = netdev_priv(dev);
234 228
235 if (dev) { 229 if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
236 struct rr_private *rr = netdev_priv(dev); 230 printk(KERN_ERR "%s: trying to unload running NIC\n",
237 231 dev->name);
238 if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){ 232 writel(HALT_NIC, &rr->regs->HostCtrl);
239 printk(KERN_ERR "%s: trying to unload running NIC\n",
240 dev->name);
241 writel(HALT_NIC, &rr->regs->HostCtrl);
242 }
243
244 pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
245 rr->evt_ring_dma);
246 pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
247 rr->rx_ring_dma);
248 pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
249 rr->tx_ring_dma);
250 unregister_netdev(dev);
251 iounmap(rr->regs);
252 free_netdev(dev);
253 pci_release_regions(pdev);
254 pci_disable_device(pdev);
255 pci_set_drvdata(pdev, NULL);
256 } 233 }
234
235 unregister_netdev(dev);
236 pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
237 rr->evt_ring_dma);
238 pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
239 rr->rx_ring_dma);
240 pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
241 rr->tx_ring_dma);
242 pci_iounmap(pdev, rr->regs);
243 pci_release_regions(pdev);
244 pci_disable_device(pdev);
245 pci_set_drvdata(pdev, NULL);
246 free_netdev(dev);
257} 247}
258 248
259 249
@@ -1229,9 +1219,9 @@ static int rr_open(struct net_device *dev)
1229 readl(&regs->HostCtrl); 1219 readl(&regs->HostCtrl);
1230 spin_unlock_irqrestore(&rrpriv->lock, flags); 1220 spin_unlock_irqrestore(&rrpriv->lock, flags);
1231 1221
1232 if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) { 1222 if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
1233 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", 1223 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1234 dev->name, dev->irq); 1224 dev->name, pdev->irq);
1235 ecode = -EAGAIN; 1225 ecode = -EAGAIN;
1236 goto error; 1226 goto error;
1237 } 1227 }
@@ -1338,16 +1328,15 @@ static void rr_dump(struct net_device *dev)
1338 1328
1339static int rr_close(struct net_device *dev) 1329static int rr_close(struct net_device *dev)
1340{ 1330{
1341 struct rr_private *rrpriv; 1331 struct rr_private *rrpriv = netdev_priv(dev);
1342 struct rr_regs __iomem *regs; 1332 struct rr_regs __iomem *regs = rrpriv->regs;
1333 struct pci_dev *pdev = rrpriv->pci_dev;
1343 unsigned long flags; 1334 unsigned long flags;
1344 u32 tmp; 1335 u32 tmp;
1345 short i; 1336 short i;
1346 1337
1347 netif_stop_queue(dev); 1338 netif_stop_queue(dev);
1348 1339
1349 rrpriv = netdev_priv(dev);
1350 regs = rrpriv->regs;
1351 1340
1352 /* 1341 /*
1353 * Lock to make sure we are not cleaning up while another CPU 1342 * Lock to make sure we are not cleaning up while another CPU
@@ -1386,15 +1375,15 @@ static int rr_close(struct net_device *dev)
1386 rr_raz_tx(rrpriv, dev); 1375 rr_raz_tx(rrpriv, dev);
1387 rr_raz_rx(rrpriv, dev); 1376 rr_raz_rx(rrpriv, dev);
1388 1377
1389 pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl), 1378 pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
1390 rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); 1379 rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
1391 rrpriv->rx_ctrl = NULL; 1380 rrpriv->rx_ctrl = NULL;
1392 1381
1393 pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info), 1382 pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
1394 rrpriv->info, rrpriv->info_dma); 1383 rrpriv->info_dma);
1395 rrpriv->info = NULL; 1384 rrpriv->info = NULL;
1396 1385
1397 free_irq(dev->irq, dev); 1386 free_irq(pdev->irq, dev);
1398 spin_unlock_irqrestore(&rrpriv->lock, flags); 1387 spin_unlock_irqrestore(&rrpriv->lock, flags);
1399 1388
1400 return 0; 1389 return 0;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d025c83cd12a..8b919471472f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -428,6 +428,24 @@ int netvsc_device_remove(struct hv_device *device)
428 return 0; 428 return 0;
429} 429}
430 430
431
432#define RING_AVAIL_PERCENT_HIWATER 20
433#define RING_AVAIL_PERCENT_LOWATER 10
434
435/*
436 * Get the percentage of available bytes to write in the ring.
437 * The return value is in range from 0 to 100.
438 */
439static inline u32 hv_ringbuf_avail_percent(
440 struct hv_ring_buffer_info *ring_info)
441{
442 u32 avail_read, avail_write;
443
444 hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
445
446 return avail_write * 100 / ring_info->ring_datasize;
447}
448
431static void netvsc_send_completion(struct hv_device *device, 449static void netvsc_send_completion(struct hv_device *device,
432 struct vmpacket_descriptor *packet) 450 struct vmpacket_descriptor *packet)
433{ 451{
@@ -455,6 +473,8 @@ static void netvsc_send_completion(struct hv_device *device,
455 complete(&net_device->channel_init_wait); 473 complete(&net_device->channel_init_wait);
456 } else if (nvsp_packet->hdr.msg_type == 474 } else if (nvsp_packet->hdr.msg_type ==
457 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { 475 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
476 int num_outstanding_sends;
477
458 /* Get the send context */ 478 /* Get the send context */
459 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) 479 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
460 packet->trans_id; 480 packet->trans_id;
@@ -463,10 +483,14 @@ static void netvsc_send_completion(struct hv_device *device,
463 nvsc_packet->completion.send.send_completion( 483 nvsc_packet->completion.send.send_completion(
464 nvsc_packet->completion.send.send_completion_ctx); 484 nvsc_packet->completion.send.send_completion_ctx);
465 485
466 atomic_dec(&net_device->num_outstanding_sends); 486 num_outstanding_sends =
487 atomic_dec_return(&net_device->num_outstanding_sends);
467 488
468 if (netif_queue_stopped(ndev) && !net_device->start_remove) 489 if (netif_queue_stopped(ndev) && !net_device->start_remove &&
469 netif_wake_queue(ndev); 490 (hv_ringbuf_avail_percent(&device->channel->outbound)
491 > RING_AVAIL_PERCENT_HIWATER ||
492 num_outstanding_sends < 1))
493 netif_wake_queue(ndev);
470 } else { 494 } else {
471 netdev_err(ndev, "Unknown send completion packet type- " 495 netdev_err(ndev, "Unknown send completion packet type- "
472 "%d received!!\n", nvsp_packet->hdr.msg_type); 496 "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -519,10 +543,19 @@ int netvsc_send(struct hv_device *device,
519 543
520 if (ret == 0) { 544 if (ret == 0) {
521 atomic_inc(&net_device->num_outstanding_sends); 545 atomic_inc(&net_device->num_outstanding_sends);
546 if (hv_ringbuf_avail_percent(&device->channel->outbound) <
547 RING_AVAIL_PERCENT_LOWATER) {
548 netif_stop_queue(ndev);
549 if (atomic_read(&net_device->
550 num_outstanding_sends) < 1)
551 netif_wake_queue(ndev);
552 }
522 } else if (ret == -EAGAIN) { 553 } else if (ret == -EAGAIN) {
523 netif_stop_queue(ndev); 554 netif_stop_queue(ndev);
524 if (atomic_read(&net_device->num_outstanding_sends) < 1) 555 if (atomic_read(&net_device->num_outstanding_sends) < 1) {
525 netif_wake_queue(ndev); 556 netif_wake_queue(ndev);
557 ret = -ENOSPC;
558 }
526 } else { 559 } else {
527 netdev_err(ndev, "Unable to send packet %p ret %d\n", 560 netdev_err(ndev, "Unable to send packet %p ret %d\n",
528 packet, ret); 561 packet, ret);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd294783b5c5..8f8ed3320425 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -44,6 +44,7 @@ struct net_device_context {
44 /* point back to our device context */ 44 /* point back to our device context */
45 struct hv_device *device_ctx; 45 struct hv_device *device_ctx;
46 struct delayed_work dwork; 46 struct delayed_work dwork;
47 struct work_struct work;
47}; 48};
48 49
49 50
@@ -51,30 +52,22 @@ static int ring_size = 128;
51module_param(ring_size, int, S_IRUGO); 52module_param(ring_size, int, S_IRUGO);
52MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); 53MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
53 54
54struct set_multicast_work {
55 struct work_struct work;
56 struct net_device *net;
57};
58
59static void do_set_multicast(struct work_struct *w) 55static void do_set_multicast(struct work_struct *w)
60{ 56{
61 struct set_multicast_work *swk = 57 struct net_device_context *ndevctx =
62 container_of(w, struct set_multicast_work, work); 58 container_of(w, struct net_device_context, work);
63 struct net_device *net = swk->net;
64
65 struct net_device_context *ndevctx = netdev_priv(net);
66 struct netvsc_device *nvdev; 59 struct netvsc_device *nvdev;
67 struct rndis_device *rdev; 60 struct rndis_device *rdev;
68 61
69 nvdev = hv_get_drvdata(ndevctx->device_ctx); 62 nvdev = hv_get_drvdata(ndevctx->device_ctx);
70 if (nvdev == NULL) 63 if (nvdev == NULL || nvdev->ndev == NULL)
71 goto out; 64 return;
72 65
73 rdev = nvdev->extension; 66 rdev = nvdev->extension;
74 if (rdev == NULL) 67 if (rdev == NULL)
75 goto out; 68 return;
76 69
77 if (net->flags & IFF_PROMISC) 70 if (nvdev->ndev->flags & IFF_PROMISC)
78 rndis_filter_set_packet_filter(rdev, 71 rndis_filter_set_packet_filter(rdev,
79 NDIS_PACKET_TYPE_PROMISCUOUS); 72 NDIS_PACKET_TYPE_PROMISCUOUS);
80 else 73 else
@@ -82,21 +75,13 @@ static void do_set_multicast(struct work_struct *w)
82 NDIS_PACKET_TYPE_BROADCAST | 75 NDIS_PACKET_TYPE_BROADCAST |
83 NDIS_PACKET_TYPE_ALL_MULTICAST | 76 NDIS_PACKET_TYPE_ALL_MULTICAST |
84 NDIS_PACKET_TYPE_DIRECTED); 77 NDIS_PACKET_TYPE_DIRECTED);
85
86out:
87 kfree(w);
88} 78}
89 79
90static void netvsc_set_multicast_list(struct net_device *net) 80static void netvsc_set_multicast_list(struct net_device *net)
91{ 81{
92 struct set_multicast_work *swk = 82 struct net_device_context *net_device_ctx = netdev_priv(net);
93 kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
94 if (swk == NULL)
95 return;
96 83
97 swk->net = net; 84 schedule_work(&net_device_ctx->work);
98 INIT_WORK(&swk->work, do_set_multicast);
99 schedule_work(&swk->work);
100} 85}
101 86
102static int netvsc_open(struct net_device *net) 87static int netvsc_open(struct net_device *net)
@@ -125,6 +110,8 @@ static int netvsc_close(struct net_device *net)
125 110
126 netif_tx_disable(net); 111 netif_tx_disable(net);
127 112
113 /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
114 cancel_work_sync(&net_device_ctx->work);
128 ret = rndis_filter_close(device_obj); 115 ret = rndis_filter_close(device_obj);
129 if (ret != 0) 116 if (ret != 0)
130 netdev_err(net, "unable to close device (ret %d).\n", ret); 117 netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -224,9 +211,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
224 net->stats.tx_packets++; 211 net->stats.tx_packets++;
225 } else { 212 } else {
226 kfree(packet); 213 kfree(packet);
214 if (ret != -EAGAIN) {
215 dev_kfree_skb_any(skb);
216 net->stats.tx_dropped++;
217 }
227 } 218 }
228 219
229 return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK; 220 return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
230} 221}
231 222
232/* 223/*
@@ -335,6 +326,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
335 326
336 nvdev->start_remove = true; 327 nvdev->start_remove = true;
337 cancel_delayed_work_sync(&ndevctx->dwork); 328 cancel_delayed_work_sync(&ndevctx->dwork);
329 cancel_work_sync(&ndevctx->work);
338 netif_tx_disable(ndev); 330 netif_tx_disable(ndev);
339 rndis_filter_device_remove(hdev); 331 rndis_filter_device_remove(hdev);
340 332
@@ -403,6 +395,7 @@ static int netvsc_probe(struct hv_device *dev,
403 net_device_ctx->device_ctx = dev; 395 net_device_ctx->device_ctx = dev;
404 hv_set_drvdata(dev, net); 396 hv_set_drvdata(dev, net);
405 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); 397 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
398 INIT_WORK(&net_device_ctx->work, do_set_multicast);
406 399
407 net->netdev_ops = &device_ops; 400 net->netdev_ops = &device_ops;
408 401
@@ -456,6 +449,7 @@ static int netvsc_remove(struct hv_device *dev)
456 449
457 ndev_ctx = netdev_priv(net); 450 ndev_ctx = netdev_priv(net);
458 cancel_delayed_work_sync(&ndev_ctx->dwork); 451 cancel_delayed_work_sync(&ndev_ctx->dwork);
452 cancel_work_sync(&ndev_ctx->work);
459 453
460 /* Stop outbound asap */ 454 /* Stop outbound asap */
461 netif_tx_disable(net); 455 netif_tx_disable(net);
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 4351296dde32..510b9c8d23a9 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1710,7 +1710,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
1710 1710
1711/* Flush all packets */ 1711/* Flush all packets */
1712 while ((i--) && (self->txpending)) 1712 while ((i--) && (self->txpending))
1713 udelay (10000); 1713 msleep(10);
1714 1714
1715 spin_lock_irqsave(&self->spinlock, flags); 1715 spin_lock_irqsave(&self->spinlock, flags);
1716 1716
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 725d6b367822..eb315b8d07a3 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -737,7 +737,7 @@ static int sh_irda_stop(struct net_device *ndev)
737 netif_stop_queue(ndev); 737 netif_stop_queue(ndev);
738 pm_runtime_put_sync(&self->pdev->dev); 738 pm_runtime_put_sync(&self->pdev->dev);
739 739
740 dev_info(&ndev->dev, "stoped\n"); 740 dev_info(&ndev->dev, "stopped\n");
741 741
742 return 0; 742 return 0;
743} 743}
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index e6661b5c1f83..256eddf1f75a 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -685,7 +685,7 @@ static int sh_sir_stop(struct net_device *ndev)
685 685
686 netif_stop_queue(ndev); 686 netif_stop_queue(ndev);
687 687
688 dev_info(&ndev->dev, "stoped\n"); 688 dev_info(&ndev->dev, "stopped\n");
689 689
690 return 0; 690 return 0;
691} 691}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f975afdc315c..9653ed6998fe 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -312,7 +312,8 @@ static int macvlan_open(struct net_device *dev)
312 int err; 312 int err;
313 313
314 if (vlan->port->passthru) { 314 if (vlan->port->passthru) {
315 dev_set_promiscuity(lowerdev, 1); 315 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
316 dev_set_promiscuity(lowerdev, 1);
316 goto hash_add; 317 goto hash_add;
317 } 318 }
318 319
@@ -344,12 +345,15 @@ static int macvlan_stop(struct net_device *dev)
344 struct macvlan_dev *vlan = netdev_priv(dev); 345 struct macvlan_dev *vlan = netdev_priv(dev);
345 struct net_device *lowerdev = vlan->lowerdev; 346 struct net_device *lowerdev = vlan->lowerdev;
346 347
348 dev_uc_unsync(lowerdev, dev);
349 dev_mc_unsync(lowerdev, dev);
350
347 if (vlan->port->passthru) { 351 if (vlan->port->passthru) {
348 dev_set_promiscuity(lowerdev, -1); 352 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
353 dev_set_promiscuity(lowerdev, -1);
349 goto hash_del; 354 goto hash_del;
350 } 355 }
351 356
352 dev_mc_unsync(lowerdev, dev);
353 if (dev->flags & IFF_ALLMULTI) 357 if (dev->flags & IFF_ALLMULTI)
354 dev_set_allmulti(lowerdev, -1); 358 dev_set_allmulti(lowerdev, -1);
355 359
@@ -399,10 +403,11 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
399 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); 403 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
400} 404}
401 405
402static void macvlan_set_multicast_list(struct net_device *dev) 406static void macvlan_set_mac_lists(struct net_device *dev)
403{ 407{
404 struct macvlan_dev *vlan = netdev_priv(dev); 408 struct macvlan_dev *vlan = netdev_priv(dev);
405 409
410 dev_uc_sync(vlan->lowerdev, dev);
406 dev_mc_sync(vlan->lowerdev, dev); 411 dev_mc_sync(vlan->lowerdev, dev);
407} 412}
408 413
@@ -542,6 +547,43 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
542 return 0; 547 return 0;
543} 548}
544 549
550static int macvlan_fdb_add(struct ndmsg *ndm,
551 struct net_device *dev,
552 unsigned char *addr,
553 u16 flags)
554{
555 struct macvlan_dev *vlan = netdev_priv(dev);
556 int err = -EINVAL;
557
558 if (!vlan->port->passthru)
559 return -EOPNOTSUPP;
560
561 if (is_unicast_ether_addr(addr))
562 err = dev_uc_add_excl(dev, addr);
563 else if (is_multicast_ether_addr(addr))
564 err = dev_mc_add_excl(dev, addr);
565
566 return err;
567}
568
569static int macvlan_fdb_del(struct ndmsg *ndm,
570 struct net_device *dev,
571 unsigned char *addr)
572{
573 struct macvlan_dev *vlan = netdev_priv(dev);
574 int err = -EINVAL;
575
576 if (!vlan->port->passthru)
577 return -EOPNOTSUPP;
578
579 if (is_unicast_ether_addr(addr))
580 err = dev_uc_del(dev, addr);
581 else if (is_multicast_ether_addr(addr))
582 err = dev_mc_del(dev, addr);
583
584 return err;
585}
586
545static void macvlan_ethtool_get_drvinfo(struct net_device *dev, 587static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
546 struct ethtool_drvinfo *drvinfo) 588 struct ethtool_drvinfo *drvinfo)
547{ 589{
@@ -572,11 +614,14 @@ static const struct net_device_ops macvlan_netdev_ops = {
572 .ndo_change_mtu = macvlan_change_mtu, 614 .ndo_change_mtu = macvlan_change_mtu,
573 .ndo_change_rx_flags = macvlan_change_rx_flags, 615 .ndo_change_rx_flags = macvlan_change_rx_flags,
574 .ndo_set_mac_address = macvlan_set_mac_address, 616 .ndo_set_mac_address = macvlan_set_mac_address,
575 .ndo_set_rx_mode = macvlan_set_multicast_list, 617 .ndo_set_rx_mode = macvlan_set_mac_lists,
576 .ndo_get_stats64 = macvlan_dev_get_stats64, 618 .ndo_get_stats64 = macvlan_dev_get_stats64,
577 .ndo_validate_addr = eth_validate_addr, 619 .ndo_validate_addr = eth_validate_addr,
578 .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid, 620 .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid,
579 .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid, 621 .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid,
622 .ndo_fdb_add = macvlan_fdb_add,
623 .ndo_fdb_del = macvlan_fdb_del,
624 .ndo_fdb_dump = ndo_dflt_fdb_dump,
580}; 625};
581 626
582void macvlan_common_setup(struct net_device *dev) 627void macvlan_common_setup(struct net_device *dev)
@@ -711,6 +756,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
711 if (data && data[IFLA_MACVLAN_MODE]) 756 if (data && data[IFLA_MACVLAN_MODE])
712 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 757 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
713 758
759 if (data && data[IFLA_MACVLAN_FLAGS])
760 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
761
714 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 762 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
715 if (port->count) 763 if (port->count)
716 return -EINVAL; 764 return -EINVAL;
@@ -760,6 +808,16 @@ static int macvlan_changelink(struct net_device *dev,
760 struct macvlan_dev *vlan = netdev_priv(dev); 808 struct macvlan_dev *vlan = netdev_priv(dev);
761 if (data && data[IFLA_MACVLAN_MODE]) 809 if (data && data[IFLA_MACVLAN_MODE])
762 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 810 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
811 if (data && data[IFLA_MACVLAN_FLAGS]) {
812 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
813 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
814
815 if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
816 dev_set_promiscuity(vlan->lowerdev, -1);
817 else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
818 dev_set_promiscuity(vlan->lowerdev, 1);
819 vlan->flags = flags;
820 }
763 return 0; 821 return 0;
764} 822}
765 823
@@ -773,7 +831,10 @@ static int macvlan_fill_info(struct sk_buff *skb,
773{ 831{
774 struct macvlan_dev *vlan = netdev_priv(dev); 832 struct macvlan_dev *vlan = netdev_priv(dev);
775 833
776 NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode); 834 if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
835 goto nla_put_failure;
836 if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
837 goto nla_put_failure;
777 return 0; 838 return 0;
778 839
779nla_put_failure: 840nla_put_failure:
@@ -781,7 +842,8 @@ nla_put_failure:
781} 842}
782 843
783static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { 844static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
784 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, 845 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
846 [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
785}; 847};
786 848
787int macvlan_link_register(struct rtnl_link_ops *ops) 849int macvlan_link_register(struct rtnl_link_ops *ops)
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e16f98cb4f04..cd802eb25fd2 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -39,10 +39,7 @@ static int bcm63xx_config_init(struct phy_device *phydev)
39 MII_BCM63XX_IR_SPEED | 39 MII_BCM63XX_IR_SPEED |
40 MII_BCM63XX_IR_LINK) | 40 MII_BCM63XX_IR_LINK) |
41 MII_BCM63XX_IR_EN; 41 MII_BCM63XX_IR_EN;
42 err = phy_write(phydev, MII_BCM63XX_IR, reg); 42 return phy_write(phydev, MII_BCM63XX_IR, reg);
43 if (err < 0)
44 return err;
45 return 0;
46} 43}
47 44
48static int bcm63xx_ack_interrupt(struct phy_device *phydev) 45static int bcm63xx_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 2f774acdb551..5f59cc064778 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -134,12 +134,7 @@ static int dm9161_config_init(struct phy_device *phydev)
134 return err; 134 return err;
135 135
136 /* Reconnect the PHY, and enable Autonegotiation */ 136 /* Reconnect the PHY, and enable Autonegotiation */
137 err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE); 137 return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
138
139 if (err < 0)
140 return err;
141
142 return 0;
143} 138}
144 139
145static int dm9161_ack_interrupt(struct phy_device *phydev) 140static int dm9161_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index dd7ae19579d1..940b29022d0c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1215,6 +1215,36 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1215 } 1215 }
1216} 1216}
1217 1217
1218static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
1219{
1220 struct dp83640_private *dp83640 = dev->priv;
1221
1222 info->so_timestamping =
1223 SOF_TIMESTAMPING_TX_HARDWARE |
1224 SOF_TIMESTAMPING_RX_HARDWARE |
1225 SOF_TIMESTAMPING_RAW_HARDWARE;
1226 info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
1227 info->tx_types =
1228 (1 << HWTSTAMP_TX_OFF) |
1229 (1 << HWTSTAMP_TX_ON) |
1230 (1 << HWTSTAMP_TX_ONESTEP_SYNC);
1231 info->rx_filters =
1232 (1 << HWTSTAMP_FILTER_NONE) |
1233 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1234 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1235 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1236 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1237 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1238 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
1239 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1240 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1241 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1242 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
1243 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
1244 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
1245 return 0;
1246}
1247
1218static struct phy_driver dp83640_driver = { 1248static struct phy_driver dp83640_driver = {
1219 .phy_id = DP83640_PHY_ID, 1249 .phy_id = DP83640_PHY_ID,
1220 .phy_id_mask = 0xfffffff0, 1250 .phy_id_mask = 0xfffffff0,
@@ -1225,6 +1255,7 @@ static struct phy_driver dp83640_driver = {
1225 .remove = dp83640_remove, 1255 .remove = dp83640_remove,
1226 .config_aneg = genphy_config_aneg, 1256 .config_aneg = genphy_config_aneg,
1227 .read_status = genphy_read_status, 1257 .read_status = genphy_read_status,
1258 .ts_info = dp83640_ts_info,
1228 .hwtstamp = dp83640_hwtstamp, 1259 .hwtstamp = dp83640_hwtstamp,
1229 .rxtstamp = dp83640_rxtstamp, 1260 .rxtstamp = dp83640_rxtstamp,
1230 .txtstamp = dp83640_txtstamp, 1261 .txtstamp = dp83640_txtstamp,
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index f08c85acf761..5ac46f5226f3 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -40,6 +40,7 @@ MODULE_LICENSE("GPL");
40#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */ 40#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ 41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ 42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
43#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
43 44
44static int ip175c_config_init(struct phy_device *phydev) 45static int ip175c_config_init(struct phy_device *phydev)
45{ 46{
@@ -185,6 +186,15 @@ static int ip175c_config_aneg(struct phy_device *phydev)
185 return 0; 186 return 0;
186} 187}
187 188
189static int ip101a_g_ack_interrupt(struct phy_device *phydev)
190{
191 int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
192 if (err < 0)
193 return err;
194
195 return 0;
196}
197
188static struct phy_driver ip175c_driver = { 198static struct phy_driver ip175c_driver = {
189 .phy_id = 0x02430d80, 199 .phy_id = 0x02430d80,
190 .name = "ICPlus IP175C", 200 .name = "ICPlus IP175C",
@@ -204,7 +214,6 @@ static struct phy_driver ip1001_driver = {
204 .phy_id_mask = 0x0ffffff0, 214 .phy_id_mask = 0x0ffffff0,
205 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | 215 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
206 SUPPORTED_Asym_Pause, 216 SUPPORTED_Asym_Pause,
207 .flags = PHY_HAS_INTERRUPT,
208 .config_init = &ip1001_config_init, 217 .config_init = &ip1001_config_init,
209 .config_aneg = &genphy_config_aneg, 218 .config_aneg = &genphy_config_aneg,
210 .read_status = &genphy_read_status, 219 .read_status = &genphy_read_status,
@@ -220,6 +229,7 @@ static struct phy_driver ip101a_g_driver = {
220 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | 229 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
221 SUPPORTED_Asym_Pause, 230 SUPPORTED_Asym_Pause,
222 .flags = PHY_HAS_INTERRUPT, 231 .flags = PHY_HAS_INTERRUPT,
232 .ack_interrupt = ip101a_g_ack_interrupt,
223 .config_init = &ip101a_g_config_init, 233 .config_init = &ip101a_g_config_init,
224 .config_aneg = &genphy_config_aneg, 234 .config_aneg = &genphy_config_aneg,
225 .read_status = &genphy_read_status, 235 .read_status = &genphy_read_status,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e8b9c53c304b..418928d644bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -455,11 +455,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
455 if (err < 0) 455 if (err < 0)
456 return err; 456 return err;
457 457
458 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 458 return phy_write(phydev, MII_BMCR, BMCR_RESET);
459 if (err < 0)
460 return err;
461
462 return 0;
463} 459}
464 460
465static int m88e1118_config_aneg(struct phy_device *phydev) 461static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -515,11 +511,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
515 if (err < 0) 511 if (err < 0)
516 return err; 512 return err;
517 513
518 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 514 return phy_write(phydev, MII_BMCR, BMCR_RESET);
519 if (err < 0)
520 return err;
521
522 return 0;
523} 515}
524 516
525static int m88e1149_config_init(struct phy_device *phydev) 517static int m88e1149_config_init(struct phy_device *phydev)
@@ -545,11 +537,7 @@ static int m88e1149_config_init(struct phy_device *phydev)
545 if (err < 0) 537 if (err < 0)
546 return err; 538 return err;
547 539
548 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 540 return phy_write(phydev, MII_BMCR, BMCR_RESET);
549 if (err < 0)
550 return err;
551
552 return 0;
553} 541}
554 542
555static int m88e1145_config_init(struct phy_device *phydev) 543static int m88e1145_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 116a2dd7c879..4eb98bc52a0a 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -348,7 +348,6 @@ static int __devexit ks8995_remove(struct spi_device *spi)
348static struct spi_driver ks8995_driver = { 348static struct spi_driver ks8995_driver = {
349 .driver = { 349 .driver = {
350 .name = "spi-ks8995", 350 .name = "spi-ks8995",
351 .bus = &spi_bus_type,
352 .owner = THIS_MODULE, 351 .owner = THIS_MODULE,
353 }, 352 },
354 .probe = ks8995_probe, 353 .probe = ks8995_probe,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 33f8c51968b6..21d7151fb0ab 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -235,7 +235,7 @@ struct ppp_net {
235/* Prototypes. */ 235/* Prototypes. */
236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
237 struct file *file, unsigned int cmd, unsigned long arg); 237 struct file *file, unsigned int cmd, unsigned long arg);
238static int ppp_xmit_process(struct ppp *ppp); 238static void ppp_xmit_process(struct ppp *ppp);
239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
240static void ppp_push(struct ppp *ppp); 240static void ppp_push(struct ppp *ppp);
241static void ppp_channel_push(struct channel *pch); 241static void ppp_channel_push(struct channel *pch);
@@ -969,8 +969,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
969 put_unaligned_be16(proto, pp); 969 put_unaligned_be16(proto, pp);
970 970
971 skb_queue_tail(&ppp->file.xq, skb); 971 skb_queue_tail(&ppp->file.xq, skb);
972 if (!ppp_xmit_process(ppp)) 972 ppp_xmit_process(ppp);
973 netif_stop_queue(dev);
974 return NETDEV_TX_OK; 973 return NETDEV_TX_OK;
975 974
976 outf: 975 outf:
@@ -1048,11 +1047,10 @@ static void ppp_setup(struct net_device *dev)
1048 * Called to do any work queued up on the transmit side 1047 * Called to do any work queued up on the transmit side
1049 * that can now be done. 1048 * that can now be done.
1050 */ 1049 */
1051static int 1050static void
1052ppp_xmit_process(struct ppp *ppp) 1051ppp_xmit_process(struct ppp *ppp)
1053{ 1052{
1054 struct sk_buff *skb; 1053 struct sk_buff *skb;
1055 int ret = 0;
1056 1054
1057 ppp_xmit_lock(ppp); 1055 ppp_xmit_lock(ppp);
1058 if (!ppp->closing) { 1056 if (!ppp->closing) {
@@ -1062,13 +1060,12 @@ ppp_xmit_process(struct ppp *ppp)
1062 ppp_send_frame(ppp, skb); 1060 ppp_send_frame(ppp, skb);
1063 /* If there's no work left to do, tell the core net 1061 /* If there's no work left to do, tell the core net
1064 code that we can accept some more. */ 1062 code that we can accept some more. */
1065 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) { 1063 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1066 netif_wake_queue(ppp->dev); 1064 netif_wake_queue(ppp->dev);
1067 ret = 1; 1065 else
1068 } 1066 netif_stop_queue(ppp->dev);
1069 } 1067 }
1070 ppp_xmit_unlock(ppp); 1068 ppp_xmit_unlock(ppp);
1071 return ret;
1072} 1069}
1073 1070
1074static inline struct sk_buff * 1071static inline struct sk_buff *
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 885dbdd9c39e..72b50f57e7b2 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -116,8 +116,8 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
116 int i; 116 int i;
117 117
118 rcu_read_lock(); 118 rcu_read_lock();
119 for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID; 119 i = 1;
120 i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) { 120 for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
121 sock = rcu_dereference(callid_sock[i]); 121 sock = rcu_dereference(callid_sock[i]);
122 if (!sock) 122 if (!sock)
123 continue; 123 continue;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 248a144033ca..89024d5fc33a 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -40,4 +40,15 @@ config NET_TEAM_MODE_ACTIVEBACKUP
40 To compile this team mode as a module, choose M here: the module 40 To compile this team mode as a module, choose M here: the module
41 will be called team_mode_activebackup. 41 will be called team_mode_activebackup.
42 42
43config NET_TEAM_MODE_LOADBALANCE
44 tristate "Load-balance mode support"
45 depends on NET_TEAM
46 ---help---
47 This mode provides load balancing functionality. Tx port selection
48 is done using BPF function set up from userspace (bpf_hash_func
49 option)
50
51 To compile this team mode as a module, choose M here: the module
52 will be called team_mode_loadbalance.
53
43endif # NET_TEAM 54endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 85f2028a87af..fb9f4c1c51ff 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_NET_TEAM) += team.o 5obj-$(CONFIG_NET_TEAM) += team.o
6obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o 6obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
7obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o 7obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
8obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8f81805c6825..c61ae35a53ce 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -65,7 +65,7 @@ static int __set_port_mac(struct net_device *port_dev,
65 return dev_set_mac_address(port_dev, &addr); 65 return dev_set_mac_address(port_dev, &addr);
66} 66}
67 67
68int team_port_set_orig_mac(struct team_port *port) 68static int team_port_set_orig_mac(struct team_port *port)
69{ 69{
70 return __set_port_mac(port->dev, port->orig.dev_addr); 70 return __set_port_mac(port->dev, port->orig.dev_addr);
71} 71}
@@ -76,12 +76,26 @@ int team_port_set_team_mac(struct team_port *port)
76} 76}
77EXPORT_SYMBOL(team_port_set_team_mac); 77EXPORT_SYMBOL(team_port_set_team_mac);
78 78
79static void team_refresh_port_linkup(struct team_port *port)
80{
81 port->linkup = port->user.linkup_enabled ? port->user.linkup :
82 port->state.linkup;
83}
79 84
80/******************* 85/*******************
81 * Options handling 86 * Options handling
82 *******************/ 87 *******************/
83 88
84struct team_option *__team_find_option(struct team *team, const char *opt_name) 89struct team_option_inst { /* One for each option instance */
90 struct list_head list;
91 struct team_option *option;
92 struct team_port *port; /* != NULL if per-port */
93 bool changed;
94 bool removed;
95};
96
97static struct team_option *__team_find_option(struct team *team,
98 const char *opt_name)
85{ 99{
86 struct team_option *option; 100 struct team_option *option;
87 101
@@ -92,9 +106,121 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name)
92 return NULL; 106 return NULL;
93} 107}
94 108
95int __team_options_register(struct team *team, 109static int __team_option_inst_add(struct team *team, struct team_option *option,
96 const struct team_option *option, 110 struct team_port *port)
97 size_t option_count) 111{
112 struct team_option_inst *opt_inst;
113
114 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
115 if (!opt_inst)
116 return -ENOMEM;
117 opt_inst->option = option;
118 opt_inst->port = port;
119 opt_inst->changed = true;
120 opt_inst->removed = false;
121 list_add_tail(&opt_inst->list, &team->option_inst_list);
122 return 0;
123}
124
125static void __team_option_inst_del(struct team_option_inst *opt_inst)
126{
127 list_del(&opt_inst->list);
128 kfree(opt_inst);
129}
130
131static void __team_option_inst_del_option(struct team *team,
132 struct team_option *option)
133{
134 struct team_option_inst *opt_inst, *tmp;
135
136 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
137 if (opt_inst->option == option)
138 __team_option_inst_del(opt_inst);
139 }
140}
141
142static int __team_option_inst_add_option(struct team *team,
143 struct team_option *option)
144{
145 struct team_port *port;
146 int err;
147
148 if (!option->per_port)
149 return __team_option_inst_add(team, option, 0);
150
151 list_for_each_entry(port, &team->port_list, list) {
152 err = __team_option_inst_add(team, option, port);
153 if (err)
154 goto inst_del_option;
155 }
156 return 0;
157
158inst_del_option:
159 __team_option_inst_del_option(team, option);
160 return err;
161}
162
163static void __team_option_inst_mark_removed_option(struct team *team,
164 struct team_option *option)
165{
166 struct team_option_inst *opt_inst;
167
168 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
169 if (opt_inst->option == option) {
170 opt_inst->changed = true;
171 opt_inst->removed = true;
172 }
173 }
174}
175
176static void __team_option_inst_del_port(struct team *team,
177 struct team_port *port)
178{
179 struct team_option_inst *opt_inst, *tmp;
180
181 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
182 if (opt_inst->option->per_port &&
183 opt_inst->port == port)
184 __team_option_inst_del(opt_inst);
185 }
186}
187
188static int __team_option_inst_add_port(struct team *team,
189 struct team_port *port)
190{
191 struct team_option *option;
192 int err;
193
194 list_for_each_entry(option, &team->option_list, list) {
195 if (!option->per_port)
196 continue;
197 err = __team_option_inst_add(team, option, port);
198 if (err)
199 goto inst_del_port;
200 }
201 return 0;
202
203inst_del_port:
204 __team_option_inst_del_port(team, port);
205 return err;
206}
207
208static void __team_option_inst_mark_removed_port(struct team *team,
209 struct team_port *port)
210{
211 struct team_option_inst *opt_inst;
212
213 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
214 if (opt_inst->port == port) {
215 opt_inst->changed = true;
216 opt_inst->removed = true;
217 }
218 }
219}
220
221static int __team_options_register(struct team *team,
222 const struct team_option *option,
223 size_t option_count)
98{ 224{
99 int i; 225 int i;
100 struct team_option **dst_opts; 226 struct team_option **dst_opts;
@@ -107,26 +233,32 @@ int __team_options_register(struct team *team,
107 for (i = 0; i < option_count; i++, option++) { 233 for (i = 0; i < option_count; i++, option++) {
108 if (__team_find_option(team, option->name)) { 234 if (__team_find_option(team, option->name)) {
109 err = -EEXIST; 235 err = -EEXIST;
110 goto rollback; 236 goto alloc_rollback;
111 } 237 }
112 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); 238 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
113 if (!dst_opts[i]) { 239 if (!dst_opts[i]) {
114 err = -ENOMEM; 240 err = -ENOMEM;
115 goto rollback; 241 goto alloc_rollback;
116 } 242 }
117 } 243 }
118 244
119 for (i = 0; i < option_count; i++) { 245 for (i = 0; i < option_count; i++) {
120 dst_opts[i]->changed = true; 246 err = __team_option_inst_add_option(team, dst_opts[i]);
121 dst_opts[i]->removed = false; 247 if (err)
248 goto inst_rollback;
122 list_add_tail(&dst_opts[i]->list, &team->option_list); 249 list_add_tail(&dst_opts[i]->list, &team->option_list);
123 } 250 }
124 251
125 kfree(dst_opts); 252 kfree(dst_opts);
126 return 0; 253 return 0;
127 254
128rollback: 255inst_rollback:
129 for (i = 0; i < option_count; i++) 256 for (i--; i >= 0; i--)
257 __team_option_inst_del_option(team, dst_opts[i]);
258
259 i = option_count - 1;
260alloc_rollback:
261 for (i--; i >= 0; i--)
130 kfree(dst_opts[i]); 262 kfree(dst_opts[i]);
131 263
132 kfree(dst_opts); 264 kfree(dst_opts);
@@ -143,10 +275,8 @@ static void __team_options_mark_removed(struct team *team,
143 struct team_option *del_opt; 275 struct team_option *del_opt;
144 276
145 del_opt = __team_find_option(team, option->name); 277 del_opt = __team_find_option(team, option->name);
146 if (del_opt) { 278 if (del_opt)
147 del_opt->changed = true; 279 __team_option_inst_mark_removed_option(team, del_opt);
148 del_opt->removed = true;
149 }
150 } 280 }
151} 281}
152 282
@@ -161,6 +291,7 @@ static void __team_options_unregister(struct team *team,
161 291
162 del_opt = __team_find_option(team, option->name); 292 del_opt = __team_find_option(team, option->name);
163 if (del_opt) { 293 if (del_opt) {
294 __team_option_inst_del_option(team, del_opt);
164 list_del(&del_opt->list); 295 list_del(&del_opt->list);
165 kfree(del_opt); 296 kfree(del_opt);
166 } 297 }
@@ -193,22 +324,42 @@ void team_options_unregister(struct team *team,
193} 324}
194EXPORT_SYMBOL(team_options_unregister); 325EXPORT_SYMBOL(team_options_unregister);
195 326
196static int team_option_get(struct team *team, struct team_option *option, 327static int team_option_port_add(struct team *team, struct team_port *port)
197 void *arg) 328{
329 int err;
330
331 err = __team_option_inst_add_port(team, port);
332 if (err)
333 return err;
334 __team_options_change_check(team);
335 return 0;
336}
337
338static void team_option_port_del(struct team *team, struct team_port *port)
339{
340 __team_option_inst_mark_removed_port(team, port);
341 __team_options_change_check(team);
342 __team_option_inst_del_port(team, port);
343}
344
345static int team_option_get(struct team *team,
346 struct team_option_inst *opt_inst,
347 struct team_gsetter_ctx *ctx)
198{ 348{
199 return option->getter(team, arg); 349 return opt_inst->option->getter(team, ctx);
200} 350}
201 351
202static int team_option_set(struct team *team, struct team_option *option, 352static int team_option_set(struct team *team,
203 void *arg) 353 struct team_option_inst *opt_inst,
354 struct team_gsetter_ctx *ctx)
204{ 355{
205 int err; 356 int err;
206 357
207 err = option->setter(team, arg); 358 err = opt_inst->option->setter(team, ctx);
208 if (err) 359 if (err)
209 return err; 360 return err;
210 361
211 option->changed = true; 362 opt_inst->changed = true;
212 __team_options_change_check(team); 363 __team_options_change_check(team);
213 return err; 364 return err;
214} 365}
@@ -408,6 +559,8 @@ static int team_change_mode(struct team *team, const char *kind)
408 * Rx path frame handler 559 * Rx path frame handler
409 ************************/ 560 ************************/
410 561
562static bool team_port_enabled(struct team_port *port);
563
411/* note: already called with rcu_read_lock */ 564/* note: already called with rcu_read_lock */
412static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) 565static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
413{ 566{
@@ -424,8 +577,12 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
424 577
425 port = team_port_get_rcu(skb->dev); 578 port = team_port_get_rcu(skb->dev);
426 team = port->team; 579 team = port->team;
427 580 if (!team_port_enabled(port)) {
428 res = team->ops.receive(team, port, skb); 581 /* allow exact match delivery for disabled ports */
582 res = RX_HANDLER_EXACT;
583 } else {
584 res = team->ops.receive(team, port, skb);
585 }
429 if (res == RX_HANDLER_ANOTHER) { 586 if (res == RX_HANDLER_ANOTHER) {
430 struct team_pcpu_stats *pcpu_stats; 587 struct team_pcpu_stats *pcpu_stats;
431 588
@@ -461,17 +618,25 @@ static bool team_port_find(const struct team *team,
461 return false; 618 return false;
462} 619}
463 620
621static bool team_port_enabled(struct team_port *port)
622{
623 return port->index != -1;
624}
625
464/* 626/*
465 * Add/delete port to the team port list. Write guarded by rtnl_lock. 627 * Enable/disable port by adding to enabled port hashlist and setting
466 * Takes care of correct port->index setup (might be racy). 628 * port->index (Might be racy so reader could see incorrect ifindex when
629 * processing a flying packet, but that is not a problem). Write guarded
630 * by team->lock.
467 */ 631 */
468static void team_port_list_add_port(struct team *team, 632static void team_port_enable(struct team *team,
469 struct team_port *port) 633 struct team_port *port)
470{ 634{
471 port->index = team->port_count++; 635 if (team_port_enabled(port))
636 return;
637 port->index = team->en_port_count++;
472 hlist_add_head_rcu(&port->hlist, 638 hlist_add_head_rcu(&port->hlist,
473 team_port_index_hash(team, port->index)); 639 team_port_index_hash(team, port->index));
474 list_add_tail_rcu(&port->list, &team->port_list);
475} 640}
476 641
477static void __reconstruct_port_hlist(struct team *team, int rm_index) 642static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -479,7 +644,7 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
479 int i; 644 int i;
480 struct team_port *port; 645 struct team_port *port;
481 646
482 for (i = rm_index + 1; i < team->port_count; i++) { 647 for (i = rm_index + 1; i < team->en_port_count; i++) {
483 port = team_get_port_by_index(team, i); 648 port = team_get_port_by_index(team, i);
484 hlist_del_rcu(&port->hlist); 649 hlist_del_rcu(&port->hlist);
485 port->index--; 650 port->index--;
@@ -488,15 +653,17 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
488 } 653 }
489} 654}
490 655
491static void team_port_list_del_port(struct team *team, 656static void team_port_disable(struct team *team,
492 struct team_port *port) 657 struct team_port *port)
493{ 658{
494 int rm_index = port->index; 659 int rm_index = port->index;
495 660
661 if (!team_port_enabled(port))
662 return;
496 hlist_del_rcu(&port->hlist); 663 hlist_del_rcu(&port->hlist);
497 list_del_rcu(&port->list);
498 __reconstruct_port_hlist(team, rm_index); 664 __reconstruct_port_hlist(team, rm_index);
499 team->port_count--; 665 team->en_port_count--;
666 port->index = -1;
500} 667}
501 668
502#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 669#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -642,7 +809,16 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
642 goto err_handler_register; 809 goto err_handler_register;
643 } 810 }
644 811
645 team_port_list_add_port(team, port); 812 err = team_option_port_add(team, port);
813 if (err) {
814 netdev_err(dev, "Device %s failed to add per-port options\n",
815 portname);
816 goto err_option_port_add;
817 }
818
819 port->index = -1;
820 team_port_enable(team, port);
821 list_add_tail_rcu(&port->list, &team->port_list);
646 team_adjust_ops(team); 822 team_adjust_ops(team);
647 __team_compute_features(team); 823 __team_compute_features(team);
648 __team_port_change_check(port, !!netif_carrier_ok(port_dev)); 824 __team_port_change_check(port, !!netif_carrier_ok(port_dev));
@@ -651,6 +827,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
651 827
652 return 0; 828 return 0;
653 829
830err_option_port_add:
831 netdev_rx_handler_unregister(port_dev);
832
654err_handler_register: 833err_handler_register:
655 netdev_set_master(port_dev, NULL); 834 netdev_set_master(port_dev, NULL);
656 835
@@ -688,8 +867,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
688 867
689 port->removed = true; 868 port->removed = true;
690 __team_port_change_check(port, false); 869 __team_port_change_check(port, false);
691 team_port_list_del_port(team, port); 870 team_port_disable(team, port);
871 list_del_rcu(&port->list);
692 team_adjust_ops(team); 872 team_adjust_ops(team);
873 team_option_port_del(team, port);
693 netdev_rx_handler_unregister(port_dev); 874 netdev_rx_handler_unregister(port_dev);
694 netdev_set_master(port_dev, NULL); 875 netdev_set_master(port_dev, NULL);
695 vlan_vids_del_by_dev(port_dev, dev); 876 vlan_vids_del_by_dev(port_dev, dev);
@@ -712,19 +893,66 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
712 893
713static const char team_no_mode_kind[] = "*NOMODE*"; 894static const char team_no_mode_kind[] = "*NOMODE*";
714 895
715static int team_mode_option_get(struct team *team, void *arg) 896static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
897{
898 ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
899 return 0;
900}
901
902static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
903{
904 return team_change_mode(team, ctx->data.str_val);
905}
906
907static int team_port_en_option_get(struct team *team,
908 struct team_gsetter_ctx *ctx)
909{
910 ctx->data.bool_val = team_port_enabled(ctx->port);
911 return 0;
912}
913
914static int team_port_en_option_set(struct team *team,
915 struct team_gsetter_ctx *ctx)
916{
917 if (ctx->data.bool_val)
918 team_port_enable(team, ctx->port);
919 else
920 team_port_disable(team, ctx->port);
921 return 0;
922}
923
924static int team_user_linkup_option_get(struct team *team,
925 struct team_gsetter_ctx *ctx)
926{
927 ctx->data.bool_val = ctx->port->user.linkup;
928 return 0;
929}
930
931static int team_user_linkup_option_set(struct team *team,
932 struct team_gsetter_ctx *ctx)
933{
934 ctx->port->user.linkup = ctx->data.bool_val;
935 team_refresh_port_linkup(ctx->port);
936 return 0;
937}
938
939static int team_user_linkup_en_option_get(struct team *team,
940 struct team_gsetter_ctx *ctx)
716{ 941{
717 const char **str = arg; 942 struct team_port *port = ctx->port;
718 943
719 *str = team->mode ? team->mode->kind : team_no_mode_kind; 944 ctx->data.bool_val = port->user.linkup_enabled;
720 return 0; 945 return 0;
721} 946}
722 947
723static int team_mode_option_set(struct team *team, void *arg) 948static int team_user_linkup_en_option_set(struct team *team,
949 struct team_gsetter_ctx *ctx)
724{ 950{
725 const char **str = arg; 951 struct team_port *port = ctx->port;
726 952
727 return team_change_mode(team, *str); 953 port->user.linkup_enabled = ctx->data.bool_val;
954 team_refresh_port_linkup(ctx->port);
955 return 0;
728} 956}
729 957
730static const struct team_option team_options[] = { 958static const struct team_option team_options[] = {
@@ -734,6 +962,27 @@ static const struct team_option team_options[] = {
734 .getter = team_mode_option_get, 962 .getter = team_mode_option_get,
735 .setter = team_mode_option_set, 963 .setter = team_mode_option_set,
736 }, 964 },
965 {
966 .name = "enabled",
967 .type = TEAM_OPTION_TYPE_BOOL,
968 .per_port = true,
969 .getter = team_port_en_option_get,
970 .setter = team_port_en_option_set,
971 },
972 {
973 .name = "user_linkup",
974 .type = TEAM_OPTION_TYPE_BOOL,
975 .per_port = true,
976 .getter = team_user_linkup_option_get,
977 .setter = team_user_linkup_option_set,
978 },
979 {
980 .name = "user_linkup_enabled",
981 .type = TEAM_OPTION_TYPE_BOOL,
982 .per_port = true,
983 .getter = team_user_linkup_en_option_get,
984 .setter = team_user_linkup_en_option_set,
985 },
737}; 986};
738 987
739static int team_init(struct net_device *dev) 988static int team_init(struct net_device *dev)
@@ -750,12 +999,13 @@ static int team_init(struct net_device *dev)
750 return -ENOMEM; 999 return -ENOMEM;
751 1000
752 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) 1001 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
753 INIT_HLIST_HEAD(&team->port_hlist[i]); 1002 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
754 INIT_LIST_HEAD(&team->port_list); 1003 INIT_LIST_HEAD(&team->port_list);
755 1004
756 team_adjust_ops(team); 1005 team_adjust_ops(team);
757 1006
758 INIT_LIST_HEAD(&team->option_list); 1007 INIT_LIST_HEAD(&team->option_list);
1008 INIT_LIST_HEAD(&team->option_inst_list);
759 err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); 1009 err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
760 if (err) 1010 if (err)
761 goto err_options_register; 1011 goto err_options_register;
@@ -1145,10 +1395,7 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1145 }, 1395 },
1146 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, 1396 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
1147 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, 1397 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
1148 [TEAM_ATTR_OPTION_DATA] = { 1398 [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
1149 .type = NLA_BINARY,
1150 .len = TEAM_STRING_MAX_LEN,
1151 },
1152}; 1399};
1153 1400
1154static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) 1401static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
@@ -1241,46 +1488,86 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
1241{ 1488{
1242 struct nlattr *option_list; 1489 struct nlattr *option_list;
1243 void *hdr; 1490 void *hdr;
1244 struct team_option *option; 1491 struct team_option_inst *opt_inst;
1492 int err;
1245 1493
1246 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 1494 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1247 TEAM_CMD_OPTIONS_GET); 1495 TEAM_CMD_OPTIONS_GET);
1248 if (IS_ERR(hdr)) 1496 if (IS_ERR(hdr))
1249 return PTR_ERR(hdr); 1497 return PTR_ERR(hdr);
1250 1498
1251 NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); 1499 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1500 goto nla_put_failure;
1252 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); 1501 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1253 if (!option_list) 1502 if (!option_list)
1254 return -EMSGSIZE; 1503 return -EMSGSIZE;
1255 1504
1256 list_for_each_entry(option, &team->option_list, list) { 1505 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1257 struct nlattr *option_item; 1506 struct nlattr *option_item;
1258 long arg; 1507 struct team_option *option = opt_inst->option;
1508 struct team_gsetter_ctx ctx;
1259 1509
1260 /* Include only changed options if fill all mode is not on */ 1510 /* Include only changed options if fill all mode is not on */
1261 if (!fillall && !option->changed) 1511 if (!fillall && !opt_inst->changed)
1262 continue; 1512 continue;
1263 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); 1513 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1264 if (!option_item) 1514 if (!option_item)
1265 goto nla_put_failure; 1515 goto nla_put_failure;
1266 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); 1516 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1267 if (option->changed) { 1517 goto nla_put_failure;
1268 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); 1518 if (opt_inst->changed) {
1269 option->changed = false; 1519 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1520 goto nla_put_failure;
1521 opt_inst->changed = false;
1270 } 1522 }
1271 if (option->removed) 1523 if (opt_inst->removed &&
1272 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED); 1524 nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1525 goto nla_put_failure;
1526 if (opt_inst->port &&
1527 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1528 opt_inst->port->dev->ifindex))
1529 goto nla_put_failure;
1530 ctx.port = opt_inst->port;
1273 switch (option->type) { 1531 switch (option->type) {
1274 case TEAM_OPTION_TYPE_U32: 1532 case TEAM_OPTION_TYPE_U32:
1275 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); 1533 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1276 team_option_get(team, option, &arg); 1534 goto nla_put_failure;
1277 NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg); 1535 err = team_option_get(team, opt_inst, &ctx);
1536 if (err)
1537 goto errout;
1538 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
1539 ctx.data.u32_val))
1540 goto nla_put_failure;
1278 break; 1541 break;
1279 case TEAM_OPTION_TYPE_STRING: 1542 case TEAM_OPTION_TYPE_STRING:
1280 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING); 1543 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1281 team_option_get(team, option, &arg); 1544 goto nla_put_failure;
1282 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA, 1545 err = team_option_get(team, opt_inst, &ctx);
1283 (char *) arg); 1546 if (err)
1547 goto errout;
1548 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1549 ctx.data.str_val))
1550 goto nla_put_failure;
1551 break;
1552 case TEAM_OPTION_TYPE_BINARY:
1553 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1554 goto nla_put_failure;
1555 err = team_option_get(team, opt_inst, &ctx);
1556 if (err)
1557 goto errout;
1558 if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
1559 ctx.data.bin_val.len, ctx.data.bin_val.ptr))
1560 goto nla_put_failure;
1561 break;
1562 case TEAM_OPTION_TYPE_BOOL:
1563 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1564 goto nla_put_failure;
1565 err = team_option_get(team, opt_inst, &ctx);
1566 if (err)
1567 goto errout;
1568 if (ctx.data.bool_val &&
1569 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1570 goto nla_put_failure;
1284 break; 1571 break;
1285 default: 1572 default:
1286 BUG(); 1573 BUG();
@@ -1292,8 +1579,10 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
1292 return genlmsg_end(skb, hdr); 1579 return genlmsg_end(skb, hdr);
1293 1580
1294nla_put_failure: 1581nla_put_failure:
1582 err = -EMSGSIZE;
1583errout:
1295 genlmsg_cancel(skb, hdr); 1584 genlmsg_cancel(skb, hdr);
1296 return -EMSGSIZE; 1585 return err;
1297} 1586}
1298 1587
1299static int team_nl_fill_options_get_all(struct sk_buff *skb, 1588static int team_nl_fill_options_get_all(struct sk_buff *skb,
@@ -1339,9 +1628,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1339 } 1628 }
1340 1629
1341 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { 1630 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1342 struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1]; 1631 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1632 struct nlattr *attr_port_ifindex;
1633 struct nlattr *attr_data;
1343 enum team_option_type opt_type; 1634 enum team_option_type opt_type;
1344 struct team_option *option; 1635 int opt_port_ifindex = 0; /* != 0 for per-port options */
1636 struct team_option_inst *opt_inst;
1345 char *opt_name; 1637 char *opt_name;
1346 bool opt_found = false; 1638 bool opt_found = false;
1347 1639
@@ -1349,48 +1641,78 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1349 err = -EINVAL; 1641 err = -EINVAL;
1350 goto team_put; 1642 goto team_put;
1351 } 1643 }
1352 err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX, 1644 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
1353 nl_option, team_nl_option_policy); 1645 nl_option, team_nl_option_policy);
1354 if (err) 1646 if (err)
1355 goto team_put; 1647 goto team_put;
1356 if (!mode_attrs[TEAM_ATTR_OPTION_NAME] || 1648 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
1357 !mode_attrs[TEAM_ATTR_OPTION_TYPE] || 1649 !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
1358 !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
1359 err = -EINVAL; 1650 err = -EINVAL;
1360 goto team_put; 1651 goto team_put;
1361 } 1652 }
1362 switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) { 1653 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
1363 case NLA_U32: 1654 case NLA_U32:
1364 opt_type = TEAM_OPTION_TYPE_U32; 1655 opt_type = TEAM_OPTION_TYPE_U32;
1365 break; 1656 break;
1366 case NLA_STRING: 1657 case NLA_STRING:
1367 opt_type = TEAM_OPTION_TYPE_STRING; 1658 opt_type = TEAM_OPTION_TYPE_STRING;
1368 break; 1659 break;
1660 case NLA_BINARY:
1661 opt_type = TEAM_OPTION_TYPE_BINARY;
1662 break;
1663 case NLA_FLAG:
1664 opt_type = TEAM_OPTION_TYPE_BOOL;
1665 break;
1369 default: 1666 default:
1370 goto team_put; 1667 goto team_put;
1371 } 1668 }
1372 1669
1373 opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]); 1670 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
1374 list_for_each_entry(option, &team->option_list, list) { 1671 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
1375 long arg; 1672 err = -EINVAL;
1376 struct nlattr *opt_data_attr; 1673 goto team_put;
1674 }
1675
1676 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1677 attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1678 if (attr_port_ifindex)
1679 opt_port_ifindex = nla_get_u32(attr_port_ifindex);
1680
1681 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1682 struct team_option *option = opt_inst->option;
1683 struct team_gsetter_ctx ctx;
1684 int tmp_ifindex;
1377 1685
1686 tmp_ifindex = opt_inst->port ?
1687 opt_inst->port->dev->ifindex : 0;
1378 if (option->type != opt_type || 1688 if (option->type != opt_type ||
1379 strcmp(option->name, opt_name)) 1689 strcmp(option->name, opt_name) ||
1690 tmp_ifindex != opt_port_ifindex)
1380 continue; 1691 continue;
1381 opt_found = true; 1692 opt_found = true;
1382 opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA]; 1693 ctx.port = opt_inst->port;
1383 switch (opt_type) { 1694 switch (opt_type) {
1384 case TEAM_OPTION_TYPE_U32: 1695 case TEAM_OPTION_TYPE_U32:
1385 arg = nla_get_u32(opt_data_attr); 1696 ctx.data.u32_val = nla_get_u32(attr_data);
1386 break; 1697 break;
1387 case TEAM_OPTION_TYPE_STRING: 1698 case TEAM_OPTION_TYPE_STRING:
1388 arg = (long) nla_data(opt_data_attr); 1699 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
1700 err = -EINVAL;
1701 goto team_put;
1702 }
1703 ctx.data.str_val = nla_data(attr_data);
1704 break;
1705 case TEAM_OPTION_TYPE_BINARY:
1706 ctx.data.bin_val.len = nla_len(attr_data);
1707 ctx.data.bin_val.ptr = nla_data(attr_data);
1708 break;
1709 case TEAM_OPTION_TYPE_BOOL:
1710 ctx.data.bool_val = attr_data ? true : false;
1389 break; 1711 break;
1390 default: 1712 default:
1391 BUG(); 1713 BUG();
1392 } 1714 }
1393 err = team_option_set(team, option, &arg); 1715 err = team_option_set(team, opt_inst, &ctx);
1394 if (err) 1716 if (err)
1395 goto team_put; 1717 goto team_put;
1396 } 1718 }
@@ -1420,7 +1742,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
1420 if (IS_ERR(hdr)) 1742 if (IS_ERR(hdr))
1421 return PTR_ERR(hdr); 1743 return PTR_ERR(hdr);
1422 1744
1423 NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); 1745 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1746 goto nla_put_failure;
1424 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); 1747 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1425 if (!port_list) 1748 if (!port_list)
1426 return -EMSGSIZE; 1749 return -EMSGSIZE;
@@ -1434,17 +1757,20 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
1434 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); 1757 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
1435 if (!port_item) 1758 if (!port_item)
1436 goto nla_put_failure; 1759 goto nla_put_failure;
1437 NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); 1760 if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
1761 goto nla_put_failure;
1438 if (port->changed) { 1762 if (port->changed) {
1439 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); 1763 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
1764 goto nla_put_failure;
1440 port->changed = false; 1765 port->changed = false;
1441 } 1766 }
1442 if (port->removed) 1767 if ((port->removed &&
1443 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED); 1768 nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
1444 if (port->linkup) 1769 (port->state.linkup &&
1445 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); 1770 nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
1446 NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); 1771 nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
1447 NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex); 1772 nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
1773 goto nla_put_failure;
1448 nla_nest_end(skb, port_item); 1774 nla_nest_end(skb, port_item);
1449 } 1775 }
1450 1776
@@ -1603,23 +1929,24 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
1603{ 1929{
1604 int err; 1930 int err;
1605 1931
1606 if (!port->removed && port->linkup == linkup) 1932 if (!port->removed && port->state.linkup == linkup)
1607 return; 1933 return;
1608 1934
1609 port->changed = true; 1935 port->changed = true;
1610 port->linkup = linkup; 1936 port->state.linkup = linkup;
1937 team_refresh_port_linkup(port);
1611 if (linkup) { 1938 if (linkup) {
1612 struct ethtool_cmd ecmd; 1939 struct ethtool_cmd ecmd;
1613 1940
1614 err = __ethtool_get_settings(port->dev, &ecmd); 1941 err = __ethtool_get_settings(port->dev, &ecmd);
1615 if (!err) { 1942 if (!err) {
1616 port->speed = ethtool_cmd_speed(&ecmd); 1943 port->state.speed = ethtool_cmd_speed(&ecmd);
1617 port->duplex = ecmd.duplex; 1944 port->state.duplex = ecmd.duplex;
1618 goto send_event; 1945 goto send_event;
1619 } 1946 }
1620 } 1947 }
1621 port->speed = 0; 1948 port->state.speed = 0;
1622 port->duplex = 0; 1949 port->state.duplex = 0;
1623 1950
1624send_event: 1951send_event:
1625 err = team_nl_send_event_port_list_get(port->team); 1952 err = team_nl_send_event_port_list_get(port->team);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index f4d960e82e29..fd6bd03aaa89 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -59,23 +59,21 @@ static void ab_port_leave(struct team *team, struct team_port *port)
59 RCU_INIT_POINTER(ab_priv(team)->active_port, NULL); 59 RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
60} 60}
61 61
62static int ab_active_port_get(struct team *team, void *arg) 62static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
63{ 63{
64 u32 *ifindex = arg;
65
66 *ifindex = 0;
67 if (ab_priv(team)->active_port) 64 if (ab_priv(team)->active_port)
68 *ifindex = ab_priv(team)->active_port->dev->ifindex; 65 ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
66 else
67 ctx->data.u32_val = 0;
69 return 0; 68 return 0;
70} 69}
71 70
72static int ab_active_port_set(struct team *team, void *arg) 71static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx)
73{ 72{
74 u32 *ifindex = arg;
75 struct team_port *port; 73 struct team_port *port;
76 74
77 list_for_each_entry_rcu(port, &team->port_list, list) { 75 list_for_each_entry(port, &team->port_list, list) {
78 if (port->dev->ifindex == *ifindex) { 76 if (port->dev->ifindex == ctx->data.u32_val) {
79 rcu_assign_pointer(ab_priv(team)->active_port, port); 77 rcu_assign_pointer(ab_priv(team)->active_port, port);
80 return 0; 78 return 0;
81 } 79 }
@@ -92,12 +90,12 @@ static const struct team_option ab_options[] = {
92 }, 90 },
93}; 91};
94 92
95int ab_init(struct team *team) 93static int ab_init(struct team *team)
96{ 94{
97 return team_options_register(team, ab_options, ARRAY_SIZE(ab_options)); 95 return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
98} 96}
99 97
100void ab_exit(struct team *team) 98static void ab_exit(struct team *team)
101{ 99{
102 team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options)); 100 team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
103} 101}
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
new file mode 100644
index 000000000000..86e8183c8e3d
--- /dev/null
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -0,0 +1,174 @@
1/*
2 * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
3 * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/netdevice.h>
17#include <linux/filter.h>
18#include <linux/if_team.h>
19
20struct lb_priv {
21 struct sk_filter __rcu *fp;
22 struct sock_fprog *orig_fprog;
23};
24
25static struct lb_priv *lb_priv(struct team *team)
26{
27 return (struct lb_priv *) &team->mode_priv;
28}
29
30static bool lb_transmit(struct team *team, struct sk_buff *skb)
31{
32 struct sk_filter *fp;
33 struct team_port *port;
34 unsigned int hash;
35 int port_index;
36
37 fp = rcu_dereference(lb_priv(team)->fp);
38 if (unlikely(!fp))
39 goto drop;
40 hash = SK_RUN_FILTER(fp, skb);
41 port_index = hash % team->en_port_count;
42 port = team_get_port_by_index_rcu(team, port_index);
43 if (unlikely(!port))
44 goto drop;
45 skb->dev = port->dev;
46 if (dev_queue_xmit(skb))
47 return false;
48 return true;
49
50drop:
51 dev_kfree_skb_any(skb);
52 return false;
53}
54
55static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
56{
57 if (!lb_priv(team)->orig_fprog) {
58 ctx->data.bin_val.len = 0;
59 ctx->data.bin_val.ptr = NULL;
60 return 0;
61 }
62 ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
63 sizeof(struct sock_filter);
64 ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
65 return 0;
66}
67
68static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
69 const void *data)
70{
71 struct sock_fprog *fprog;
72 struct sock_filter *filter = (struct sock_filter *) data;
73
74 if (data_len % sizeof(struct sock_filter))
75 return -EINVAL;
76 fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
77 if (!fprog)
78 return -ENOMEM;
79 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
80 if (!fprog->filter) {
81 kfree(fprog);
82 return -ENOMEM;
83 }
84 fprog->len = data_len / sizeof(struct sock_filter);
85 *pfprog = fprog;
86 return 0;
87}
88
89static void __fprog_destroy(struct sock_fprog *fprog)
90{
91 kfree(fprog->filter);
92 kfree(fprog);
93}
94
95static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
96{
97 struct sk_filter *fp = NULL;
98 struct sock_fprog *fprog = NULL;
99 int err;
100
101 if (ctx->data.bin_val.len) {
102 err = __fprog_create(&fprog, ctx->data.bin_val.len,
103 ctx->data.bin_val.ptr);
104 if (err)
105 return err;
106 err = sk_unattached_filter_create(&fp, fprog);
107 if (err) {
108 __fprog_destroy(fprog);
109 return err;
110 }
111 }
112
113 if (lb_priv(team)->orig_fprog) {
114 /* Clear old filter data */
115 __fprog_destroy(lb_priv(team)->orig_fprog);
116 sk_unattached_filter_destroy(lb_priv(team)->fp);
117 }
118
119 rcu_assign_pointer(lb_priv(team)->fp, fp);
120 lb_priv(team)->orig_fprog = fprog;
121 return 0;
122}
123
124static const struct team_option lb_options[] = {
125 {
126 .name = "bpf_hash_func",
127 .type = TEAM_OPTION_TYPE_BINARY,
128 .getter = lb_bpf_func_get,
129 .setter = lb_bpf_func_set,
130 },
131};
132
133static int lb_init(struct team *team)
134{
135 return team_options_register(team, lb_options,
136 ARRAY_SIZE(lb_options));
137}
138
139static void lb_exit(struct team *team)
140{
141 team_options_unregister(team, lb_options,
142 ARRAY_SIZE(lb_options));
143}
144
145static const struct team_mode_ops lb_mode_ops = {
146 .init = lb_init,
147 .exit = lb_exit,
148 .transmit = lb_transmit,
149};
150
151static struct team_mode lb_mode = {
152 .kind = "loadbalance",
153 .owner = THIS_MODULE,
154 .priv_size = sizeof(struct lb_priv),
155 .ops = &lb_mode_ops,
156};
157
158static int __init lb_init_module(void)
159{
160 return team_mode_register(&lb_mode);
161}
162
163static void __exit lb_cleanup_module(void)
164{
165 team_mode_unregister(&lb_mode);
166}
167
168module_init(lb_init_module);
169module_exit(lb_cleanup_module);
170
171MODULE_LICENSE("GPL v2");
172MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
173MODULE_DESCRIPTION("Load-balancing mode for team");
174MODULE_ALIAS("team-mode-loadbalance");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index a0e8f806331a..6abfbdc96be5 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -50,7 +50,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
50 struct team_port *port; 50 struct team_port *port;
51 int port_index; 51 int port_index;
52 52
53 port_index = rr_priv(team)->sent_packets++ % team->port_count; 53 port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
54 port = team_get_port_by_index_rcu(team, port_index); 54 port = team_get_port_by_index_rcu(team, port_index);
55 port = __get_first_port_up(team, port); 55 port = __get_first_port_up(team, port);
56 if (unlikely(!port)) 56 if (unlikely(!port))
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index b15ac81d46fa..0924f572f59b 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1826,18 +1826,6 @@ static struct pci_driver xl_3c359_driver = {
1826 .remove = __devexit_p(xl_remove_one), 1826 .remove = __devexit_p(xl_remove_one),
1827}; 1827};
1828 1828
1829static int __init xl_pci_init (void) 1829module_pci_driver(xl_3c359_driver);
1830{
1831 return pci_register_driver(&xl_3c359_driver);
1832}
1833
1834
1835static void __exit xl_pci_cleanup (void)
1836{
1837 pci_unregister_driver (&xl_3c359_driver);
1838}
1839
1840module_init(xl_pci_init);
1841module_exit(xl_pci_cleanup);
1842 1830
1843MODULE_LICENSE("GPL") ; 1831MODULE_LICENSE("GPL") ;
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index 45550d42b368..ef3bb1326e4f 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -98,7 +98,7 @@ config 3C359
98 98
99config TMS380TR 99config TMS380TR
100 tristate "Generic TMS380 Token Ring ISA/PCI adapter support" 100 tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
101 depends on PCI || ISA && ISA_DMA_API || MCA 101 depends on PCI || ISA || MCA
102 select FW_LOADER 102 select FW_LOADER
103 ---help--- 103 ---help---
104 This driver provides generic support for token ring adapters 104 This driver provides generic support for token ring adapters
@@ -137,7 +137,7 @@ config TMSPCI
137 137
138config SKISA 138config SKISA
139 tristate "SysKonnect TR4/16 ISA support" 139 tristate "SysKonnect TR4/16 ISA support"
140 depends on TMS380TR && ISA 140 depends on TMS380TR && ISA && ISA_DMA_API
141 help 141 help
142 This tms380 module supports SysKonnect TR4/16 ISA cards. 142 This tms380 module supports SysKonnect TR4/16 ISA cards.
143 143
@@ -149,7 +149,7 @@ config SKISA
149 149
150config PROTEON 150config PROTEON
151 tristate "Proteon ISA support" 151 tristate "Proteon ISA support"
152 depends on TMS380TR && ISA 152 depends on TMS380TR && ISA && ISA_DMA_API
153 help 153 help
154 This tms380 module supports Proteon ISA cards. 154 This tms380 module supports Proteon ISA cards.
155 155
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 3e4b4f091113..97e4c65c1e29 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1904,14 +1904,6 @@ static struct pci_driver streamer_pci_driver = {
1904 .remove = __devexit_p(streamer_remove_one), 1904 .remove = __devexit_p(streamer_remove_one),
1905}; 1905};
1906 1906
1907static int __init streamer_init_module(void) { 1907module_pci_driver(streamer_pci_driver);
1908 return pci_register_driver(&streamer_pci_driver);
1909}
1910
1911static void __exit streamer_cleanup_module(void) {
1912 pci_unregister_driver(&streamer_pci_driver);
1913}
1914 1908
1915module_init(streamer_init_module);
1916module_exit(streamer_cleanup_module);
1917MODULE_LICENSE("GPL"); 1909MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 0e234741cc79..4d45fe8bd206 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1732,18 +1732,6 @@ static struct pci_driver olympic_driver = {
1732 .remove = __devexit_p(olympic_remove_one), 1732 .remove = __devexit_p(olympic_remove_one),
1733}; 1733};
1734 1734
1735static int __init olympic_pci_init(void) 1735module_pci_driver(olympic_driver);
1736{
1737 return pci_register_driver(&olympic_driver) ;
1738}
1739
1740static void __exit olympic_pci_cleanup(void)
1741{
1742 pci_unregister_driver(&olympic_driver) ;
1743}
1744
1745
1746module_init(olympic_pci_init) ;
1747module_exit(olympic_pci_cleanup) ;
1748 1736
1749MODULE_LICENSE("GPL"); 1737MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index be4813e0366c..b5e0855e4b39 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -254,7 +254,7 @@ int tms380tr_open(struct net_device *dev)
254 254
255 /* Reset the hardware here. Don't forget to set the station address. */ 255 /* Reset the hardware here. Don't forget to set the station address. */
256 256
257#ifdef CONFIG_ISA 257#if defined(CONFIG_ISA) && defined(CONFIG_ISA_DMA_API)
258 if(dev->dma > 0) 258 if(dev->dma > 0)
259 { 259 {
260 unsigned long flags=claim_dma_lock(); 260 unsigned long flags=claim_dma_lock();
@@ -1125,8 +1125,8 @@ int tms380tr_close(struct net_device *dev)
1125 1125
1126 del_timer(&tp->timer); 1126 del_timer(&tp->timer);
1127 tms380tr_disable_interrupts(dev); 1127 tms380tr_disable_interrupts(dev);
1128 1128
1129#ifdef CONFIG_ISA 1129#if defined(CONFIG_ISA) && defined(CONFIG_ISA_DMA_API)
1130 if(dev->dma > 0) 1130 if(dev->dma > 0)
1131 { 1131 {
1132 unsigned long flags=claim_dma_lock(); 1132 unsigned long flags=claim_dma_lock();
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index fb9918da5792..90f3fa44a151 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -233,16 +233,4 @@ static struct pci_driver tms_pci_driver = {
233 .remove = __devexit_p(tms_pci_detach), 233 .remove = __devexit_p(tms_pci_detach),
234}; 234};
235 235
236static int __init tms_pci_init (void) 236module_pci_driver(tms_pci_driver);
237{
238 return pci_register_driver(&tms_pci_driver);
239}
240
241static void __exit tms_pci_rmmod (void)
242{
243 pci_unregister_driver (&tms_pci_driver);
244}
245
246module_init(tms_pci_init);
247module_exit(tms_pci_rmmod);
248
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 552d24bf862e..d316503b35d4 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -365,6 +365,27 @@ static const struct driver_info qmi_wwan_force_int4 = {
365 .data = BIT(4), /* interface whitelist bitmap */ 365 .data = BIT(4), /* interface whitelist bitmap */
366}; 366};
367 367
368/* Sierra Wireless provide equally useless interface descriptors
369 * Devices in QMI mode can be switched between two different
370 * configurations:
371 * a) USB interface #8 is QMI/wwan
372 * b) USB interfaces #8, #19 and #20 are QMI/wwan
373 *
374 * Both configurations provide a number of other interfaces (serial++),
375 * some of which have the same endpoint configuration as we expect, so
376 * a whitelist or blacklist is necessary.
377 *
378 * FIXME: The below whitelist should include BIT(20). It does not
379 * because I cannot get it to work...
380 */
381static const struct driver_info qmi_wwan_sierra = {
382 .description = "Sierra Wireless wwan/QMI device",
383 .flags = FLAG_WWAN,
384 .bind = qmi_wwan_bind_gobi,
385 .unbind = qmi_wwan_unbind_shared,
386 .manage_power = qmi_wwan_manage_power,
387 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
388};
368 389
369#define HUAWEI_VENDOR_ID 0x12D1 390#define HUAWEI_VENDOR_ID 0x12D1
370#define QMI_GOBI_DEVICE(vend, prod) \ 391#define QMI_GOBI_DEVICE(vend, prod) \
@@ -445,6 +466,15 @@ static const struct usb_device_id products[] = {
445 .bInterfaceProtocol = 0xff, 466 .bInterfaceProtocol = 0xff,
446 .driver_info = (unsigned long)&qmi_wwan_force_int4, 467 .driver_info = (unsigned long)&qmi_wwan_force_int4,
447 }, 468 },
469 { /* Sierra Wireless MC77xx in QMI mode */
470 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
471 .idVendor = 0x1199,
472 .idProduct = 0x68a2,
473 .bInterfaceClass = 0xff,
474 .bInterfaceSubClass = 0xff,
475 .bInterfaceProtocol = 0xff,
476 .driver_info = (unsigned long)&qmi_wwan_sierra,
477 },
448 {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 478 {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
449 {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 479 {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
450 {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ 480 {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 187d01ccb973..a2349483cd2a 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1051,6 +1051,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1051 dev->net->ethtool_ops = &smsc75xx_ethtool_ops; 1051 dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
1052 dev->net->flags |= IFF_MULTICAST; 1052 dev->net->flags |= IFF_MULTICAST;
1053 dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; 1053 dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
1054 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1054 return 0; 1055 return 0;
1055} 1056}
1056 1057
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index b7b3f5b0d406..db9953630da5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -884,6 +884,7 @@ static const struct ethtool_ops usbnet_ethtool_ops = {
884 .get_drvinfo = usbnet_get_drvinfo, 884 .get_drvinfo = usbnet_get_drvinfo,
885 .get_msglevel = usbnet_get_msglevel, 885 .get_msglevel = usbnet_get_msglevel,
886 .set_msglevel = usbnet_set_msglevel, 886 .set_msglevel = usbnet_set_msglevel,
887 .get_ts_info = ethtool_op_get_ts_info,
887}; 888};
888 889
889/*-------------------------------------------------------------------------*/ 890/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4de2760c5937..fa58c7869954 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -66,12 +66,21 @@ struct virtnet_info {
66 /* Host will merge rx buffers for big packets (shake it! shake it!) */ 66 /* Host will merge rx buffers for big packets (shake it! shake it!) */
67 bool mergeable_rx_bufs; 67 bool mergeable_rx_bufs;
68 68
69 /* enable config space updates */
70 bool config_enable;
71
69 /* Active statistics */ 72 /* Active statistics */
70 struct virtnet_stats __percpu *stats; 73 struct virtnet_stats __percpu *stats;
71 74
72 /* Work struct for refilling if we run low on memory. */ 75 /* Work struct for refilling if we run low on memory. */
73 struct delayed_work refill; 76 struct delayed_work refill;
74 77
78 /* Work struct for config space updates */
79 struct work_struct config_work;
80
81 /* Lock for config space updates */
82 struct mutex config_lock;
83
75 /* Chain pages by the private ptr. */ 84 /* Chain pages by the private ptr. */
76 struct page *pages; 85 struct page *pages;
77 86
@@ -626,16 +635,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
626 /* This can happen with OOM and indirect buffers. */ 635 /* This can happen with OOM and indirect buffers. */
627 if (unlikely(capacity < 0)) { 636 if (unlikely(capacity < 0)) {
628 if (likely(capacity == -ENOMEM)) { 637 if (likely(capacity == -ENOMEM)) {
629 if (net_ratelimit()) { 638 if (net_ratelimit())
630 dev_warn(&dev->dev, 639 dev_warn(&dev->dev,
631 "TX queue failure: out of memory\n"); 640 "TX queue failure: out of memory\n");
632 } else { 641 } else {
633 dev->stats.tx_fifo_errors++; 642 dev->stats.tx_fifo_errors++;
634 if (net_ratelimit()) 643 if (net_ratelimit())
635 dev_warn(&dev->dev, 644 dev_warn(&dev->dev,
636 "Unexpected TX queue failure: %d\n", 645 "Unexpected TX queue failure: %d\n",
637 capacity); 646 capacity);
638 }
639 } 647 }
640 dev->stats.tx_dropped++; 648 dev->stats.tx_dropped++;
641 kfree_skb(skb); 649 kfree_skb(skb);
@@ -781,6 +789,16 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
781 return status == VIRTIO_NET_OK; 789 return status == VIRTIO_NET_OK;
782} 790}
783 791
792static void virtnet_ack_link_announce(struct virtnet_info *vi)
793{
794 rtnl_lock();
795 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
796 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
797 0, 0))
798 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
799 rtnl_unlock();
800}
801
784static int virtnet_close(struct net_device *dev) 802static int virtnet_close(struct net_device *dev)
785{ 803{
786 struct virtnet_info *vi = netdev_priv(dev); 804 struct virtnet_info *vi = netdev_priv(dev);
@@ -952,20 +970,31 @@ static const struct net_device_ops virtnet_netdev = {
952#endif 970#endif
953}; 971};
954 972
955static void virtnet_update_status(struct virtnet_info *vi) 973static void virtnet_config_changed_work(struct work_struct *work)
956{ 974{
975 struct virtnet_info *vi =
976 container_of(work, struct virtnet_info, config_work);
957 u16 v; 977 u16 v;
958 978
979 mutex_lock(&vi->config_lock);
980 if (!vi->config_enable)
981 goto done;
982
959 if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, 983 if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
960 offsetof(struct virtio_net_config, status), 984 offsetof(struct virtio_net_config, status),
961 &v) < 0) 985 &v) < 0)
962 return; 986 goto done;
987
988 if (v & VIRTIO_NET_S_ANNOUNCE) {
989 netif_notify_peers(vi->dev);
990 virtnet_ack_link_announce(vi);
991 }
963 992
964 /* Ignore unknown (future) status bits */ 993 /* Ignore unknown (future) status bits */
965 v &= VIRTIO_NET_S_LINK_UP; 994 v &= VIRTIO_NET_S_LINK_UP;
966 995
967 if (vi->status == v) 996 if (vi->status == v)
968 return; 997 goto done;
969 998
970 vi->status = v; 999 vi->status = v;
971 1000
@@ -976,13 +1005,15 @@ static void virtnet_update_status(struct virtnet_info *vi)
976 netif_carrier_off(vi->dev); 1005 netif_carrier_off(vi->dev);
977 netif_stop_queue(vi->dev); 1006 netif_stop_queue(vi->dev);
978 } 1007 }
1008done:
1009 mutex_unlock(&vi->config_lock);
979} 1010}
980 1011
981static void virtnet_config_changed(struct virtio_device *vdev) 1012static void virtnet_config_changed(struct virtio_device *vdev)
982{ 1013{
983 struct virtnet_info *vi = vdev->priv; 1014 struct virtnet_info *vi = vdev->priv;
984 1015
985 virtnet_update_status(vi); 1016 queue_work(system_nrt_wq, &vi->config_work);
986} 1017}
987 1018
988static int init_vqs(struct virtnet_info *vi) 1019static int init_vqs(struct virtnet_info *vi)
@@ -1076,6 +1107,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1076 goto free; 1107 goto free;
1077 1108
1078 INIT_DELAYED_WORK(&vi->refill, refill_work); 1109 INIT_DELAYED_WORK(&vi->refill, refill_work);
1110 mutex_init(&vi->config_lock);
1111 vi->config_enable = true;
1112 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1079 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); 1113 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
1080 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); 1114 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
1081 1115
@@ -1111,7 +1145,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1111 otherwise get link status from config. */ 1145 otherwise get link status from config. */
1112 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 1146 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1113 netif_carrier_off(dev); 1147 netif_carrier_off(dev);
1114 virtnet_update_status(vi); 1148 queue_work(system_nrt_wq, &vi->config_work);
1115 } else { 1149 } else {
1116 vi->status = VIRTIO_NET_S_LINK_UP; 1150 vi->status = VIRTIO_NET_S_LINK_UP;
1117 netif_carrier_on(dev); 1151 netif_carrier_on(dev);
@@ -1170,10 +1204,17 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
1170{ 1204{
1171 struct virtnet_info *vi = vdev->priv; 1205 struct virtnet_info *vi = vdev->priv;
1172 1206
1207 /* Prevent config work handler from accessing the device. */
1208 mutex_lock(&vi->config_lock);
1209 vi->config_enable = false;
1210 mutex_unlock(&vi->config_lock);
1211
1173 unregister_netdev(vi->dev); 1212 unregister_netdev(vi->dev);
1174 1213
1175 remove_vq_common(vi); 1214 remove_vq_common(vi);
1176 1215
1216 flush_work(&vi->config_work);
1217
1177 free_percpu(vi->stats); 1218 free_percpu(vi->stats);
1178 free_netdev(vi->dev); 1219 free_netdev(vi->dev);
1179} 1220}
@@ -1183,6 +1224,11 @@ static int virtnet_freeze(struct virtio_device *vdev)
1183{ 1224{
1184 struct virtnet_info *vi = vdev->priv; 1225 struct virtnet_info *vi = vdev->priv;
1185 1226
1227 /* Prevent config work handler from accessing the device */
1228 mutex_lock(&vi->config_lock);
1229 vi->config_enable = false;
1230 mutex_unlock(&vi->config_lock);
1231
1186 virtqueue_disable_cb(vi->rvq); 1232 virtqueue_disable_cb(vi->rvq);
1187 virtqueue_disable_cb(vi->svq); 1233 virtqueue_disable_cb(vi->svq);
1188 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) 1234 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
@@ -1196,6 +1242,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
1196 1242
1197 remove_vq_common(vi); 1243 remove_vq_common(vi);
1198 1244
1245 flush_work(&vi->config_work);
1246
1199 return 0; 1247 return 0;
1200} 1248}
1201 1249
@@ -1216,6 +1264,10 @@ static int virtnet_restore(struct virtio_device *vdev)
1216 if (!try_fill_recv(vi, GFP_KERNEL)) 1264 if (!try_fill_recv(vi, GFP_KERNEL))
1217 queue_delayed_work(system_nrt_wq, &vi->refill, 0); 1265 queue_delayed_work(system_nrt_wq, &vi->refill, 0);
1218 1266
1267 mutex_lock(&vi->config_lock);
1268 vi->config_enable = true;
1269 mutex_unlock(&vi->config_lock);
1270
1219 return 0; 1271 return 0;
1220} 1272}
1221#endif 1273#endif
@@ -1233,6 +1285,7 @@ static unsigned int features[] = {
1233 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 1285 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1234 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1286 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1235 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1287 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1288 VIRTIO_NET_F_GUEST_ANNOUNCE,
1236}; 1289};
1237 1290
1238static struct virtio_driver virtio_net_driver = { 1291static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c676de7de024..9eb6479306d6 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2055,15 +2055,4 @@ static struct pci_driver dscc4_driver = {
2055 .remove = __devexit_p(dscc4_remove_one), 2055 .remove = __devexit_p(dscc4_remove_one),
2056}; 2056};
2057 2057
2058static int __init dscc4_init_module(void) 2058module_pci_driver(dscc4_driver);
2059{
2060 return pci_register_driver(&dscc4_driver);
2061}
2062
2063static void __exit dscc4_cleanup_module(void)
2064{
2065 pci_unregister_driver(&dscc4_driver);
2066}
2067
2068module_init(dscc4_init_module);
2069module_exit(dscc4_cleanup_module);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ebb9f24eefb5..1a623183cbe5 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2483,6 +2483,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2483 pr_err("Control memory remap failed\n"); 2483 pr_err("Control memory remap failed\n");
2484 pci_release_regions(pdev); 2484 pci_release_regions(pdev);
2485 pci_disable_device(pdev); 2485 pci_disable_device(pdev);
2486 iounmap(card->mem);
2486 kfree(card); 2487 kfree(card);
2487 return -ENODEV; 2488 return -ENODEV;
2488 } 2489 }
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 76a8a4a522e9..f5d533a706ea 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1120,7 +1120,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1120{ 1120{
1121 lmc_softc_t *sc = dev_to_sc(dev); 1121 lmc_softc_t *sc = dev_to_sc(dev);
1122 1122
1123 lmc_trace(dev, "lmc_runnig_reset in"); 1123 lmc_trace(dev, "lmc_running_reset in");
1124 1124
1125 /* stop interrupts */ 1125 /* stop interrupts */
1126 /* Clear the interrupt mask */ 1126 /* Clear the interrupt mask */
@@ -1736,18 +1736,7 @@ static struct pci_driver lmc_driver = {
1736 .remove = __devexit_p(lmc_remove_one), 1736 .remove = __devexit_p(lmc_remove_one),
1737}; 1737};
1738 1738
1739static int __init init_lmc(void) 1739module_pci_driver(lmc_driver);
1740{
1741 return pci_register_driver(&lmc_driver);
1742}
1743
1744static void __exit exit_lmc(void)
1745{
1746 pci_unregister_driver(&lmc_driver);
1747}
1748
1749module_init(init_lmc);
1750module_exit(exit_lmc);
1751 1740
1752unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/ 1741unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1753{ 1742{
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index 3f703384295e..672de18a776c 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -32,8 +32,9 @@ config WIMAX_I2400M_SDIO
32 If unsure, it is safe to select M (module). 32 If unsure, it is safe to select M (module).
33 33
34config WIMAX_IWMC3200_SDIO 34config WIMAX_IWMC3200_SDIO
35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO" 35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
36 depends on WIMAX_I2400M_SDIO 36 depends on WIMAX_I2400M_SDIO
37 depends on EXPERIMENTAL
37 select IWMC3200TOP 38 select IWMC3200TOP
38 help 39 help
39 Select if you have a device based on the Intel Multicom WiMAX 40 Select if you have a device based on the Intel Multicom WiMAX
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 29b1e033a10b..713d033891e6 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -695,7 +695,7 @@ int i2400mu_resume(struct usb_interface *iface)
695 d_fnstart(3, dev, "(iface %p)\n", iface); 695 d_fnstart(3, dev, "(iface %p)\n", iface);
696 rmb(); /* see i2400m->updown's documentation */ 696 rmb(); /* see i2400m->updown's documentation */
697 if (i2400m->updown == 0) { 697 if (i2400m->updown == 0) {
698 d_printf(1, dev, "fw was down, no resume neeed\n"); 698 d_printf(1, dev, "fw was down, no resume needed\n");
699 goto out; 699 goto out;
700 } 700 }
701 d_printf(1, dev, "fw was up, resuming\n"); 701 d_printf(1, dev, "fw was up, resuming\n");
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8faa129da5a0..8c50d9d19d78 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -19,6 +19,7 @@
19#include <linux/nl80211.h> 19#include <linux/nl80211.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
22#include <linux/export.h>
22#include <ar231x_platform.h> 23#include <ar231x_platform.h>
23#include "ath5k.h" 24#include "ath5k.h"
24#include "debug.h" 25#include "debug.h"
@@ -119,7 +120,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
119 if (res == NULL) { 120 if (res == NULL) {
120 dev_err(&pdev->dev, "no IRQ resource found\n"); 121 dev_err(&pdev->dev, "no IRQ resource found\n");
121 ret = -ENXIO; 122 ret = -ENXIO;
122 goto err_out; 123 goto err_iounmap;
123 } 124 }
124 125
125 irq = res->start; 126 irq = res->start;
@@ -128,7 +129,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
128 if (hw == NULL) { 129 if (hw == NULL) {
129 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 130 dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
130 ret = -ENOMEM; 131 ret = -ENOMEM;
131 goto err_out; 132 goto err_iounmap;
132 } 133 }
133 134
134 ah = hw->priv; 135 ah = hw->priv;
@@ -185,6 +186,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
185 err_free_hw: 186 err_free_hw:
186 ieee80211_free_hw(hw); 187 ieee80211_free_hw(hw);
187 platform_set_drvdata(pdev, NULL); 188 platform_set_drvdata(pdev, NULL);
189 err_iounmap:
190 iounmap(mem);
188 err_out: 191 err_out:
189 return ret; 192 return ret;
190} 193}
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index 6675c92b542b..acc9aa832f76 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -55,8 +55,9 @@ void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
55 ath6kl_warn("failed to allocate testmode rx skb!\n"); 55 ath6kl_warn("failed to allocate testmode rx skb!\n");
56 return; 56 return;
57 } 57 }
58 NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD); 58 if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
59 NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf); 59 nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
60 goto nla_put_failure;
60 cfg80211_testmode_event(skb, GFP_KERNEL); 61 cfg80211_testmode_event(skb, GFP_KERNEL);
61 return; 62 return;
62 63
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f4b7334cd27c..dfa78e8b6470 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1574,6 +1574,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1574 struct ath_hw *ah = sc->sc_ah; 1574 struct ath_hw *ah = sc->sc_ah;
1575 struct ath_common *common = ath9k_hw_common(ah); 1575 struct ath_common *common = ath9k_hw_common(ah);
1576 struct ieee80211_conf *conf = &hw->conf; 1576 struct ieee80211_conf *conf = &hw->conf;
1577 bool reset_channel = false;
1577 1578
1578 ath9k_ps_wakeup(sc); 1579 ath9k_ps_wakeup(sc);
1579 mutex_lock(&sc->mutex); 1580 mutex_lock(&sc->mutex);
@@ -1582,6 +1583,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1582 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1583 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1583 if (sc->ps_idle) 1584 if (sc->ps_idle)
1584 ath_cancel_work(sc); 1585 ath_cancel_work(sc);
1586 else
1587 /*
1588 * The chip needs a reset to properly wake up from
1589 * full sleep
1590 */
1591 reset_channel = ah->chip_fullsleep;
1585 } 1592 }
1586 1593
1587 /* 1594 /*
@@ -1610,7 +1617,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1610 } 1617 }
1611 } 1618 }
1612 1619
1613 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1620 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
1614 struct ieee80211_channel *curchan = hw->conf.channel; 1621 struct ieee80211_channel *curchan = hw->conf.channel;
1615 int pos = curchan->hw_value; 1622 int pos = curchan->hw_value;
1616 int old_pos = -1; 1623 int old_pos = -1;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 834e6bc45e8b..23eaa1b26ebe 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1820,6 +1820,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1820 struct ath_frame_info *fi = get_frame_info(skb); 1820 struct ath_frame_info *fi = get_frame_info(skb);
1821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1822 struct ath_buf *bf; 1822 struct ath_buf *bf;
1823 int fragno;
1823 u16 seqno; 1824 u16 seqno;
1824 1825
1825 bf = ath_tx_get_buffer(sc); 1826 bf = ath_tx_get_buffer(sc);
@@ -1831,9 +1832,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1831 ATH_TXBUF_RESET(bf); 1832 ATH_TXBUF_RESET(bf);
1832 1833
1833 if (tid) { 1834 if (tid) {
1835 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1834 seqno = tid->seq_next; 1836 seqno = tid->seq_next;
1835 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1837 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1836 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1838
1839 if (fragno)
1840 hdr->seq_ctrl |= cpu_to_le16(fragno);
1841
1842 if (!ieee80211_has_morefrags(hdr->frame_control))
1843 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1844
1837 bf->bf_state.seqno = seqno; 1845 bf->bf_state.seqno = seqno;
1838 } 1846 }
1839 1847
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 80b0755ed3af..a54fb2d29089 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -193,7 +193,7 @@ static struct sdio_driver b43_sdio_driver = {
193 .name = "b43-sdio", 193 .name = "b43-sdio",
194 .id_table = b43_sdio_ids, 194 .id_table = b43_sdio_ids,
195 .probe = b43_sdio_probe, 195 .probe = b43_sdio_probe,
196 .remove = b43_sdio_remove, 196 .remove = __devexit_p(b43_sdio_remove),
197}; 197};
198 198
199int b43_sdio_init(void) 199int b43_sdio_init(void)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 231ddf4a674f..7083db75b00c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7614,6 +7614,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7614{ 7614{
7615 int len_mpdu; 7615 int len_mpdu;
7616 struct ieee80211_rx_status rx_status; 7616 struct ieee80211_rx_status rx_status;
7617 struct ieee80211_hdr *hdr;
7617 7618
7618 memset(&rx_status, 0, sizeof(rx_status)); 7619 memset(&rx_status, 0, sizeof(rx_status));
7619 prep_mac80211_status(wlc, rxh, p, &rx_status); 7620 prep_mac80211_status(wlc, rxh, p, &rx_status);
@@ -7623,6 +7624,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7623 skb_pull(p, D11_PHY_HDR_LEN); 7624 skb_pull(p, D11_PHY_HDR_LEN);
7624 __skb_trim(p, len_mpdu); 7625 __skb_trim(p, len_mpdu);
7625 7626
7627 /* unmute transmit */
7628 if (wlc->hw->suspended_fifos) {
7629 hdr = (struct ieee80211_hdr *)p->data;
7630 if (ieee80211_is_beacon(hdr->frame_control))
7631 brcms_b_mute(wlc->hw, false);
7632 }
7633
7626 memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status)); 7634 memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
7627 ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p); 7635 ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
7628} 7636}
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 819368788319..c72136c07774 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -344,38 +344,50 @@ static struct iw_handler_def ipw2100_wx_handler_def;
344 344
345static inline void read_register(struct net_device *dev, u32 reg, u32 * val) 345static inline void read_register(struct net_device *dev, u32 reg, u32 * val)
346{ 346{
347 *val = readl((void __iomem *)(dev->base_addr + reg)); 347 struct ipw2100_priv *priv = libipw_priv(dev);
348
349 *val = ioread32(priv->ioaddr + reg);
348 IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val); 350 IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
349} 351}
350 352
351static inline void write_register(struct net_device *dev, u32 reg, u32 val) 353static inline void write_register(struct net_device *dev, u32 reg, u32 val)
352{ 354{
353 writel(val, (void __iomem *)(dev->base_addr + reg)); 355 struct ipw2100_priv *priv = libipw_priv(dev);
356
357 iowrite32(val, priv->ioaddr + reg);
354 IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val); 358 IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
355} 359}
356 360
357static inline void read_register_word(struct net_device *dev, u32 reg, 361static inline void read_register_word(struct net_device *dev, u32 reg,
358 u16 * val) 362 u16 * val)
359{ 363{
360 *val = readw((void __iomem *)(dev->base_addr + reg)); 364 struct ipw2100_priv *priv = libipw_priv(dev);
365
366 *val = ioread16(priv->ioaddr + reg);
361 IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val); 367 IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
362} 368}
363 369
364static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val) 370static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val)
365{ 371{
366 *val = readb((void __iomem *)(dev->base_addr + reg)); 372 struct ipw2100_priv *priv = libipw_priv(dev);
373
374 *val = ioread8(priv->ioaddr + reg);
367 IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val); 375 IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
368} 376}
369 377
370static inline void write_register_word(struct net_device *dev, u32 reg, u16 val) 378static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
371{ 379{
372 writew(val, (void __iomem *)(dev->base_addr + reg)); 380 struct ipw2100_priv *priv = libipw_priv(dev);
381
382 iowrite16(val, priv->ioaddr + reg);
373 IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val); 383 IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
374} 384}
375 385
376static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val) 386static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
377{ 387{
378 writeb(val, (void __iomem *)(dev->base_addr + reg)); 388 struct ipw2100_priv *priv = libipw_priv(dev);
389
390 iowrite8(val, priv->ioaddr + reg);
379 IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val); 391 IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
380} 392}
381 393
@@ -507,13 +519,13 @@ static void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
507 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf); 519 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
508} 520}
509 521
510static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev) 522static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev)
511{ 523{
512 return (dev->base_addr && 524 u32 dbg;
513 (readl 525
514 ((void __iomem *)(dev->base_addr + 526 read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg);
515 IPW_REG_DOA_DEBUG_AREA_START)) 527
516 == IPW_DATA_DOA_DEBUG_VALUE)); 528 return dbg == IPW_DATA_DOA_DEBUG_VALUE;
517} 529}
518 530
519static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord, 531static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
@@ -3777,7 +3789,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
3777 IPW2100_ORD(COUNTRY_CODE, 3789 IPW2100_ORD(COUNTRY_CODE,
3778 "IEEE country code as recv'd from beacon"), 3790 "IEEE country code as recv'd from beacon"),
3779 IPW2100_ORD(COUNTRY_CHANNELS, 3791 IPW2100_ORD(COUNTRY_CHANNELS,
3780 "channels suported by country"), 3792 "channels supported by country"),
3781 IPW2100_ORD(RESET_CNT, "adapter resets (warm)"), 3793 IPW2100_ORD(RESET_CNT, "adapter resets (warm)"),
3782 IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"), 3794 IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"),
3783 IPW2100_ORD(ANTENNA_DIVERSITY, 3795 IPW2100_ORD(ANTENNA_DIVERSITY,
@@ -4066,7 +4078,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
4066 ipw2100_firmware.version = 0; 4078 ipw2100_firmware.version = 0;
4067#endif 4079#endif
4068 4080
4069 printk(KERN_INFO "%s: Reseting on mode change.\n", priv->net_dev->name); 4081 printk(KERN_INFO "%s: Resetting on mode change.\n", priv->net_dev->name);
4070 priv->reset_backoff = 0; 4082 priv->reset_backoff = 0;
4071 schedule_reset(priv); 4083 schedule_reset(priv);
4072 4084
@@ -6086,9 +6098,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6086/* Look into using netdev destructor to shutdown libipw? */ 6098/* Look into using netdev destructor to shutdown libipw? */
6087 6099
6088static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, 6100static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6089 void __iomem * base_addr, 6101 void __iomem * ioaddr)
6090 unsigned long mem_start,
6091 unsigned long mem_len)
6092{ 6102{
6093 struct ipw2100_priv *priv; 6103 struct ipw2100_priv *priv;
6094 struct net_device *dev; 6104 struct net_device *dev;
@@ -6100,6 +6110,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6100 priv->ieee = netdev_priv(dev); 6110 priv->ieee = netdev_priv(dev);
6101 priv->pci_dev = pci_dev; 6111 priv->pci_dev = pci_dev;
6102 priv->net_dev = dev; 6112 priv->net_dev = dev;
6113 priv->ioaddr = ioaddr;
6103 6114
6104 priv->ieee->hard_start_xmit = ipw2100_tx; 6115 priv->ieee->hard_start_xmit = ipw2100_tx;
6105 priv->ieee->set_security = shim__set_security; 6116 priv->ieee->set_security = shim__set_security;
@@ -6115,10 +6126,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6115 dev->watchdog_timeo = 3 * HZ; 6126 dev->watchdog_timeo = 3 * HZ;
6116 dev->irq = 0; 6127 dev->irq = 0;
6117 6128
6118 dev->base_addr = (unsigned long)base_addr;
6119 dev->mem_start = mem_start;
6120 dev->mem_end = dev->mem_start + mem_len - 1;
6121
6122 /* NOTE: We don't use the wireless_handlers hook 6129 /* NOTE: We don't use the wireless_handlers hook
6123 * in dev as the system will start throwing WX requests 6130 * in dev as the system will start throwing WX requests
6124 * to us before we're actually initialized and it just 6131 * to us before we're actually initialized and it just
@@ -6219,8 +6226,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6219static int ipw2100_pci_init_one(struct pci_dev *pci_dev, 6226static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6220 const struct pci_device_id *ent) 6227 const struct pci_device_id *ent)
6221{ 6228{
6222 unsigned long mem_start, mem_len, mem_flags; 6229 void __iomem *ioaddr;
6223 void __iomem *base_addr = NULL;
6224 struct net_device *dev = NULL; 6230 struct net_device *dev = NULL;
6225 struct ipw2100_priv *priv = NULL; 6231 struct ipw2100_priv *priv = NULL;
6226 int err = 0; 6232 int err = 0;
@@ -6229,18 +6235,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6229 6235
6230 IPW_DEBUG_INFO("enter\n"); 6236 IPW_DEBUG_INFO("enter\n");
6231 6237
6232 mem_start = pci_resource_start(pci_dev, 0); 6238 if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) {
6233 mem_len = pci_resource_len(pci_dev, 0);
6234 mem_flags = pci_resource_flags(pci_dev, 0);
6235
6236 if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
6237 IPW_DEBUG_INFO("weird - resource type is not memory\n"); 6239 IPW_DEBUG_INFO("weird - resource type is not memory\n");
6238 err = -ENODEV; 6240 err = -ENODEV;
6239 goto fail; 6241 goto out;
6240 } 6242 }
6241 6243
6242 base_addr = ioremap_nocache(mem_start, mem_len); 6244 ioaddr = pci_iomap(pci_dev, 0, 0);
6243 if (!base_addr) { 6245 if (!ioaddr) {
6244 printk(KERN_WARNING DRV_NAME 6246 printk(KERN_WARNING DRV_NAME
6245 "Error calling ioremap_nocache.\n"); 6247 "Error calling ioremap_nocache.\n");
6246 err = -EIO; 6248 err = -EIO;
@@ -6248,7 +6250,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6248 } 6250 }
6249 6251
6250 /* allocate and initialize our net_device */ 6252 /* allocate and initialize our net_device */
6251 dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len); 6253 dev = ipw2100_alloc_device(pci_dev, ioaddr);
6252 if (!dev) { 6254 if (!dev) {
6253 printk(KERN_WARNING DRV_NAME 6255 printk(KERN_WARNING DRV_NAME
6254 "Error calling ipw2100_alloc_device.\n"); 6256 "Error calling ipw2100_alloc_device.\n");
@@ -6383,8 +6385,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6383 priv->status |= STATUS_INITIALIZED; 6385 priv->status |= STATUS_INITIALIZED;
6384 6386
6385 mutex_unlock(&priv->action_mutex); 6387 mutex_unlock(&priv->action_mutex);
6386 6388out:
6387 return 0; 6389 return err;
6388 6390
6389 fail_unlock: 6391 fail_unlock:
6390 mutex_unlock(&priv->action_mutex); 6392 mutex_unlock(&priv->action_mutex);
@@ -6413,63 +6415,56 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6413 pci_set_drvdata(pci_dev, NULL); 6415 pci_set_drvdata(pci_dev, NULL);
6414 } 6416 }
6415 6417
6416 if (base_addr) 6418 pci_iounmap(pci_dev, ioaddr);
6417 iounmap(base_addr);
6418 6419
6419 pci_release_regions(pci_dev); 6420 pci_release_regions(pci_dev);
6420 pci_disable_device(pci_dev); 6421 pci_disable_device(pci_dev);
6421 6422 goto out;
6422 return err;
6423} 6423}
6424 6424
6425static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) 6425static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6426{ 6426{
6427 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); 6427 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6428 struct net_device *dev; 6428 struct net_device *dev = priv->net_dev;
6429 6429
6430 if (priv) { 6430 mutex_lock(&priv->action_mutex);
6431 mutex_lock(&priv->action_mutex);
6432 6431
6433 priv->status &= ~STATUS_INITIALIZED; 6432 priv->status &= ~STATUS_INITIALIZED;
6434 6433
6435 dev = priv->net_dev; 6434 sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6436 sysfs_remove_group(&pci_dev->dev.kobj,
6437 &ipw2100_attribute_group);
6438 6435
6439#ifdef CONFIG_PM 6436#ifdef CONFIG_PM
6440 if (ipw2100_firmware.version) 6437 if (ipw2100_firmware.version)
6441 ipw2100_release_firmware(priv, &ipw2100_firmware); 6438 ipw2100_release_firmware(priv, &ipw2100_firmware);
6442#endif 6439#endif
6443 /* Take down the hardware */ 6440 /* Take down the hardware */
6444 ipw2100_down(priv); 6441 ipw2100_down(priv);
6445 6442
6446 /* Release the mutex so that the network subsystem can 6443 /* Release the mutex so that the network subsystem can
6447 * complete any needed calls into the driver... */ 6444 * complete any needed calls into the driver... */
6448 mutex_unlock(&priv->action_mutex); 6445 mutex_unlock(&priv->action_mutex);
6449 6446
6450 /* Unregister the device first - this results in close() 6447 /* Unregister the device first - this results in close()
6451 * being called if the device is open. If we free storage 6448 * being called if the device is open. If we free storage
6452 * first, then close() will crash. */ 6449 * first, then close() will crash.
6453 unregister_netdev(dev); 6450 * FIXME: remove the comment above. */
6451 unregister_netdev(dev);
6454 6452
6455 ipw2100_kill_works(priv); 6453 ipw2100_kill_works(priv);
6456 6454
6457 ipw2100_queues_free(priv); 6455 ipw2100_queues_free(priv);
6458 6456
6459 /* Free potential debugging firmware snapshot */ 6457 /* Free potential debugging firmware snapshot */
6460 ipw2100_snapshot_free(priv); 6458 ipw2100_snapshot_free(priv);
6461 6459
6462 if (dev->irq) 6460 free_irq(dev->irq, priv);
6463 free_irq(dev->irq, priv);
6464 6461
6465 if (dev->base_addr) 6462 pci_iounmap(pci_dev, priv->ioaddr);
6466 iounmap((void __iomem *)dev->base_addr);
6467 6463
6468 /* wiphy_unregister needs to be here, before free_libipw */ 6464 /* wiphy_unregister needs to be here, before free_libipw */
6469 wiphy_unregister(priv->ieee->wdev.wiphy); 6465 wiphy_unregister(priv->ieee->wdev.wiphy);
6470 kfree(priv->ieee->bg_band.channels); 6466 kfree(priv->ieee->bg_band.channels);
6471 free_libipw(dev, 0); 6467 free_libipw(dev, 0);
6472 }
6473 6468
6474 pci_release_regions(pci_dev); 6469 pci_release_regions(pci_dev);
6475 pci_disable_device(pci_dev); 6470 pci_disable_device(pci_dev);
@@ -8612,7 +8607,7 @@ static int ipw2100_ucode_download(struct ipw2100_priv *priv,
8612 struct net_device *dev = priv->net_dev; 8607 struct net_device *dev = priv->net_dev;
8613 const unsigned char *microcode_data = fw->uc.data; 8608 const unsigned char *microcode_data = fw->uc.data;
8614 unsigned int microcode_data_left = fw->uc.size; 8609 unsigned int microcode_data_left = fw->uc.size;
8615 void __iomem *reg = (void __iomem *)dev->base_addr; 8610 void __iomem *reg = priv->ioaddr;
8616 8611
8617 struct symbol_alive_response response; 8612 struct symbol_alive_response response;
8618 int i, j; 8613 int i, j;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 18741a409cf2..973125242490 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -479,6 +479,7 @@ enum {
479#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */ 479#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */
480 480
481struct ipw2100_priv { 481struct ipw2100_priv {
482 void __iomem *ioaddr;
482 483
483 int stop_hang_check; /* Set 1 when shutting down to kill hang_check */ 484 int stop_hang_check; /* Set 1 when shutting down to kill hang_check */
484 int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */ 485 int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index f37d315f942f..d57522c64073 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -7024,7 +7024,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
7024 cpu_to_le16(burst_duration); 7024 cpu_to_le16(burst_duration);
7025 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7025 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7026 if (type == IEEE_B) { 7026 if (type == IEEE_B) {
7027 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n", 7027 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7028 type); 7028 type);
7029 if (priv->qos_data.qos_enable == 0) 7029 if (priv->qos_data.qos_enable == 0)
7030 active_one = &def_parameters_CCK; 7030 active_one = &def_parameters_CCK;
@@ -11829,10 +11829,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11829 net_dev->wireless_data = &priv->wireless_data; 11829 net_dev->wireless_data = &priv->wireless_data;
11830 net_dev->wireless_handlers = &ipw_wx_handler_def; 11830 net_dev->wireless_handlers = &ipw_wx_handler_def;
11831 net_dev->ethtool_ops = &ipw_ethtool_ops; 11831 net_dev->ethtool_ops = &ipw_ethtool_ops;
11832 net_dev->irq = pdev->irq;
11833 net_dev->base_addr = (unsigned long)priv->hw_base;
11834 net_dev->mem_start = pci_resource_start(pdev, 0);
11835 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11836 11832
11837 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); 11833 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11838 if (err) { 11834 if (err) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index bd92bd824c1e..060aac3e22f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -183,9 +183,10 @@ static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
183 "Run out of memory for messages to user space ?\n"); 183 "Run out of memory for messages to user space ?\n");
184 return; 184 return;
185 } 185 }
186 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); 186 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
187 /* the length doesn't include len_n_flags field, so add it manually */ 187 /* the length doesn't include len_n_flags field, so add it manually */
188 NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data); 188 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
189 goto nla_put_failure;
189 cfg80211_testmode_event(skb, GFP_ATOMIC); 190 cfg80211_testmode_event(skb, GFP_ATOMIC);
190 return; 191 return;
191 192
@@ -313,8 +314,9 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
313 memcpy(reply_buf, &(pkt->hdr), reply_len); 314 memcpy(reply_buf, &(pkt->hdr), reply_len);
314 iwl_free_resp(&cmd); 315 iwl_free_resp(&cmd);
315 316
316 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); 317 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
317 NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf); 318 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
319 goto nla_put_failure;
318 return cfg80211_testmode_reply(skb); 320 return cfg80211_testmode_reply(skb);
319 321
320nla_put_failure: 322nla_put_failure:
@@ -378,7 +380,8 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
378 IWL_ERR(priv, "Memory allocation fail\n"); 380 IWL_ERR(priv, "Memory allocation fail\n");
379 return -ENOMEM; 381 return -ENOMEM;
380 } 382 }
381 NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32); 383 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
384 goto nla_put_failure;
382 status = cfg80211_testmode_reply(skb); 385 status = cfg80211_testmode_reply(skb);
383 if (status < 0) 386 if (status < 0)
384 IWL_ERR(priv, "Error sending msg : %d\n", status); 387 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -480,10 +483,11 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
480 IWL_ERR(priv, "Memory allocation fail\n"); 483 IWL_ERR(priv, "Memory allocation fail\n");
481 return -ENOMEM; 484 return -ENOMEM;
482 } 485 }
483 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, 486 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
484 IWL_TM_CMD_DEV2APP_SYNC_RSP); 487 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
485 NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP, 488 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
486 rsp_data_len, rsp_data_ptr); 489 rsp_data_len, rsp_data_ptr))
490 goto nla_put_failure;
487 status = cfg80211_testmode_reply(skb); 491 status = cfg80211_testmode_reply(skb);
488 if (status < 0) 492 if (status < 0)
489 IWL_ERR(priv, "Error sending msg : %d\n", status); 493 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -538,11 +542,12 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
538 IWL_ERR(priv, "Memory allocation fail\n"); 542 IWL_ERR(priv, "Memory allocation fail\n");
539 return -ENOMEM; 543 return -ENOMEM;
540 } 544 }
541 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, 545 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
542 IWL_TM_CMD_DEV2APP_EEPROM_RSP); 546 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
543 NLA_PUT(skb, IWL_TM_ATTR_EEPROM, 547 nla_put(skb, IWL_TM_ATTR_EEPROM,
544 priv->cfg->base_params->eeprom_size, 548 priv->cfg->base_params->eeprom_size,
545 priv->eeprom); 549 priv->eeprom))
550 goto nla_put_failure;
546 status = cfg80211_testmode_reply(skb); 551 status = cfg80211_testmode_reply(skb);
547 if (status < 0) 552 if (status < 0)
548 IWL_ERR(priv, "Error sending msg : %d\n", 553 IWL_ERR(priv, "Error sending msg : %d\n",
@@ -568,8 +573,9 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
568 IWL_ERR(priv, "Memory allocation fail\n"); 573 IWL_ERR(priv, "Memory allocation fail\n");
569 return -ENOMEM; 574 return -ENOMEM;
570 } 575 }
571 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, 576 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
572 priv->fw->ucode_ver); 577 priv->fw->ucode_ver))
578 goto nla_put_failure;
573 status = cfg80211_testmode_reply(skb); 579 status = cfg80211_testmode_reply(skb);
574 if (status < 0) 580 if (status < 0)
575 IWL_ERR(priv, "Error sending msg : %d\n", status); 581 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -584,7 +590,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
584 IWL_ERR(priv, "Memory allocation fail\n"); 590 IWL_ERR(priv, "Memory allocation fail\n");
585 return -ENOMEM; 591 return -ENOMEM;
586 } 592 }
587 NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid); 593 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
594 goto nla_put_failure;
588 status = cfg80211_testmode_reply(skb); 595 status = cfg80211_testmode_reply(skb);
589 if (status < 0) 596 if (status < 0)
590 IWL_ERR(priv, "Error sending msg : %d\n", status); 597 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -604,9 +611,10 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
604 inst_size = img->sec[IWL_UCODE_SECTION_INST].len; 611 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
605 data_size = img->sec[IWL_UCODE_SECTION_DATA].len; 612 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
606 } 613 }
607 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode); 614 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
608 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size); 615 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
609 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size); 616 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
617 goto nla_put_failure;
610 status = cfg80211_testmode_reply(skb); 618 status = cfg80211_testmode_reply(skb);
611 if (status < 0) 619 if (status < 0)
612 IWL_ERR(priv, "Error sending msg : %d\n", status); 620 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -680,9 +688,10 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
680 iwl_trace_cleanup(priv); 688 iwl_trace_cleanup(priv);
681 return -ENOMEM; 689 return -ENOMEM;
682 } 690 }
683 NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR, 691 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
684 sizeof(priv->testmode_trace.dma_addr), 692 sizeof(priv->testmode_trace.dma_addr),
685 (u64 *)&priv->testmode_trace.dma_addr); 693 (u64 *)&priv->testmode_trace.dma_addr))
694 goto nla_put_failure;
686 status = cfg80211_testmode_reply(skb); 695 status = cfg80211_testmode_reply(skb);
687 if (status < 0) { 696 if (status < 0) {
688 IWL_ERR(priv, "Error sending msg : %d\n", status); 697 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -727,9 +736,10 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
727 length = priv->testmode_trace.buff_size % 736 length = priv->testmode_trace.buff_size %
728 DUMP_CHUNK_SIZE; 737 DUMP_CHUNK_SIZE;
729 738
730 NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length, 739 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
731 priv->testmode_trace.trace_addr + 740 priv->testmode_trace.trace_addr +
732 (DUMP_CHUNK_SIZE * idx)); 741 (DUMP_CHUNK_SIZE * idx)))
742 goto nla_put_failure;
733 idx++; 743 idx++;
734 cb->args[4] = idx; 744 cb->args[4] = idx;
735 return 0; 745 return 0;
@@ -924,9 +934,10 @@ static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
924 length = priv->testmode_mem.buff_size % 934 length = priv->testmode_mem.buff_size %
925 DUMP_CHUNK_SIZE; 935 DUMP_CHUNK_SIZE;
926 936
927 NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length, 937 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
928 priv->testmode_mem.buff_addr + 938 priv->testmode_mem.buff_addr +
929 (DUMP_CHUNK_SIZE * idx)); 939 (DUMP_CHUNK_SIZE * idx)))
940 goto nla_put_failure;
930 idx++; 941 idx++;
931 cb->args[4] = idx; 942 cb->args[4] = idx;
932 return 0; 943 return 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index 03f998d098c5..7107ce53d4d4 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,5 +1,5 @@
1config IWM 1config IWM
2 tristate "Intel Wireless Multicomm 3200 WiFi driver" 2 tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
3 depends on MMC && EXPERIMENTAL 3 depends on MMC && EXPERIMENTAL
4 depends on CFG80211 4 depends on CFG80211
5 select FW_LOADER 5 select FW_LOADER
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 3fa1ecebadfd..2fa879b015b6 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -103,7 +103,7 @@ static const u32 cipher_suites[] = {
103 * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1 103 * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
104 * in the firmware spec 104 * in the firmware spec
105 */ 105 */
106static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type) 106static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
107{ 107{
108 int ret = -ENOTSUPP; 108 int ret = -ENOTSUPP;
109 109
@@ -1411,7 +1411,12 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
1411 goto done; 1411 goto done;
1412 } 1412 }
1413 1413
1414 lbs_set_authtype(priv, sme); 1414 ret = lbs_set_authtype(priv, sme);
1415 if (ret == -ENOTSUPP) {
1416 wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type);
1417 goto done;
1418 }
1419
1415 lbs_set_radio(priv, preamble, 1); 1420 lbs_set_radio(priv, preamble, 1);
1416 1421
1417 /* Do the actual association */ 1422 /* Do the actual association */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3edd473d8acd..03c0c6b1372c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -582,11 +582,13 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
582 goto nla_put_failure; 582 goto nla_put_failure;
583 } 583 }
584 584
585 NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER, 585 if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
586 sizeof(struct mac_address), data->addresses[1].addr); 586 sizeof(struct mac_address), data->addresses[1].addr))
587 goto nla_put_failure;
587 588
588 /* We get the skb->data */ 589 /* We get the skb->data */
589 NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data); 590 if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
591 goto nla_put_failure;
590 592
591 /* We get the flags for this transmission, and we translate them to 593 /* We get the flags for this transmission, and we translate them to
592 wmediumd flags */ 594 wmediumd flags */
@@ -597,7 +599,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
597 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 599 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
598 hwsim_flags |= HWSIM_TX_CTL_NO_ACK; 600 hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
599 601
600 NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags); 602 if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
603 goto nla_put_failure;
601 604
602 /* We get the tx control (rate and retries) info*/ 605 /* We get the tx control (rate and retries) info*/
603 606
@@ -606,12 +609,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
606 tx_attempts[i].count = info->status.rates[i].count; 609 tx_attempts[i].count = info->status.rates[i].count;
607 } 610 }
608 611
609 NLA_PUT(skb, HWSIM_ATTR_TX_INFO, 612 if (nla_put(skb, HWSIM_ATTR_TX_INFO,
610 sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES, 613 sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
611 tx_attempts); 614 tx_attempts))
615 goto nla_put_failure;
612 616
613 /* We create a cookie to identify this skb */ 617 /* We create a cookie to identify this skb */
614 NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb); 618 if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb))
619 goto nla_put_failure;
615 620
616 genlmsg_end(skb, msg_head); 621 genlmsg_end(skb, msg_head);
617 genlmsg_unicast(&init_net, skb, dst_pid); 622 genlmsg_unicast(&init_net, skb, dst_pid);
@@ -1109,7 +1114,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
1109 nla_total_size(sizeof(u32))); 1114 nla_total_size(sizeof(u32)));
1110 if (!skb) 1115 if (!skb)
1111 return -ENOMEM; 1116 return -ENOMEM;
1112 NLA_PUT_U32(skb, HWSIM_TM_ATTR_PS, hwsim->ps); 1117 if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
1118 goto nla_put_failure;
1113 return cfg80211_testmode_reply(skb); 1119 return cfg80211_testmode_reply(skb);
1114 default: 1120 default:
1115 return -EOPNOTSUPP; 1121 return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 445ff21772e2..2f218f9a3fd3 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -48,15 +48,15 @@
48#define PCIE_HOST_INT_STATUS_MASK 0xC3C 48#define PCIE_HOST_INT_STATUS_MASK 0xC3C
49#define PCIE_SCRATCH_2_REG 0xC40 49#define PCIE_SCRATCH_2_REG 0xC40
50#define PCIE_SCRATCH_3_REG 0xC44 50#define PCIE_SCRATCH_3_REG 0xC44
51#define PCIE_SCRATCH_4_REG 0xCC0 51#define PCIE_SCRATCH_4_REG 0xCD0
52#define PCIE_SCRATCH_5_REG 0xCC4 52#define PCIE_SCRATCH_5_REG 0xCD4
53#define PCIE_SCRATCH_6_REG 0xCC8 53#define PCIE_SCRATCH_6_REG 0xCD8
54#define PCIE_SCRATCH_7_REG 0xCCC 54#define PCIE_SCRATCH_7_REG 0xCDC
55#define PCIE_SCRATCH_8_REG 0xCD0 55#define PCIE_SCRATCH_8_REG 0xCE0
56#define PCIE_SCRATCH_9_REG 0xCD4 56#define PCIE_SCRATCH_9_REG 0xCE4
57#define PCIE_SCRATCH_10_REG 0xCD8 57#define PCIE_SCRATCH_10_REG 0xCE8
58#define PCIE_SCRATCH_11_REG 0xCDC 58#define PCIE_SCRATCH_11_REG 0xCEC
59#define PCIE_SCRATCH_12_REG 0xCE0 59#define PCIE_SCRATCH_12_REG 0xCF0
60 60
61#define CPU_INTR_DNLD_RDY BIT(0) 61#define CPU_INTR_DNLD_RDY BIT(0)
62#define CPU_INTR_DOOR_BELL BIT(1) 62#define CPU_INTR_DOOR_BELL BIT(1)
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index bac3d03f5786..e1eac830e2fc 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -1131,7 +1131,7 @@ static struct usb_driver p54u_driver = {
1131 .name = "p54usb", 1131 .name = "p54usb",
1132 .id_table = p54u_table, 1132 .id_table = p54u_table,
1133 .probe = p54u_probe, 1133 .probe = p54u_probe,
1134 .disconnect = p54u_disconnect, 1134 .disconnect = __devexit_p(p54u_disconnect),
1135 .pre_reset = p54u_pre_reset, 1135 .pre_reset = p54u_pre_reset,
1136 .post_reset = p54u_post_reset, 1136 .post_reset = p54u_post_reset,
1137#ifdef CONFIG_PM 1137#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 9cda706e4e3f..0e59ea2cdd39 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -115,7 +115,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
115 goto out_sleep; 115 goto out_sleep;
116 } 116 }
117 117
118 NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); 118 if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf))
119 goto nla_put_failure;
119 ret = cfg80211_testmode_reply(skb); 120 ret = cfg80211_testmode_reply(skb);
120 if (ret < 0) 121 if (ret < 0)
121 goto out_sleep; 122 goto out_sleep;
@@ -177,7 +178,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
177 goto out_free; 178 goto out_free;
178 } 179 }
179 180
180 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); 181 if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd))
182 goto nla_put_failure;
181 ret = cfg80211_testmode_reply(skb); 183 ret = cfg80211_testmode_reply(skb);
182 if (ret < 0) 184 if (ret < 0)
183 goto out_free; 185 goto out_free;
@@ -296,7 +298,8 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
296 goto out; 298 goto out;
297 } 299 }
298 300
299 NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr); 301 if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr))
302 goto nla_put_failure;
300 ret = cfg80211_testmode_reply(skb); 303 ret = cfg80211_testmode_reply(skb);
301 if (ret < 0) 304 if (ret < 0)
302 goto out; 305 goto out;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 663b32c2e931..0ebbb1906c30 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1965,7 +1965,7 @@ static int __init netif_init(void)
1965 if (xen_initial_domain()) 1965 if (xen_initial_domain())
1966 return 0; 1966 return 0;
1967 1967
1968 if (!xen_platform_pci_unplug) 1968 if (xen_hvm_domain() && !xen_platform_pci_unplug)
1969 return -ENODEV; 1969 return -ENODEV;
1970 1970
1971 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); 1971 printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");