aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c59x.c83
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/Kconfig20
-rw-r--r--drivers/net/Makefile11
-rw-r--r--drivers/net/acenic.c3
-rw-r--r--drivers/net/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/au1000_eth.c5
-rw-r--r--drivers/net/bcm63xx_enet.c2
-rw-r--r--drivers/net/benet/be.h1
-rw-r--r--drivers/net/benet/be_cmds.c37
-rw-r--r--drivers/net/benet/be_cmds.h10
-rw-r--r--drivers/net/benet/be_ethtool.c2
-rw-r--r--drivers/net/benet/be_main.c56
-rw-r--r--drivers/net/bnx2.h9
-rw-r--r--drivers/net/bonding/bond_main.c10
-rw-r--r--drivers/net/bonding/bond_sysfs.c2
-rw-r--r--drivers/net/can/Kconfig13
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c1186
-rw-r--r--drivers/net/can/sja1000/ems_pci.c16
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/usb/Makefile5
-rw-r--r--drivers/net/can/usb/ems_usb.c1155
-rw-r--r--drivers/net/cnic.c13
-rw-r--r--drivers/net/cnic_if.h4
-rw-r--r--drivers/net/cpmac.c8
-rw-r--r--drivers/net/cris/eth_v10.c20
-rw-r--r--drivers/net/davinci_emac.c45
-rw-r--r--drivers/net/depca.c1
-rw-r--r--drivers/net/dm9000.h2
-rw-r--r--drivers/net/e100.c1
-rw-r--r--drivers/net/e1000/e1000.h3
-rw-r--r--drivers/net/e1000/e1000_ethtool.c202
-rw-r--r--drivers/net/e1000/e1000_hw.c12914
-rw-r--r--drivers/net/e1000/e1000_hw.h3231
-rw-r--r--drivers/net/e1000/e1000_main.c825
-rw-r--r--drivers/net/e1000/e1000_param.c22
-rw-r--r--drivers/net/e1000e/82571.c4
-rw-r--r--drivers/net/e1000e/e1000.h12
-rw-r--r--drivers/net/e1000e/hw.h2
-rw-r--r--drivers/net/e1000e/ich8lan.c150
-rw-r--r--drivers/net/e1000e/netdev.c13
-rw-r--r--drivers/net/e1000e/phy.c469
-rw-r--r--drivers/net/ehea/ehea_main.c1
-rw-r--r--drivers/net/ehea/ehea_qmr.c2
-rw-r--r--drivers/net/enc28j60.c1
-rw-r--r--drivers/net/eql.c1
-rw-r--r--drivers/net/ethoc.c104
-rw-r--r--drivers/net/ewrk3.c1
-rw-r--r--drivers/net/fec.c2
-rw-r--r--drivers/net/fec_mpc52xx.c6
-rw-r--r--drivers/net/fec_mpc52xx_phy.c1
-rw-r--r--drivers/net/forcedeth.c1
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/fsl_pq_mdio.c1
-rw-r--r--drivers/net/gianfar.c4
-rw-r--r--drivers/net/hamachi.c1
-rw-r--r--drivers/net/hamradio/baycom_epp.c1
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c1
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c1
-rw-r--r--drivers/net/hamradio/hdlcdrv.c1
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hp100.c1
-rw-r--r--drivers/net/ibm_newemac/core.c9
-rw-r--r--drivers/net/ibm_newemac/emac.h1
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/igb/e1000_mac.c72
-rw-r--r--drivers/net/igb/e1000_mac.h1
-rw-r--r--drivers/net/igb/igb_ethtool.c36
-rw-r--r--drivers/net/igb/igb_main.c13
-rw-r--r--drivers/net/igbvf/ethtool.c30
-rw-r--r--drivers/net/irda/kingsun-sir.c1
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/mcs7780.c1
-rw-r--r--drivers/net/irda/pxaficp_ir.c47
-rw-r--r--drivers/net/irda/sa1100_ir.c7
-rw-r--r--drivers/net/irda/toim3232-sir.c1
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgbe/ixgbe.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c232
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c101
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c167
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h11
-rw-r--r--drivers/net/ixp2000/enp2611.c18
-rw-r--r--drivers/net/ixp2000/ixpdev.c11
-rw-r--r--drivers/net/ks8851.c43
-rw-r--r--drivers/net/ks8851.h1
-rw-r--r--drivers/net/ks8851_mll.c1697
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mlx4/fw.c5
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/myri10ge/myri10ge.c17
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c14
-rw-r--r--drivers/net/netxen/netxen_nic_init.c8
-rw-r--r--drivers/net/netxen/netxen_nic_main.c12
-rw-r--r--drivers/net/niu.c4
-rw-r--r--drivers/net/ns83820.c1
-rw-r--r--drivers/net/pasemi_mac_ethtool.c3
-rw-r--r--drivers/net/pcmcia/3c574_cs.c13
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c21
-rw-r--r--drivers/net/pcnet32.c1
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/pppoe.c129
-rw-r--r--drivers/net/pppol2tp.c2
-rw-r--r--drivers/net/qlge/qlge.h37
-rw-r--r--drivers/net/qlge/qlge_ethtool.c2
-rw-r--r--drivers/net/qlge/qlge_main.c217
-rw-r--r--drivers/net/qlge/qlge_mpi.c128
-rw-r--r--drivers/net/r8169.c1000
-rw-r--r--drivers/net/sb1000.c1
-rw-r--r--drivers/net/sfc/efx.c3
-rw-r--r--drivers/net/sfc/rx.c9
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sh_eth.c1
-rw-r--r--drivers/net/sis900.c1
-rw-r--r--drivers/net/skfp/skfddi.c1
-rw-r--r--drivers/net/skge.c17
-rw-r--r--drivers/net/skge.h2
-rw-r--r--drivers/net/sky2.c11
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/slip.c1
-rw-r--r--drivers/net/stmmac/Kconfig53
-rw-r--r--drivers/net/stmmac/Makefile4
-rw-r--r--drivers/net/stmmac/common.h330
-rw-r--r--drivers/net/stmmac/descs.h163
-rw-r--r--drivers/net/stmmac/gmac.c693
-rw-r--r--drivers/net/stmmac/gmac.h204
-rw-r--r--drivers/net/stmmac/mac100.c517
-rw-r--r--drivers/net/stmmac/mac100.h116
-rw-r--r--drivers/net/stmmac/stmmac.h98
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c395
-rw-r--r--drivers/net/stmmac/stmmac_main.c2204
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c217
-rw-r--r--drivers/net/stmmac/stmmac_timer.c140
-rw-r--r--drivers/net/stmmac/stmmac_timer.h41
-rw-r--r--drivers/net/sungem.c1
-rw-r--r--drivers/net/sunvnet.c1
-rw-r--r--drivers/net/tg3.c41
-rw-r--r--drivers/net/tg3.h2
-rw-r--r--drivers/net/tokenring/ibmtr.c1
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/typhoon.c1
-rw-r--r--drivers/net/usb/cdc_eem.c17
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/usb/kaweth.c18
-rw-r--r--drivers/net/usb/pegasus.c13
-rw-r--r--drivers/net/usb/pegasus.h6
-rw-r--r--drivers/net/usb/rndis_host.c1
-rw-r--r--drivers/net/usb/smsc95xx.c67
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/virtio_net.c245
-rw-r--r--drivers/net/vmxnet3/Makefile35
-rw-r--r--drivers/net/vmxnet3/upt1_defs.h96
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h535
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2574
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c566
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h389
-rw-r--r--drivers/net/wan/c101.c1
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/cycx_x25.c1
-rw-r--r--drivers/net/wan/dscc4.c1
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/hdlc_cisco.c18
-rw-r--r--drivers/net/wan/n2.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wireless/Kconfig13
-rw-r--r--drivers/net/wireless/adm8211.h2
-rw-r--r--drivers/net/wireless/airo.c5
-rw-r--r--drivers/net/wireless/arlan-proc.c28
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c6
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c202
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h3
-rw-r--r--drivers/net/wireless/b43/Kconfig21
-rw-r--r--drivers/net/wireless/b43/Makefile1
-rw-r--r--drivers/net/wireless/b43/b43.h185
-rw-r--r--drivers/net/wireless/b43/debugfs.c1
-rw-r--r--drivers/net/wireless/b43/debugfs.h1
-rw-r--r--drivers/net/wireless/b43/dma.c4
-rw-r--r--drivers/net/wireless/b43/leds.c266
-rw-r--r--drivers/net/wireless/b43/leds.h34
-rw-r--r--drivers/net/wireless/b43/main.c228
-rw-r--r--drivers/net/wireless/b43/phy_lp.c12
-rw-r--r--drivers/net/wireless/b43/pio.c95
-rw-r--r--drivers/net/wireless/b43/rfkill.c5
-rw-r--r--drivers/net/wireless/b43/sdio.c202
-rw-r--r--drivers/net/wireless/b43/sdio.h45
-rw-r--r--drivers/net/wireless/b43/xmit.c10
-rw-r--r--drivers/net/wireless/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/b43legacy/phy.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c188
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c188
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c43
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c1
-rw-r--r--drivers/net/wireless/libertas/cmd.c1
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c1
-rw-r--r--drivers/net/wireless/libertas/if_spi.c11
-rw-r--r--drivers/net/wireless/libertas/tx.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c3
-rw-r--r--drivers/net/wireless/p54/p54spi.c1
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c1
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xilinx_emaclite.c7
-rw-r--r--drivers/net/znet.c8
246 files changed, 24203 insertions, 12537 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 7adff4d0960d..975e25b19ebe 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -805,58 +805,54 @@ static void poll_vortex(struct net_device *dev)
805 805
806#ifdef CONFIG_PM 806#ifdef CONFIG_PM
807 807
808static int vortex_suspend(struct pci_dev *pdev, pm_message_t state) 808static int vortex_suspend(struct device *dev)
809{ 809{
810 struct net_device *dev = pci_get_drvdata(pdev); 810 struct pci_dev *pdev = to_pci_dev(dev);
811 struct net_device *ndev = pci_get_drvdata(pdev);
812
813 if (!ndev || !netif_running(ndev))
814 return 0;
815
816 netif_device_detach(ndev);
817 vortex_down(ndev, 1);
811 818
812 if (dev && netdev_priv(dev)) {
813 if (netif_running(dev)) {
814 netif_device_detach(dev);
815 vortex_down(dev, 1);
816 }
817 pci_save_state(pdev);
818 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
819 free_irq(dev->irq, dev);
820 pci_disable_device(pdev);
821 pci_set_power_state(pdev, pci_choose_state(pdev, state));
822 }
823 return 0; 819 return 0;
824} 820}
825 821
826static int vortex_resume(struct pci_dev *pdev) 822static int vortex_resume(struct device *dev)
827{ 823{
828 struct net_device *dev = pci_get_drvdata(pdev); 824 struct pci_dev *pdev = to_pci_dev(dev);
829 struct vortex_private *vp = netdev_priv(dev); 825 struct net_device *ndev = pci_get_drvdata(pdev);
830 int err; 826 int err;
831 827
832 if (dev && vp) { 828 if (!ndev || !netif_running(ndev))
833 pci_set_power_state(pdev, PCI_D0); 829 return 0;
834 pci_restore_state(pdev); 830
835 err = pci_enable_device(pdev); 831 err = vortex_up(ndev);
836 if (err) { 832 if (err)
837 pr_warning("%s: Could not enable device\n", 833 return err;
838 dev->name); 834
839 return err; 835 netif_device_attach(ndev);
840 } 836
841 pci_set_master(pdev);
842 if (request_irq(dev->irq, vp->full_bus_master_rx ?
843 &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
844 pr_warning("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
845 pci_disable_device(pdev);
846 return -EBUSY;
847 }
848 if (netif_running(dev)) {
849 err = vortex_up(dev);
850 if (err)
851 return err;
852 else
853 netif_device_attach(dev);
854 }
855 }
856 return 0; 837 return 0;
857} 838}
858 839
859#endif /* CONFIG_PM */ 840static struct dev_pm_ops vortex_pm_ops = {
841 .suspend = vortex_suspend,
842 .resume = vortex_resume,
843 .freeze = vortex_suspend,
844 .thaw = vortex_resume,
845 .poweroff = vortex_suspend,
846 .restore = vortex_resume,
847};
848
849#define VORTEX_PM_OPS (&vortex_pm_ops)
850
851#else /* !CONFIG_PM */
852
853#define VORTEX_PM_OPS NULL
854
855#endif /* !CONFIG_PM */
860 856
861#ifdef CONFIG_EISA 857#ifdef CONFIG_EISA
862static struct eisa_device_id vortex_eisa_ids[] = { 858static struct eisa_device_id vortex_eisa_ids[] = {
@@ -3205,10 +3201,7 @@ static struct pci_driver vortex_driver = {
3205 .probe = vortex_init_one, 3201 .probe = vortex_init_one,
3206 .remove = __devexit_p(vortex_remove_one), 3202 .remove = __devexit_p(vortex_remove_one),
3207 .id_table = vortex_pci_tbl, 3203 .id_table = vortex_pci_tbl,
3208#ifdef CONFIG_PM 3204 .driver.pm = VORTEX_PM_OPS,
3209 .suspend = vortex_suspend,
3210 .resume = vortex_resume,
3211#endif
3212}; 3205};
3213 3206
3214 3207
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 462d9f59c53a..83a1922e68e0 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -87,7 +87,7 @@
87 87
88/* These identify the driver base version and may not be removed. */ 88/* These identify the driver base version and may not be removed. */
89static char version[] = 89static char version[] =
90KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; 90DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
91 91
92MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); 92MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
93MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver"); 93MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ed5741b2e701..e19ca4bb7510 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1738,6 +1738,14 @@ config KS8851
1738 help 1738 help
1739 SPI driver for Micrel KS8851 SPI attached network chip. 1739 SPI driver for Micrel KS8851 SPI attached network chip.
1740 1740
1741config KS8851_MLL
1742 tristate "Micrel KS8851 MLL"
1743 depends on HAS_IOMEM
1744 select MII
1745 help
1746 This platform driver is for Micrel KS8851 Address/data bus
1747 multiplexed network chip.
1748
1741config VIA_RHINE 1749config VIA_RHINE
1742 tristate "VIA Rhine support" 1750 tristate "VIA Rhine support"
1743 depends on NET_PCI && PCI 1751 depends on NET_PCI && PCI
@@ -1875,7 +1883,7 @@ config 68360_ENET
1875 1883
1876config FEC 1884config FEC
1877 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1885 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
1878 depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 1886 depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 || ARCH_MX25
1879 help 1887 help
1880 Say Y here if you want to use the built-in 10/100 Fast ethernet 1888 Say Y here if you want to use the built-in 10/100 Fast ethernet
1881 controller on some Motorola ColdFire and Freescale i.MX processors. 1889 controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2475,6 +2483,8 @@ config S6GMAC
2475 To compile this driver as a module, choose M here. The module 2483 To compile this driver as a module, choose M here. The module
2476 will be called s6gmac. 2484 will be called s6gmac.
2477 2485
2486source "drivers/net/stmmac/Kconfig"
2487
2478endif # NETDEV_1000 2488endif # NETDEV_1000
2479 2489
2480# 2490#
@@ -3223,4 +3233,12 @@ config VIRTIO_NET
3223 This is the virtual network driver for virtio. It can be used with 3233 This is the virtual network driver for virtio. It can be used with
3224 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. 3234 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
3225 3235
3236config VMXNET3
3237 tristate "VMware VMXNET3 ethernet driver"
3238 depends on PCI && X86 && INET
3239 help
3240 This driver supports VMware's vmxnet3 virtual ethernet NIC.
3241 To compile this driver as a module, choose M here: the
3242 module will be called vmxnet3.
3243
3226endif # NETDEVICES 3244endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ae8cd30f13d6..246323d7f161 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -2,6 +2,10 @@
2# Makefile for the Linux network (ethercard) device drivers. 2# Makefile for the Linux network (ethercard) device drivers.
3# 3#
4 4
5obj-$(CONFIG_MII) += mii.o
6obj-$(CONFIG_MDIO) += mdio.o
7obj-$(CONFIG_PHYLIB) += phy/
8
5obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o 9obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
6 10
7obj-$(CONFIG_E1000) += e1000/ 11obj-$(CONFIG_E1000) += e1000/
@@ -26,6 +30,7 @@ obj-$(CONFIG_TEHUTI) += tehuti.o
26obj-$(CONFIG_ENIC) += enic/ 30obj-$(CONFIG_ENIC) += enic/
27obj-$(CONFIG_JME) += jme.o 31obj-$(CONFIG_JME) += jme.o
28obj-$(CONFIG_BE2NET) += benet/ 32obj-$(CONFIG_BE2NET) += benet/
33obj-$(CONFIG_VMXNET3) += vmxnet3/
29 34
30gianfar_driver-objs := gianfar.o \ 35gianfar_driver-objs := gianfar.o \
31 gianfar_ethtool.o \ 36 gianfar_ethtool.o \
@@ -89,20 +94,18 @@ obj-$(CONFIG_SKY2) += sky2.o
89obj-$(CONFIG_SKFP) += skfp/ 94obj-$(CONFIG_SKFP) += skfp/
90obj-$(CONFIG_KS8842) += ks8842.o 95obj-$(CONFIG_KS8842) += ks8842.o
91obj-$(CONFIG_KS8851) += ks8851.o 96obj-$(CONFIG_KS8851) += ks8851.o
97obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
92obj-$(CONFIG_VIA_RHINE) += via-rhine.o 98obj-$(CONFIG_VIA_RHINE) += via-rhine.o
93obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 99obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
94obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 100obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
95obj-$(CONFIG_RIONET) += rionet.o 101obj-$(CONFIG_RIONET) += rionet.o
96obj-$(CONFIG_SH_ETH) += sh_eth.o 102obj-$(CONFIG_SH_ETH) += sh_eth.o
103obj-$(CONFIG_STMMAC_ETH) += stmmac/
97 104
98# 105#
99# end link order section 106# end link order section
100# 107#
101 108
102obj-$(CONFIG_MII) += mii.o
103obj-$(CONFIG_MDIO) += mdio.o
104obj-$(CONFIG_PHYLIB) += phy/
105
106obj-$(CONFIG_SUNDANCE) += sundance.o 109obj-$(CONFIG_SUNDANCE) += sundance.o
107obj-$(CONFIG_HAMACHI) += hamachi.o 110obj-$(CONFIG_HAMACHI) += hamachi.o
108obj-$(CONFIG_NET) += Space.o loopback.o 111obj-$(CONFIG_NET) += Space.o loopback.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 5f0b05c2d71f..d82a9a994753 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1209,7 +1209,8 @@ static int __devinit ace_init(struct net_device *dev)
1209 memset(ap->info, 0, sizeof(struct ace_info)); 1209 memset(ap->info, 0, sizeof(struct ace_info));
1210 memset(ap->skb, 0, sizeof(struct ace_skb)); 1210 memset(ap->skb, 0, sizeof(struct ace_skb));
1211 1211
1212 if (ace_load_firmware(dev)) 1212 ecode = ace_load_firmware(dev);
1213 if (ecode)
1213 goto init_error; 1214 goto init_error;
1214 1215
1215 ap->fw_running = 0; 1216 ap->fw_running = 0;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index be2c6cfe6e84..1372e9a99f5b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2296,7 +2296,7 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2296 u32 ctrl; 2296 u32 ctrl;
2297 u32 mac_ctrl_data; 2297 u32 mac_ctrl_data;
2298 u32 master_ctrl_data; 2298 u32 master_ctrl_data;
2299 u32 wol_ctrl_data; 2299 u32 wol_ctrl_data = 0;
2300 u16 mii_bmsr_data; 2300 u16 mii_bmsr_data;
2301 u16 save_autoneg_advertised; 2301 u16 save_autoneg_advertised;
2302 u16 mii_intr_status_data; 2302 u16 mii_intr_status_data;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index fdf5937233fc..ce6f1ac25df8 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -34,6 +34,7 @@
34 * 34 *
35 * 35 *
36 */ 36 */
37#include <linux/capability.h>
37#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
38#include <linux/module.h> 39#include <linux/module.h>
39#include <linux/kernel.h> 40#include <linux/kernel.h>
@@ -721,7 +722,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
721 ps->rx_errors++; 722 ps->rx_errors++;
722 if (status & RX_MISSED_FRAME) 723 if (status & RX_MISSED_FRAME)
723 ps->rx_missed_errors++; 724 ps->rx_missed_errors++;
724 if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) 725 if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
725 ps->rx_length_errors++; 726 ps->rx_length_errors++;
726 if (status & RX_CRC_ERROR) 727 if (status & RX_CRC_ERROR)
727 ps->rx_crc_errors++; 728 ps->rx_crc_errors++;
@@ -794,8 +795,6 @@ static int au1000_rx(struct net_device *dev)
794 printk("rx len error\n"); 795 printk("rx len error\n");
795 if (status & RX_U_CNTRL_FRAME) 796 if (status & RX_U_CNTRL_FRAME)
796 printk("rx u control frame\n"); 797 printk("rx u control frame\n");
797 if (status & RX_MISSED_FRAME)
798 printk("rx miss\n");
799 } 798 }
800 } 799 }
801 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); 800 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 09d270913c50..ba29dc319b34 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
90 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) 90 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
91 break; 91 break;
92 udelay(1); 92 udelay(1);
93 } while (limit-- >= 0); 93 } while (limit-- > 0);
94 94
95 return (limit < 0) ? 1 : 0; 95 return (limit < 0) ? 1 : 0;
96} 96}
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 684c6fe24c8d..a80da0e14a52 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -258,6 +258,7 @@ struct be_adapter {
258 bool link_up; 258 bool link_up;
259 u32 port_num; 259 u32 port_num;
260 bool promiscuous; 260 bool promiscuous;
261 u32 cap;
261}; 262};
262 263
263extern const struct ethtool_ops be_ethtool_ops; 264extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3dd76c4170bf..28a0eda92680 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
243 243
244int be_cmd_POST(struct be_adapter *adapter) 244int be_cmd_POST(struct be_adapter *adapter)
245{ 245{
246 u16 stage, error; 246 u16 stage;
247 int status, timeout = 0;
247 248
248 error = be_POST_stage_get(adapter, &stage); 249 do {
249 if (error || stage != POST_STAGE_ARMFW_RDY) { 250 status = be_POST_stage_get(adapter, &stage);
250 dev_err(&adapter->pdev->dev, "POST failed.\n"); 251 if (status) {
251 return -1; 252 dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
252 } 253 stage);
254 return -1;
255 } else if (stage != POST_STAGE_ARMFW_RDY) {
256 set_current_state(TASK_INTERRUPTIBLE);
257 schedule_timeout(2 * HZ);
258 timeout += 2;
259 } else {
260 return 0;
261 }
262 } while (timeout < 20);
253 263
254 return 0; 264 dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
265 return -1;
255} 266}
256 267
257static inline void *embedded_payload(struct be_mcc_wrb *wrb) 268static inline void *embedded_payload(struct be_mcc_wrb *wrb)
@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
729/* Create an rx filtering policy configuration on an i/f 740/* Create an rx filtering policy configuration on an i/f
730 * Uses mbox 741 * Uses mbox
731 */ 742 */
732int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac, 743int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
733 bool pmac_invalid, u32 *if_handle, u32 *pmac_id) 744 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
734{ 745{
735 struct be_mcc_wrb *wrb; 746 struct be_mcc_wrb *wrb;
736 struct be_cmd_req_if_create *req; 747 struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
746 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 757 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
747 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); 758 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
748 759
749 req->capability_flags = cpu_to_le32(flags); 760 req->capability_flags = cpu_to_le32(cap_flags);
750 req->enable_flags = cpu_to_le32(flags); 761 req->enable_flags = cpu_to_le32(en_flags);
751 req->pmac_invalid = pmac_invalid; 762 req->pmac_invalid = pmac_invalid;
752 if (!pmac_invalid) 763 if (!pmac_invalid)
753 memcpy(req->mac_addr, mac, ETH_ALEN); 764 memcpy(req->mac_addr, mac, ETH_ALEN);
@@ -1068,7 +1079,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1068} 1079}
1069 1080
1070/* Uses mbox */ 1081/* Uses mbox */
1071int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num) 1082int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
1072{ 1083{
1073 struct be_mcc_wrb *wrb; 1084 struct be_mcc_wrb *wrb;
1074 struct be_cmd_req_query_fw_cfg *req; 1085 struct be_cmd_req_query_fw_cfg *req;
@@ -1088,6 +1099,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
1088 if (!status) { 1099 if (!status) {
1089 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1100 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1090 *port_num = le32_to_cpu(resp->phys_port); 1101 *port_num = le32_to_cpu(resp->phys_port);
1102 *cap = le32_to_cpu(resp->function_cap);
1091 } 1103 }
1092 1104
1093 spin_unlock(&adapter->mbox_lock); 1105 spin_unlock(&adapter->mbox_lock);
@@ -1128,7 +1140,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1128 spin_lock_bh(&adapter->mcc_lock); 1140 spin_lock_bh(&adapter->mcc_lock);
1129 1141
1130 wrb = wrb_from_mccq(adapter); 1142 wrb = wrb_from_mccq(adapter);
1131 req = embedded_payload(wrb);
1132 sge = nonembedded_sgl(wrb); 1143 sge = nonembedded_sgl(wrb);
1133 1144
1134 be_wrb_hdr_prepare(wrb, cmd->size, false, 1); 1145 be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 93e432f3d926..49953787e41c 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -62,7 +62,7 @@ enum {
62 MCC_STATUS_QUEUE_FLUSHING = 0x4, 62 MCC_STATUS_QUEUE_FLUSHING = 0x4,
63/* The command is completing with a DMA error */ 63/* The command is completing with a DMA error */
64 MCC_STATUS_DMA_FAILED = 0x5, 64 MCC_STATUS_DMA_FAILED = 0x5,
65 MCC_STATUS_NOT_SUPPORTED = 0x66 65 MCC_STATUS_NOT_SUPPORTED = 66
66}; 66};
67 67
68#define CQE_STATUS_COMPL_MASK 0xFFFF 68#define CQE_STATUS_COMPL_MASK 0xFFFF
@@ -720,8 +720,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
720extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 720extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
721 u32 if_id, u32 *pmac_id); 721 u32 if_id, u32 *pmac_id);
722extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); 722extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
723extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac, 723extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
724 bool pmac_invalid, u32 *if_handle, u32 *pmac_id); 724 u32 en_flags, u8 *mac, bool pmac_invalid,
725 u32 *if_handle, u32 *pmac_id);
725extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); 726extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
726extern int be_cmd_eq_create(struct be_adapter *adapter, 727extern int be_cmd_eq_create(struct be_adapter *adapter,
727 struct be_queue_info *eq, int eq_delay); 728 struct be_queue_info *eq, int eq_delay);
@@ -760,7 +761,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
760 u32 tx_fc, u32 rx_fc); 761 u32 tx_fc, u32 rx_fc);
761extern int be_cmd_get_flow_control(struct be_adapter *adapter, 762extern int be_cmd_get_flow_control(struct be_adapter *adapter,
762 u32 *tx_fc, u32 *rx_fc); 763 u32 *tx_fc, u32 *rx_fc);
763extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num); 764extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
765 u32 *port_num, u32 *cap);
764extern int be_cmd_reset_function(struct be_adapter *adapter); 766extern int be_cmd_reset_function(struct be_adapter *adapter);
765extern int be_process_mcc(struct be_adapter *adapter); 767extern int be_process_mcc(struct be_adapter *adapter);
766extern int be_cmd_write_flashrom(struct be_adapter *adapter, 768extern int be_cmd_write_flashrom(struct be_adapter *adapter,
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 11445df3dbc0..cda5bf2fc50a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = {
358 .get_rx_csum = be_get_rx_csum, 358 .get_rx_csum = be_get_rx_csum,
359 .set_rx_csum = be_set_rx_csum, 359 .set_rx_csum = be_set_rx_csum,
360 .get_tx_csum = ethtool_op_get_tx_csum, 360 .get_tx_csum = ethtool_op_get_tx_csum,
361 .set_tx_csum = ethtool_op_set_tx_csum, 361 .set_tx_csum = ethtool_op_set_tx_hw_csum,
362 .get_sg = ethtool_op_get_sg, 362 .get_sg = ethtool_op_get_sg,
363 .set_sg = ethtool_op_set_sg, 363 .set_sg = ethtool_op_set_sg,
364 .get_tso = ethtool_op_get_tso, 364 .get_tso = ethtool_op_get_tso,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 409cf0595903..1f941f027718 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter)
197 /* no space available in linux */ 197 /* no space available in linux */
198 dev_stats->tx_dropped = 0; 198 dev_stats->tx_dropped = 0;
199 199
200 dev_stats->multicast = port_stats->tx_multicastframes; 200 dev_stats->multicast = port_stats->rx_multicast_frames;
201 dev_stats->collisions = 0; 201 dev_stats->collisions = 0;
202 202
203 /* detailed tx_errors */ 203 /* detailed tx_errors */
@@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
747 struct be_eth_rx_compl *rxcp) 747 struct be_eth_rx_compl *rxcp)
748{ 748{
749 struct sk_buff *skb; 749 struct sk_buff *skb;
750 u32 vtp, vid; 750 u32 vlanf, vid;
751 u8 vtm;
751 752
752 vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 753 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
754 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
755
756 /* vlanf could be wrongly set in some cards.
757 * ignore if vtm is not set */
758 if ((adapter->cap == 0x400) && !vtm)
759 vlanf = 0;
753 760
754 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); 761 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
755 if (!skb) { 762 if (!skb) {
@@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
772 skb->protocol = eth_type_trans(skb, adapter->netdev); 779 skb->protocol = eth_type_trans(skb, adapter->netdev);
773 skb->dev = adapter->netdev; 780 skb->dev = adapter->netdev;
774 781
775 if (vtp) { 782 if (vlanf) {
776 if (!adapter->vlan_grp || adapter->num_vlans == 0) { 783 if (!adapter->vlan_grp || adapter->num_vlans == 0) {
777 kfree_skb(skb); 784 kfree_skb(skb);
778 return; 785 return;
@@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
797 struct be_eq_obj *eq_obj = &adapter->rx_eq; 804 struct be_eq_obj *eq_obj = &adapter->rx_eq;
798 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 805 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
799 u16 i, rxq_idx = 0, vid, j; 806 u16 i, rxq_idx = 0, vid, j;
807 u8 vtm;
800 808
801 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 809 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
802 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 810 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
803 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 811 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
804 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 812 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
813 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
814
815 /* vlanf could be wrongly set in some cards.
816 * ignore if vtm is not set */
817 if ((adapter->cap == 0x400) && !vtm)
818 vlanf = 0;
805 819
806 skb = napi_get_frags(&eq_obj->napi); 820 skb = napi_get_frags(&eq_obj->napi);
807 if (!skb) { 821 if (!skb) {
@@ -1606,19 +1620,22 @@ static int be_open(struct net_device *netdev)
1606static int be_setup(struct be_adapter *adapter) 1620static int be_setup(struct be_adapter *adapter)
1607{ 1621{
1608 struct net_device *netdev = adapter->netdev; 1622 struct net_device *netdev = adapter->netdev;
1609 u32 if_flags; 1623 u32 cap_flags, en_flags;
1610 int status; 1624 int status;
1611 1625
1612 if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS | 1626 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
1613 BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED | 1627 BE_IF_FLAGS_MCAST_PROMISCUOUS |
1614 BE_IF_FLAGS_PASS_L3L4_ERRORS; 1628 BE_IF_FLAGS_PROMISCUOUS |
1615 status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr, 1629 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1616 false/* pmac_invalid */, &adapter->if_handle, 1630 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
1617 &adapter->pmac_id); 1631 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1632
1633 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1634 netdev->dev_addr, false/* pmac_invalid */,
1635 &adapter->if_handle, &adapter->pmac_id);
1618 if (status != 0) 1636 if (status != 0)
1619 goto do_none; 1637 goto do_none;
1620 1638
1621
1622 status = be_tx_queues_create(adapter); 1639 status = be_tx_queues_create(adapter);
1623 if (status != 0) 1640 if (status != 0)
1624 goto if_destroy; 1641 goto if_destroy;
@@ -1885,8 +1902,8 @@ static void be_netdev_init(struct net_device *netdev)
1885 struct be_adapter *adapter = netdev_priv(netdev); 1902 struct be_adapter *adapter = netdev_priv(netdev);
1886 1903
1887 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 1904 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
1888 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | 1905 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
1889 NETIF_F_IPV6_CSUM | NETIF_F_GRO; 1906 NETIF_F_GRO;
1890 1907
1891 netdev->flags |= IFF_MULTICAST; 1908 netdev->flags |= IFF_MULTICAST;
1892 1909
@@ -2041,11 +2058,16 @@ static int be_hw_up(struct be_adapter *adapter)
2041 if (status) 2058 if (status)
2042 return status; 2059 return status;
2043 2060
2061 status = be_cmd_reset_function(adapter);
2062 if (status)
2063 return status;
2064
2044 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver); 2065 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2045 if (status) 2066 if (status)
2046 return status; 2067 return status;
2047 2068
2048 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num); 2069 status = be_cmd_query_fw_cfg(adapter,
2070 &adapter->port_num, &adapter->cap);
2049 return status; 2071 return status;
2050} 2072}
2051 2073
@@ -2093,10 +2115,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
2093 if (status) 2115 if (status)
2094 goto free_netdev; 2116 goto free_netdev;
2095 2117
2096 status = be_cmd_reset_function(adapter);
2097 if (status)
2098 goto ctrl_clean;
2099
2100 status = be_stats_init(adapter); 2118 status = be_stats_init(adapter);
2101 if (status) 2119 if (status)
2102 goto ctrl_clean; 2120 goto ctrl_clean;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 6c7f795d12de..a4d83409f205 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,9 +361,12 @@ struct l2_fhdr {
361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) 361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
362 362
363#define BNX2_L2CTX_HOST_BDIDX 0x00000004 363#define BNX2_L2CTX_HOST_BDIDX 0x00000004
364#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16 364#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT 16
365#define BNX2_L2CTX_STATUSB_NUM(sb_id) \ 365#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT 24
366 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0) 366#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id) \
367 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
368#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id) \
369 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)
367#define BNX2_L2CTX_HOST_BSEQ 0x00000008 370#define BNX2_L2CTX_HOST_BSEQ 0x00000008
368#define BNX2_L2CTX_NX_BSEQ 0x0000000c 371#define BNX2_L2CTX_NX_BSEQ 0x0000000c
369#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 372#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69c5b15e22da..40fb5eefc72e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -691,7 +691,7 @@ static int bond_check_dev_link(struct bonding *bond,
691 struct net_device *slave_dev, int reporting) 691 struct net_device *slave_dev, int reporting)
692{ 692{
693 const struct net_device_ops *slave_ops = slave_dev->netdev_ops; 693 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
694 static int (*ioctl)(struct net_device *, struct ifreq *, int); 694 int (*ioctl)(struct net_device *, struct ifreq *, int);
695 struct ifreq ifr; 695 struct ifreq ifr;
696 struct mii_ioctl_data *mii; 696 struct mii_ioctl_data *mii;
697 697
@@ -3665,10 +3665,10 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
3665 3665
3666 if (skb->protocol == htons(ETH_P_IP)) { 3666 if (skb->protocol == htons(ETH_P_IP)) {
3667 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ 3667 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
3668 (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count; 3668 (data->h_dest[5] ^ data->h_source[5])) % count;
3669 } 3669 }
3670 3670
3671 return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; 3671 return (data->h_dest[5] ^ data->h_source[5]) % count;
3672} 3672}
3673 3673
3674/* 3674/*
@@ -3695,7 +3695,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
3695 3695
3696 } 3696 }
3697 3697
3698 return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; 3698 return (data->h_dest[5] ^ data->h_source[5]) % count;
3699} 3699}
3700 3700
3701/* 3701/*
@@ -3706,7 +3706,7 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
3706{ 3706{
3707 struct ethhdr *data = (struct ethhdr *)skb->data; 3707 struct ethhdr *data = (struct ethhdr *)skb->data;
3708 3708
3709 return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; 3709 return (data->h_dest[5] ^ data->h_source[5]) % count;
3710} 3710}
3711 3711
3712/*-------------------------- Device entry points ----------------------------*/ 3712/*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6044e12ff9fc..8762a27a2a18 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -22,6 +22,7 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/sched.h>
25#include <linux/sysdev.h> 26#include <linux/sysdev.h>
26#include <linux/fs.h> 27#include <linux/fs.h>
27#include <linux/types.h> 28#include <linux/types.h>
@@ -1182,6 +1183,7 @@ static ssize_t bonding_store_primary(struct device *d,
1182 ": %s: Setting %s as primary slave.\n", 1183 ": %s: Setting %s as primary slave.\n",
1183 bond->dev->name, slave->dev->name); 1184 bond->dev->name, slave->dev->name);
1184 bond->primary_slave = slave; 1185 bond->primary_slave = slave;
1186 strcpy(bond->params.primary, slave->dev->name);
1185 bond_select_active_slave(bond); 1187 bond_select_active_slave(bond);
1186 goto out; 1188 goto out;
1187 } 1189 }
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 090074372462..df32c109b7ac 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -75,6 +75,13 @@ config CAN_EMS_PCI
75 CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche 75 CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
76 (http://www.ems-wuensche.de). 76 (http://www.ems-wuensche.de).
77 77
78config CAN_EMS_USB
79 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
80 depends on USB && CAN_DEV
81 ---help---
82 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
83 from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
84
78config CAN_KVASER_PCI 85config CAN_KVASER_PCI
79 tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards" 86 tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
80 depends on PCI && CAN_SJA1000 87 depends on PCI && CAN_SJA1000
@@ -82,6 +89,12 @@ config CAN_KVASER_PCI
82 This driver is for the the PCIcanx and PCIcan cards (1, 2 or 89 This driver is for the the PCIcanx and PCIcan cards (1, 2 or
83 4 channel) from Kvaser (http://www.kvaser.com). 90 4 channel) from Kvaser (http://www.kvaser.com).
84 91
92config CAN_AT91
93 tristate "Atmel AT91 onchip CAN controller"
94 depends on CAN && CAN_DEV && ARCH_AT91SAM9263
95 ---help---
96 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
97
85config CAN_DEBUG_DEVICES 98config CAN_DEBUG_DEVICES
86 bool "CAN devices debugging messages" 99 bool "CAN devices debugging messages"
87 depends on CAN 100 depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 523a941b358b..0dea62721f2f 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,6 +7,9 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
7obj-$(CONFIG_CAN_DEV) += can-dev.o 7obj-$(CONFIG_CAN_DEV) += can-dev.o
8can-dev-y := dev.o 8can-dev-y := dev.o
9 9
10obj-y += usb/
11
10obj-$(CONFIG_CAN_SJA1000) += sja1000/ 12obj-$(CONFIG_CAN_SJA1000) += sja1000/
13obj-$(CONFIG_CAN_AT91) += at91_can.o
11 14
12ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 15ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
new file mode 100644
index 000000000000..f67ae285a35a
--- /dev/null
+++ b/drivers/net/can/at91_can.c
@@ -0,0 +1,1186 @@
1/*
2 * at91_can.c - CAN network driver for AT91 SoC CAN controller
3 *
4 * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
5 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
6 *
7 * This software may be distributed under the terms of the GNU General
8 * Public License ("GPL") version 2 as distributed in the 'COPYING'
9 * file from the main directory of the linux kernel source.
10 *
11 * Send feedback to <socketcan-users@lists.berlios.de>
12 *
13 *
14 * Your platform definition file should specify something like:
15 *
16 * static struct at91_can_data ek_can_data = {
17 * transceiver_switch = sam9263ek_transceiver_switch,
18 * };
19 *
20 * at91_add_device_can(&ek_can_data);
21 *
22 */
23
24#include <linux/clk.h>
25#include <linux/errno.h>
26#include <linux/if_arp.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include <linux/platform_device.h>
33#include <linux/skbuff.h>
34#include <linux/spinlock.h>
35#include <linux/string.h>
36#include <linux/types.h>
37
38#include <linux/can.h>
39#include <linux/can/dev.h>
40#include <linux/can/error.h>
41
42#include <mach/board.h>
43
44#define DRV_NAME "at91_can"
45#define AT91_NAPI_WEIGHT 12
46
47/*
48 * RX/TX Mailbox split
49 * don't dare to touch
50 */
51#define AT91_MB_RX_NUM 12
52#define AT91_MB_TX_SHIFT 2
53
54#define AT91_MB_RX_FIRST 0
55#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
56
57#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
58#define AT91_MB_RX_SPLIT 8
59#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
60#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
61
62#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
63#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
64#define AT91_MB_TX_LAST (AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
65
66#define AT91_NEXT_PRIO_SHIFT (AT91_MB_TX_SHIFT)
67#define AT91_NEXT_PRIO_MASK (0xf << AT91_MB_TX_SHIFT)
68#define AT91_NEXT_MB_MASK (AT91_MB_TX_NUM - 1)
69#define AT91_NEXT_MASK ((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
70
71/* Common registers */
72enum at91_reg {
73 AT91_MR = 0x000,
74 AT91_IER = 0x004,
75 AT91_IDR = 0x008,
76 AT91_IMR = 0x00C,
77 AT91_SR = 0x010,
78 AT91_BR = 0x014,
79 AT91_TIM = 0x018,
80 AT91_TIMESTP = 0x01C,
81 AT91_ECR = 0x020,
82 AT91_TCR = 0x024,
83 AT91_ACR = 0x028,
84};
85
86/* Mailbox registers (0 <= i <= 15) */
87#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
88#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
89#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
90#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
91#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
92#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
93#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
94#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
95
96/* Register bits */
97#define AT91_MR_CANEN BIT(0)
98#define AT91_MR_LPM BIT(1)
99#define AT91_MR_ABM BIT(2)
100#define AT91_MR_OVL BIT(3)
101#define AT91_MR_TEOF BIT(4)
102#define AT91_MR_TTM BIT(5)
103#define AT91_MR_TIMFRZ BIT(6)
104#define AT91_MR_DRPT BIT(7)
105
106#define AT91_SR_RBSY BIT(29)
107
108#define AT91_MMR_PRIO_SHIFT (16)
109
110#define AT91_MID_MIDE BIT(29)
111
112#define AT91_MSR_MRTR BIT(20)
113#define AT91_MSR_MABT BIT(22)
114#define AT91_MSR_MRDY BIT(23)
115#define AT91_MSR_MMI BIT(24)
116
117#define AT91_MCR_MRTR BIT(20)
118#define AT91_MCR_MTCR BIT(23)
119
120/* Mailbox Modes */
121enum at91_mb_mode {
122 AT91_MB_MODE_DISABLED = 0,
123 AT91_MB_MODE_RX = 1,
124 AT91_MB_MODE_RX_OVRWR = 2,
125 AT91_MB_MODE_TX = 3,
126 AT91_MB_MODE_CONSUMER = 4,
127 AT91_MB_MODE_PRODUCER = 5,
128};
129
130/* Interrupt mask bits */
131#define AT91_IRQ_MB_RX ((1 << (AT91_MB_RX_LAST + 1)) \
132 - (1 << AT91_MB_RX_FIRST))
133#define AT91_IRQ_MB_TX ((1 << (AT91_MB_TX_LAST + 1)) \
134 - (1 << AT91_MB_TX_FIRST))
135#define AT91_IRQ_MB_ALL (AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
136
137#define AT91_IRQ_ERRA (1 << 16)
138#define AT91_IRQ_WARN (1 << 17)
139#define AT91_IRQ_ERRP (1 << 18)
140#define AT91_IRQ_BOFF (1 << 19)
141#define AT91_IRQ_SLEEP (1 << 20)
142#define AT91_IRQ_WAKEUP (1 << 21)
143#define AT91_IRQ_TOVF (1 << 22)
144#define AT91_IRQ_TSTP (1 << 23)
145#define AT91_IRQ_CERR (1 << 24)
146#define AT91_IRQ_SERR (1 << 25)
147#define AT91_IRQ_AERR (1 << 26)
148#define AT91_IRQ_FERR (1 << 27)
149#define AT91_IRQ_BERR (1 << 28)
150
151#define AT91_IRQ_ERR_ALL (0x1fff0000)
152#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
153 AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
154#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
155 AT91_IRQ_ERRP | AT91_IRQ_BOFF)
156
157#define AT91_IRQ_ALL (0x1fffffff)
158
159struct at91_priv {
160 struct can_priv can; /* must be the first member! */
161 struct net_device *dev;
162 struct napi_struct napi;
163
164 void __iomem *reg_base;
165
166 u32 reg_sr;
167 unsigned int tx_next;
168 unsigned int tx_echo;
169 unsigned int rx_next;
170
171 struct clk *clk;
172 struct at91_can_data *pdata;
173};
174
175static struct can_bittiming_const at91_bittiming_const = {
176 .tseg1_min = 4,
177 .tseg1_max = 16,
178 .tseg2_min = 2,
179 .tseg2_max = 8,
180 .sjw_max = 4,
181 .brp_min = 2,
182 .brp_max = 128,
183 .brp_inc = 1,
184};
185
186static inline int get_tx_next_mb(const struct at91_priv *priv)
187{
188 return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
189}
190
191static inline int get_tx_next_prio(const struct at91_priv *priv)
192{
193 return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
194}
195
196static inline int get_tx_echo_mb(const struct at91_priv *priv)
197{
198 return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
199}
200
201static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
202{
203 return readl(priv->reg_base + reg);
204}
205
206static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
207 u32 value)
208{
209 writel(value, priv->reg_base + reg);
210}
211
212static inline void set_mb_mode_prio(const struct at91_priv *priv,
213 unsigned int mb, enum at91_mb_mode mode, int prio)
214{
215 at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
216}
217
218static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
219 enum at91_mb_mode mode)
220{
221 set_mb_mode_prio(priv, mb, mode, 0);
222}
223
224static struct sk_buff *alloc_can_skb(struct net_device *dev,
225 struct can_frame **cf)
226{
227 struct sk_buff *skb;
228
229 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
230 if (unlikely(!skb))
231 return NULL;
232
233 skb->protocol = htons(ETH_P_CAN);
234 skb->ip_summed = CHECKSUM_UNNECESSARY;
235 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
236
237 return skb;
238}
239
240static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
241 struct can_frame **cf)
242{
243 struct sk_buff *skb;
244
245 skb = alloc_can_skb(dev, cf);
246 if (unlikely(!skb))
247 return NULL;
248
249 memset(*cf, 0, sizeof(struct can_frame));
250 (*cf)->can_id = CAN_ERR_FLAG;
251 (*cf)->can_dlc = CAN_ERR_DLC;
252
253 return skb;
254}
255
256/*
257 * Swtich transceiver on or off
258 */
259static void at91_transceiver_switch(const struct at91_priv *priv, int on)
260{
261 if (priv->pdata && priv->pdata->transceiver_switch)
262 priv->pdata->transceiver_switch(on);
263}
264
265static void at91_setup_mailboxes(struct net_device *dev)
266{
267 struct at91_priv *priv = netdev_priv(dev);
268 unsigned int i;
269
270 /*
271 * The first 12 mailboxes are used as a reception FIFO. The
272 * last mailbox is configured with overwrite option. The
273 * overwrite flag indicates a FIFO overflow.
274 */
275 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
276 set_mb_mode(priv, i, AT91_MB_MODE_RX);
277 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
278
279 /* The last 4 mailboxes are used for transmitting. */
280 for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
281 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
282
283 /* Reset tx and rx helper pointers */
284 priv->tx_next = priv->tx_echo = priv->rx_next = 0;
285}
286
287static int at91_set_bittiming(struct net_device *dev)
288{
289 const struct at91_priv *priv = netdev_priv(dev);
290 const struct can_bittiming *bt = &priv->can.bittiming;
291 u32 reg_br;
292
293 reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
294 ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
295 ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
296 ((bt->phase_seg2 - 1) << 0);
297
298 dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
299
300 at91_write(priv, AT91_BR, reg_br);
301
302 return 0;
303}
304
305static void at91_chip_start(struct net_device *dev)
306{
307 struct at91_priv *priv = netdev_priv(dev);
308 u32 reg_mr, reg_ier;
309
310 /* disable interrupts */
311 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
312
313 /* disable chip */
314 reg_mr = at91_read(priv, AT91_MR);
315 at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
316
317 at91_setup_mailboxes(dev);
318 at91_transceiver_switch(priv, 1);
319
320 /* enable chip */
321 at91_write(priv, AT91_MR, AT91_MR_CANEN);
322
323 priv->can.state = CAN_STATE_ERROR_ACTIVE;
324
325 /* Enable interrupts */
326 reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
327 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
328 at91_write(priv, AT91_IER, reg_ier);
329}
330
331static void at91_chip_stop(struct net_device *dev, enum can_state state)
332{
333 struct at91_priv *priv = netdev_priv(dev);
334 u32 reg_mr;
335
336 /* disable interrupts */
337 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
338
339 reg_mr = at91_read(priv, AT91_MR);
340 at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
341
342 at91_transceiver_switch(priv, 0);
343 priv->can.state = state;
344}
345
346/*
347 * theory of operation:
348 *
349 * According to the datasheet priority 0 is the highest priority, 15
350 * is the lowest. If two mailboxes have the same priority level the
351 * message of the mailbox with the lowest number is sent first.
352 *
353 * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
354 * the next mailbox with prio 0, and so on, until all mailboxes are
355 * used. Then we start from the beginning with mailbox
356 * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1
357 * prio 1. When we reach the last mailbox with prio 15, we have to
358 * stop sending, waiting for all messages to be delivered, then start
359 * again with mailbox AT91_MB_TX_FIRST prio 0.
360 *
361 * We use the priv->tx_next as counter for the next transmission
362 * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
363 * encode the mailbox number, the upper 4 bits the mailbox priority:
364 *
365 * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) ||
366 * (mb - AT91_MB_TX_FIRST);
367 *
368 */
369static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
370{
371 struct at91_priv *priv = netdev_priv(dev);
372 struct net_device_stats *stats = &dev->stats;
373 struct can_frame *cf = (struct can_frame *)skb->data;
374 unsigned int mb, prio;
375 u32 reg_mid, reg_mcr;
376
377 mb = get_tx_next_mb(priv);
378 prio = get_tx_next_prio(priv);
379
380 if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
381 netif_stop_queue(dev);
382
383 dev_err(dev->dev.parent,
384 "BUG! TX buffer full when queue awake!\n");
385 return NETDEV_TX_BUSY;
386 }
387
388 if (cf->can_id & CAN_EFF_FLAG)
389 reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
390 else
391 reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
392
393 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
394 (cf->can_dlc << 16) | AT91_MCR_MTCR;
395
396 /* disable MB while writing ID (see datasheet) */
397 set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
398 at91_write(priv, AT91_MID(mb), reg_mid);
399 set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);
400
401 at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
402 at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));
403
404 /* This triggers transmission */
405 at91_write(priv, AT91_MCR(mb), reg_mcr);
406
407 stats->tx_bytes += cf->can_dlc;
408 dev->trans_start = jiffies;
409
410 /* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */
411 can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
412
413 /*
414 * we have to stop the queue and deliver all messages in case
415 * of a prio+mb counter wrap around. This is the case if
416 * tx_next buffer prio and mailbox equals 0.
417 *
418 * also stop the queue if next buffer is still in use
419 * (== not ready)
420 */
421 priv->tx_next++;
422 if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
423 AT91_MSR_MRDY) ||
424 (priv->tx_next & AT91_NEXT_MASK) == 0)
425 netif_stop_queue(dev);
426
427 /* Enable interrupt for this mailbox */
428 at91_write(priv, AT91_IER, 1 << mb);
429
430 return NETDEV_TX_OK;
431}
432
433/**
434 * at91_activate_rx_low - activate lower rx mailboxes
435 * @priv: a91 context
436 *
437 * Reenables the lower mailboxes for reception of new CAN messages
438 */
439static inline void at91_activate_rx_low(const struct at91_priv *priv)
440{
441 u32 mask = AT91_MB_RX_LOW_MASK;
442 at91_write(priv, AT91_TCR, mask);
443}
444
445/**
446 * at91_activate_rx_mb - reactive single rx mailbox
447 * @priv: a91 context
448 * @mb: mailbox to reactivate
449 *
450 * Reenables given mailbox for reception of new CAN messages
451 */
452static inline void at91_activate_rx_mb(const struct at91_priv *priv,
453 unsigned int mb)
454{
455 u32 mask = 1 << mb;
456 at91_write(priv, AT91_TCR, mask);
457}
458
459/**
460 * at91_rx_overflow_err - send error frame due to rx overflow
461 * @dev: net device
462 */
463static void at91_rx_overflow_err(struct net_device *dev)
464{
465 struct net_device_stats *stats = &dev->stats;
466 struct sk_buff *skb;
467 struct can_frame *cf;
468
469 dev_dbg(dev->dev.parent, "RX buffer overflow\n");
470 stats->rx_over_errors++;
471 stats->rx_errors++;
472
473 skb = alloc_can_err_skb(dev, &cf);
474 if (unlikely(!skb))
475 return;
476
477 cf->can_id |= CAN_ERR_CRTL;
478 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
479 netif_receive_skb(skb);
480
481 stats->rx_packets++;
482 stats->rx_bytes += cf->can_dlc;
483}
484
485/**
486 * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
487 * @dev: net device
488 * @mb: mailbox number to read from
489 * @cf: can frame where to store message
490 *
491 * Reads a CAN message from the given mailbox and stores data into
492 * given can frame. "mb" and "cf" must be valid.
493 */
494static void at91_read_mb(struct net_device *dev, unsigned int mb,
495 struct can_frame *cf)
496{
497 const struct at91_priv *priv = netdev_priv(dev);
498 u32 reg_msr, reg_mid;
499
500 reg_mid = at91_read(priv, AT91_MID(mb));
501 if (reg_mid & AT91_MID_MIDE)
502 cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
503 else
504 cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
505
506 reg_msr = at91_read(priv, AT91_MSR(mb));
507 if (reg_msr & AT91_MSR_MRTR)
508 cf->can_id |= CAN_RTR_FLAG;
509 cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
510
511 *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
512 *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
513
514 if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
515 at91_rx_overflow_err(dev);
516}
517
518/**
519 * at91_read_msg - read CAN message from mailbox
520 * @dev: net device
521 * @mb: mail box to read from
522 *
523 * Reads a CAN message from given mailbox, and put into linux network
524 * RX queue, does all housekeeping chores (stats, ...)
525 */
526static void at91_read_msg(struct net_device *dev, unsigned int mb)
527{
528 struct net_device_stats *stats = &dev->stats;
529 struct can_frame *cf;
530 struct sk_buff *skb;
531
532 skb = alloc_can_skb(dev, &cf);
533 if (unlikely(!skb)) {
534 stats->rx_dropped++;
535 return;
536 }
537
538 at91_read_mb(dev, mb, cf);
539 netif_receive_skb(skb);
540
541 stats->rx_packets++;
542 stats->rx_bytes += cf->can_dlc;
543}
544
545/**
546 * at91_poll_rx - read multiple CAN messages from mailboxes
547 * @dev: net device
548 * @quota: max number of pkgs we're allowed to receive
549 *
550 * Theory of Operation:
551 *
552 * 12 of the 16 mailboxes on the chip are reserved for RX. we split
553 * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
554 *
555 * Like it or not, but the chip always saves a received CAN message
556 * into the first free mailbox it finds (starting with the
557 * lowest). This makes it very difficult to read the messages in the
558 * right order from the chip. This is how we work around that problem:
559 *
560 * The first message goes into mb nr. 0 and issues an interrupt. All
561 * rx ints are disabled in the interrupt handler and a napi poll is
562 * scheduled. We read the mailbox, but do _not_ reenable the mb (to
563 * receive another message).
564 *
565 * lower mbxs upper
566 * ______^______ __^__
567 * / \ / \
568 * +-+-+-+-+-+-+-+-++-+-+-+-+
569 * |x|x|x|x|x|x|x|x|| | | | |
570 * +-+-+-+-+-+-+-+-++-+-+-+-+
571 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
572 * 0 1 2 3 4 5 6 7 8 9 0 1 / box
573 *
574 * The variable priv->rx_next points to the next mailbox to read a
575 * message from. As long we're in the lower mailboxes we just read the
576 * mailbox but not reenable it.
577 *
578 * With completion of the last of the lower mailboxes, we reenable the
579 * whole first group, but continue to look for filled mailboxes in the
580 * upper mailboxes. Imagine the second group like overflow mailboxes,
581 * which takes CAN messages if the lower goup is full. While in the
582 * upper group we reenable the mailbox right after reading it. Giving
583 * the chip more room to store messages.
584 *
585 * After finishing we look again in the lower group if we've still
586 * quota.
587 *
588 */
589static int at91_poll_rx(struct net_device *dev, int quota)
590{
591 struct at91_priv *priv = netdev_priv(dev);
592 u32 reg_sr = at91_read(priv, AT91_SR);
593 const unsigned long *addr = (unsigned long *)&reg_sr;
594 unsigned int mb;
595 int received = 0;
596
597 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
598 reg_sr & AT91_MB_RX_LOW_MASK)
599 dev_info(dev->dev.parent,
600 "order of incoming frames cannot be guaranteed\n");
601
602 again:
603 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
604 mb < AT91_MB_RX_NUM && quota > 0;
605 reg_sr = at91_read(priv, AT91_SR),
606 mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
607 at91_read_msg(dev, mb);
608
609 /* reactivate mailboxes */
610 if (mb == AT91_MB_RX_LOW_LAST)
611 /* all lower mailboxed, if just finished it */
612 at91_activate_rx_low(priv);
613 else if (mb > AT91_MB_RX_LOW_LAST)
614 /* only the mailbox we read */
615 at91_activate_rx_mb(priv, mb);
616
617 received++;
618 quota--;
619 }
620
621 /* upper group completed, look again in lower */
622 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
623 quota > 0 && mb >= AT91_MB_RX_NUM) {
624 priv->rx_next = 0;
625 goto again;
626 }
627
628 return received;
629}
630
631static void at91_poll_err_frame(struct net_device *dev,
632 struct can_frame *cf, u32 reg_sr)
633{
634 struct at91_priv *priv = netdev_priv(dev);
635
636 /* CRC error */
637 if (reg_sr & AT91_IRQ_CERR) {
638 dev_dbg(dev->dev.parent, "CERR irq\n");
639 dev->stats.rx_errors++;
640 priv->can.can_stats.bus_error++;
641 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
642 }
643
644 /* Stuffing Error */
645 if (reg_sr & AT91_IRQ_SERR) {
646 dev_dbg(dev->dev.parent, "SERR irq\n");
647 dev->stats.rx_errors++;
648 priv->can.can_stats.bus_error++;
649 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
650 cf->data[2] |= CAN_ERR_PROT_STUFF;
651 }
652
653 /* Acknowledgement Error */
654 if (reg_sr & AT91_IRQ_AERR) {
655 dev_dbg(dev->dev.parent, "AERR irq\n");
656 dev->stats.tx_errors++;
657 cf->can_id |= CAN_ERR_ACK;
658 }
659
660 /* Form error */
661 if (reg_sr & AT91_IRQ_FERR) {
662 dev_dbg(dev->dev.parent, "FERR irq\n");
663 dev->stats.rx_errors++;
664 priv->can.can_stats.bus_error++;
665 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
666 cf->data[2] |= CAN_ERR_PROT_FORM;
667 }
668
669 /* Bit Error */
670 if (reg_sr & AT91_IRQ_BERR) {
671 dev_dbg(dev->dev.parent, "BERR irq\n");
672 dev->stats.tx_errors++;
673 priv->can.can_stats.bus_error++;
674 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
675 cf->data[2] |= CAN_ERR_PROT_BIT;
676 }
677}
678
679static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
680{
681 struct sk_buff *skb;
682 struct can_frame *cf;
683
684 if (quota == 0)
685 return 0;
686
687 skb = alloc_can_err_skb(dev, &cf);
688 if (unlikely(!skb))
689 return 0;
690
691 at91_poll_err_frame(dev, cf, reg_sr);
692 netif_receive_skb(skb);
693
694 dev->last_rx = jiffies;
695 dev->stats.rx_packets++;
696 dev->stats.rx_bytes += cf->can_dlc;
697
698 return 1;
699}
700
/*
 * NAPI poll handler: drain pending RX mailboxes and error conditions,
 * then re-enable the corresponding interrupts once under budget.
 */
static int at91_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	int work_done = 0;

	if (reg_sr & AT91_IRQ_MB_RX)
		work_done += at91_poll_rx(dev, quota - work_done);

	/*
	 * The error bits are clear on read,
	 * so use saved value from irq handler.
	 */
	reg_sr |= priv->reg_sr;
	if (reg_sr & AT91_IRQ_ERR_FRAME)
		work_done += at91_poll_err(dev, quota - work_done, reg_sr);

	if (work_done < quota) {
		/* enable IRQs for frame errors and all mailboxes >= rx_next */
		u32 reg_ier = AT91_IRQ_ERR_FRAME;
		reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);

		/* complete NAPI before unmasking, so a new irq can reschedule */
		napi_complete(napi);
		at91_write(priv, AT91_IER, reg_ier);
	}

	return work_done;
}
730
/*
 * theory of operation:
 *
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
 *
 * We iterate from priv->tx_echo to priv->tx_next and check if the
 * packet has been transmitted, echo it back to the CAN framework. If
 * we discover a not yet transmitted packet, stop looking for more.
 */
static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr;
	unsigned int mb;

	/* masking of reg_sr not needed, already done by at91_irq */

	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		mb = get_tx_echo_mb(priv);

		/* no event in mailbox? */
		if (!(reg_sr & (1 << mb)))
			break;

		/* Disable irq for this TX mailbox */
		at91_write(priv, AT91_IDR, 1 << mb);

		/*
		 * only echo if mailbox signals us a transfer
		 * complete (MSR_MRDY). Otherwise it's a transfer
		 * abort. "can_bus_off()" takes care about the skbs
		 * parked in the echo queue.
		 */
		reg_msr = at91_read(priv, AT91_MSR(mb));
		if (likely(reg_msr & AT91_MSR_MRDY &&
			   ~reg_msr & AT91_MSR_MABT)) {
			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
			can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
			dev->stats.tx_packets++;
		}
	}

	/*
	 * restart queue if we don't have a wrap around but restart if
	 * we get a TX int for the last can frame directly before a
	 * wrap around.
	 */
	if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
	    (priv->tx_echo & AT91_NEXT_MASK) == 0)
		netif_wake_queue(dev);
}
785
786static void at91_irq_err_state(struct net_device *dev,
787 struct can_frame *cf, enum can_state new_state)
788{
789 struct at91_priv *priv = netdev_priv(dev);
790 u32 reg_idr, reg_ier, reg_ecr;
791 u8 tec, rec;
792
793 reg_ecr = at91_read(priv, AT91_ECR);
794 rec = reg_ecr & 0xff;
795 tec = reg_ecr >> 16;
796
797 switch (priv->can.state) {
798 case CAN_STATE_ERROR_ACTIVE:
799 /*
800 * from: ERROR_ACTIVE
801 * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
802 * => : there was a warning int
803 */
804 if (new_state >= CAN_STATE_ERROR_WARNING &&
805 new_state <= CAN_STATE_BUS_OFF) {
806 dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
807 priv->can.can_stats.error_warning++;
808
809 cf->can_id |= CAN_ERR_CRTL;
810 cf->data[1] = (tec > rec) ?
811 CAN_ERR_CRTL_TX_WARNING :
812 CAN_ERR_CRTL_RX_WARNING;
813 }
814 case CAN_STATE_ERROR_WARNING: /* fallthrough */
815 /*
816 * from: ERROR_ACTIVE, ERROR_WARNING
817 * to : ERROR_PASSIVE, BUS_OFF
818 * => : error passive int
819 */
820 if (new_state >= CAN_STATE_ERROR_PASSIVE &&
821 new_state <= CAN_STATE_BUS_OFF) {
822 dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
823 priv->can.can_stats.error_passive++;
824
825 cf->can_id |= CAN_ERR_CRTL;
826 cf->data[1] = (tec > rec) ?
827 CAN_ERR_CRTL_TX_PASSIVE :
828 CAN_ERR_CRTL_RX_PASSIVE;
829 }
830 break;
831 case CAN_STATE_BUS_OFF:
832 /*
833 * from: BUS_OFF
834 * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
835 */
836 if (new_state <= CAN_STATE_ERROR_PASSIVE) {
837 cf->can_id |= CAN_ERR_RESTARTED;
838
839 dev_dbg(dev->dev.parent, "restarted\n");
840 priv->can.can_stats.restarts++;
841
842 netif_carrier_on(dev);
843 netif_wake_queue(dev);
844 }
845 break;
846 default:
847 break;
848 }
849
850
851 /* process state changes depending on the new state */
852 switch (new_state) {
853 case CAN_STATE_ERROR_ACTIVE:
854 /*
855 * actually we want to enable AT91_IRQ_WARN here, but
856 * it screws up the system under certain
857 * circumstances. so just enable AT91_IRQ_ERRP, thus
858 * the "fallthrough"
859 */
860 dev_dbg(dev->dev.parent, "Error Active\n");
861 cf->can_id |= CAN_ERR_PROT;
862 cf->data[2] = CAN_ERR_PROT_ACTIVE;
863 case CAN_STATE_ERROR_WARNING: /* fallthrough */
864 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
865 reg_ier = AT91_IRQ_ERRP;
866 break;
867 case CAN_STATE_ERROR_PASSIVE:
868 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
869 reg_ier = AT91_IRQ_BOFF;
870 break;
871 case CAN_STATE_BUS_OFF:
872 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
873 AT91_IRQ_WARN | AT91_IRQ_BOFF;
874 reg_ier = 0;
875
876 cf->can_id |= CAN_ERR_BUSOFF;
877
878 dev_dbg(dev->dev.parent, "bus-off\n");
879 netif_carrier_off(dev);
880 priv->can.can_stats.bus_off++;
881
882 /* turn off chip, if restart is disabled */
883 if (!priv->can.restart_ms) {
884 at91_chip_stop(dev, CAN_STATE_BUS_OFF);
885 return;
886 }
887 break;
888 default:
889 break;
890 }
891
892 at91_write(priv, AT91_IDR, reg_idr);
893 at91_write(priv, AT91_IER, reg_ier);
894}
895
896static void at91_irq_err(struct net_device *dev)
897{
898 struct at91_priv *priv = netdev_priv(dev);
899 struct sk_buff *skb;
900 struct can_frame *cf;
901 enum can_state new_state;
902 u32 reg_sr;
903
904 reg_sr = at91_read(priv, AT91_SR);
905
906 /* we need to look at the unmasked reg_sr */
907 if (unlikely(reg_sr & AT91_IRQ_BOFF))
908 new_state = CAN_STATE_BUS_OFF;
909 else if (unlikely(reg_sr & AT91_IRQ_ERRP))
910 new_state = CAN_STATE_ERROR_PASSIVE;
911 else if (unlikely(reg_sr & AT91_IRQ_WARN))
912 new_state = CAN_STATE_ERROR_WARNING;
913 else if (likely(reg_sr & AT91_IRQ_ERRA))
914 new_state = CAN_STATE_ERROR_ACTIVE;
915 else {
916 dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
917 return;
918 }
919
920 /* state hasn't changed */
921 if (likely(new_state == priv->can.state))
922 return;
923
924 skb = alloc_can_err_skb(dev, &cf);
925 if (unlikely(!skb))
926 return;
927
928 at91_irq_err_state(dev, cf, new_state);
929 netif_rx(skb);
930
931 dev->last_rx = jiffies;
932 dev->stats.rx_packets++;
933 dev->stats.rx_bytes += cf->can_dlc;
934
935 priv->can.state = new_state;
936}
937
938/*
939 * interrupt handler
940 */
/*
 * interrupt handler
 *
 * RX and error-frame work is deferred to NAPI: the corresponding
 * interrupt sources are masked here and re-enabled in at91_poll()
 * once the budget allows. TX completion and state changes are
 * handled directly in hard-irq context.
 */
static irqreturn_t at91_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct at91_priv *priv = netdev_priv(dev);
	irqreturn_t handled = IRQ_NONE;
	u32 reg_sr, reg_imr;

	reg_sr = at91_read(priv, AT91_SR);
	reg_imr = at91_read(priv, AT91_IMR);

	/* Ignore masked interrupts */
	reg_sr &= reg_imr;
	if (!reg_sr)
		goto exit;

	handled = IRQ_HANDLED;

	/* Receive or error interrupt? -> napi */
	if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
		/*
		 * The error bits are clear on read,
		 * save for later use.
		 */
		priv->reg_sr = reg_sr;
		at91_write(priv, AT91_IDR,
			   AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
		napi_schedule(&priv->napi);
	}

	/* Transmission complete interrupt */
	if (reg_sr & AT91_IRQ_MB_TX)
		at91_irq_tx(dev, reg_sr);

	at91_irq_err(dev);

 exit:
	return handled;
}
979
/*
 * ndo_open: bring up clock, bittiming, irq and the controller itself.
 * Unwinds in reverse order on failure via the goto labels.
 */
static int at91_open(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	int err;

	/* clock must be running before any register access */
	clk_enable(priv->clk);

	/* check or determine and set bittime */
	err = open_candev(dev);
	if (err)
		goto out;

	/* register interrupt handler */
	if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
			dev->name, dev)) {
		err = -EAGAIN;
		goto out_close;
	}

	/* start chip and queuing */
	at91_chip_start(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

 out_close:
	close_candev(dev);
 out:
	clk_disable(priv->clk);

	return err;
}
1013
/*
 * stop CAN bus activity
 *
 * ndo_stop counterpart of at91_open(): quiesce the queue and NAPI
 * first, then stop the chip, release the irq and gate the clock.
 */
static int at91_close(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	at91_chip_stop(dev, CAN_STATE_STOPPED);

	free_irq(dev->irq, dev);
	clk_disable(priv->clk);

	close_candev(dev);

	return 0;
}
1032
1033static int at91_set_mode(struct net_device *dev, enum can_mode mode)
1034{
1035 switch (mode) {
1036 case CAN_MODE_START:
1037 at91_chip_start(dev);
1038 netif_wake_queue(dev);
1039 break;
1040
1041 default:
1042 return -EOPNOTSUPP;
1043 }
1044
1045 return 0;
1046}
1047
/* netdev callbacks; at91_start_xmit is defined earlier in this file */
static const struct net_device_ops at91_netdev_ops = {
	.ndo_open	= at91_open,
	.ndo_stop	= at91_close,
	.ndo_start_xmit	= at91_start_xmit,
};
1053
1054static int __init at91_can_probe(struct platform_device *pdev)
1055{
1056 struct net_device *dev;
1057 struct at91_priv *priv;
1058 struct resource *res;
1059 struct clk *clk;
1060 void __iomem *addr;
1061 int err, irq;
1062
1063 clk = clk_get(&pdev->dev, "can_clk");
1064 if (IS_ERR(clk)) {
1065 dev_err(&pdev->dev, "no clock defined\n");
1066 err = -ENODEV;
1067 goto exit;
1068 }
1069
1070 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1071 irq = platform_get_irq(pdev, 0);
1072 if (!res || !irq) {
1073 err = -ENODEV;
1074 goto exit_put;
1075 }
1076
1077 if (!request_mem_region(res->start,
1078 resource_size(res),
1079 pdev->name)) {
1080 err = -EBUSY;
1081 goto exit_put;
1082 }
1083
1084 addr = ioremap_nocache(res->start, resource_size(res));
1085 if (!addr) {
1086 err = -ENOMEM;
1087 goto exit_release;
1088 }
1089
1090 dev = alloc_candev(sizeof(struct at91_priv));
1091 if (!dev) {
1092 err = -ENOMEM;
1093 goto exit_iounmap;
1094 }
1095
1096 dev->netdev_ops = &at91_netdev_ops;
1097 dev->irq = irq;
1098 dev->flags |= IFF_ECHO;
1099
1100 priv = netdev_priv(dev);
1101 priv->can.clock.freq = clk_get_rate(clk);
1102 priv->can.bittiming_const = &at91_bittiming_const;
1103 priv->can.do_set_bittiming = at91_set_bittiming;
1104 priv->can.do_set_mode = at91_set_mode;
1105 priv->reg_base = addr;
1106 priv->dev = dev;
1107 priv->clk = clk;
1108 priv->pdata = pdev->dev.platform_data;
1109
1110 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
1111
1112 dev_set_drvdata(&pdev->dev, dev);
1113 SET_NETDEV_DEV(dev, &pdev->dev);
1114
1115 err = register_candev(dev);
1116 if (err) {
1117 dev_err(&pdev->dev, "registering netdev failed\n");
1118 goto exit_free;
1119 }
1120
1121 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
1122 priv->reg_base, dev->irq);
1123
1124 return 0;
1125
1126 exit_free:
1127 free_netdev(dev);
1128 exit_iounmap:
1129 iounmap(addr);
1130 exit_release:
1131 release_mem_region(res->start, resource_size(res));
1132 exit_put:
1133 clk_put(clk);
1134 exit:
1135 return err;
1136}
1137
/*
 * Platform remove: mirror of at91_can_probe(), releasing resources in
 * reverse order of acquisition.
 */
static int __devexit at91_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct at91_priv *priv = netdev_priv(dev);
	struct resource *res;

	unregister_netdev(dev);

	platform_set_drvdata(pdev, NULL);

	free_netdev(dev);

	iounmap(priv->reg_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	clk_put(priv->clk);

	return 0;
}
1159
/* platform glue; matched against the "at91_can" platform device */
static struct platform_driver at91_can_driver = {
	.probe		= at91_can_probe,
	.remove		= __devexit_p(at91_can_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
1168
1169static int __init at91_can_module_init(void)
1170{
1171 printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
1172 return platform_driver_register(&at91_can_driver);
1173}
1174
1175static void __exit at91_can_module_exit(void)
1176{
1177 platform_driver_unregister(&at91_can_driver);
1178 printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
1179}
1180
1181module_init(at91_can_module_init);
1182module_exit(at91_can_module_exit);
1183
1184MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
1185MODULE_LICENSE("GPL v2");
1186MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 7d84b8ac9c1c..fd04789d3370 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -94,12 +94,14 @@ struct ems_pci_card {
94#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) 94#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
95 95
96#define EMS_PCI_V1_BASE_BAR 1 96#define EMS_PCI_V1_BASE_BAR 1
97#define EMS_PCI_V1_MEM_SIZE 4096 97#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
98#define EMS_PCI_V2_BASE_BAR 2 98#define EMS_PCI_V2_BASE_BAR 2
99#define EMS_PCI_V2_MEM_SIZE 128 99#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
100#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */ 100#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
101#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */ 101#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
102 102
103#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
104
103static struct pci_device_id ems_pci_tbl[] = { 105static struct pci_device_id ems_pci_tbl[] = {
104 /* CPC-PCI v1 */ 106 /* CPC-PCI v1 */
105 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, 107 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
@@ -224,7 +226,7 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
224 struct sja1000_priv *priv; 226 struct sja1000_priv *priv;
225 struct net_device *dev; 227 struct net_device *dev;
226 struct ems_pci_card *card; 228 struct ems_pci_card *card;
227 int max_chan, mem_size, base_bar; 229 int max_chan, conf_size, base_bar;
228 int err, i; 230 int err, i;
229 231
230 /* Enabling PCI device */ 232 /* Enabling PCI device */
@@ -251,22 +253,22 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
251 card->version = 2; /* CPC-PCI v2 */ 253 card->version = 2; /* CPC-PCI v2 */
252 max_chan = EMS_PCI_V2_MAX_CHAN; 254 max_chan = EMS_PCI_V2_MAX_CHAN;
253 base_bar = EMS_PCI_V2_BASE_BAR; 255 base_bar = EMS_PCI_V2_BASE_BAR;
254 mem_size = EMS_PCI_V2_MEM_SIZE; 256 conf_size = EMS_PCI_V2_CONF_SIZE;
255 } else { 257 } else {
256 card->version = 1; /* CPC-PCI v1 */ 258 card->version = 1; /* CPC-PCI v1 */
257 max_chan = EMS_PCI_V1_MAX_CHAN; 259 max_chan = EMS_PCI_V1_MAX_CHAN;
258 base_bar = EMS_PCI_V1_BASE_BAR; 260 base_bar = EMS_PCI_V1_BASE_BAR;
259 mem_size = EMS_PCI_V1_MEM_SIZE; 261 conf_size = EMS_PCI_V1_CONF_SIZE;
260 } 262 }
261 263
262 /* Remap configuration space and controller memory area */ 264 /* Remap configuration space and controller memory area */
263 card->conf_addr = pci_iomap(pdev, 0, mem_size); 265 card->conf_addr = pci_iomap(pdev, 0, conf_size);
264 if (card->conf_addr == NULL) { 266 if (card->conf_addr == NULL) {
265 err = -ENOMEM; 267 err = -ENOMEM;
266 goto failure_cleanup; 268 goto failure_cleanup;
267 } 269 }
268 270
269 card->base_addr = pci_iomap(pdev, base_bar, mem_size); 271 card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
270 if (card->base_addr == NULL) { 272 if (card->base_addr == NULL) {
271 err = -ENOMEM; 273 err = -ENOMEM;
272 goto failure_cleanup; 274 goto failure_cleanup;
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 3373560405ba..9dd076a626a5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
213 {.compatible = "nxp,sja1000"}, 213 {.compatible = "nxp,sja1000"},
214 {}, 214 {},
215}; 215};
216MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
216 217
217static struct of_platform_driver sja1000_ofp_driver = { 218static struct of_platform_driver sja1000_ofp_driver = {
218 .owner = THIS_MODULE, 219 .owner = THIS_MODULE,
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
new file mode 100644
index 000000000000..c3f75ba701b1
--- /dev/null
+++ b/drivers/net/can/usb/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Linux Controller Area Network USB drivers.
3#
4
5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
new file mode 100644
index 000000000000..9012e0abc626
--- /dev/null
+++ b/drivers/net/can/usb/ems_usb.c
@@ -0,0 +1,1155 @@
1/*
2 * CAN driver for EMS Dr. Thomas Wuensche CPC-USB/ARM7
3 *
4 * Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published
8 * by the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19#include <linux/init.h>
20#include <linux/signal.h>
21#include <linux/slab.h>
22#include <linux/module.h>
23#include <linux/netdevice.h>
24#include <linux/usb.h>
25
26#include <linux/can.h>
27#include <linux/can/dev.h>
28#include <linux/can/error.h>
29
30MODULE_AUTHOR("Sebastian Haas <haas@ems-wuensche.com>");
31MODULE_DESCRIPTION("CAN driver for EMS Dr. Thomas Wuensche CAN/USB interfaces");
32MODULE_LICENSE("GPL v2");
33
34/* Control-Values for CPC_Control() Command Subject Selection */
35#define CONTR_CAN_MESSAGE 0x04
36#define CONTR_CAN_STATE 0x0C
37#define CONTR_BUS_ERROR 0x1C
38
39/* Control Command Actions */
40#define CONTR_CONT_OFF 0
41#define CONTR_CONT_ON 1
42#define CONTR_ONCE 2
43
44/* Messages from CPC to PC */
45#define CPC_MSG_TYPE_CAN_FRAME 1 /* CAN data frame */
46#define CPC_MSG_TYPE_RTR_FRAME 8 /* CAN remote frame */
47#define CPC_MSG_TYPE_CAN_PARAMS 12 /* Actual CAN parameters */
48#define CPC_MSG_TYPE_CAN_STATE 14 /* CAN state message */
49#define CPC_MSG_TYPE_EXT_CAN_FRAME 16 /* Extended CAN data frame */
50#define CPC_MSG_TYPE_EXT_RTR_FRAME 17 /* Extended remote frame */
51#define CPC_MSG_TYPE_CONTROL 19 /* change interface behavior */
52#define CPC_MSG_TYPE_CONFIRM 20 /* command processed confirmation */
53#define CPC_MSG_TYPE_OVERRUN 21 /* overrun events */
54#define CPC_MSG_TYPE_CAN_FRAME_ERROR 23 /* detected bus errors */
55#define CPC_MSG_TYPE_ERR_COUNTER 25 /* RX/TX error counter */
56
57/* Messages from the PC to the CPC interface */
58#define CPC_CMD_TYPE_CAN_FRAME 1 /* CAN data frame */
59#define CPC_CMD_TYPE_CONTROL 3 /* control of interface behavior */
60#define CPC_CMD_TYPE_CAN_PARAMS 6 /* set CAN parameters */
61#define CPC_CMD_TYPE_RTR_FRAME 13 /* CAN remote frame */
62#define CPC_CMD_TYPE_CAN_STATE 14 /* CAN state message */
63#define CPC_CMD_TYPE_EXT_CAN_FRAME 15 /* Extended CAN data frame */
64#define CPC_CMD_TYPE_EXT_RTR_FRAME 16 /* Extended CAN remote frame */
65#define CPC_CMD_TYPE_CAN_EXIT 200 /* exit the CAN */
66
67#define CPC_CMD_TYPE_INQ_ERR_COUNTER 25 /* request the CAN error counters */
68#define CPC_CMD_TYPE_CLEAR_MSG_QUEUE 8 /* clear CPC_MSG queue */
69#define CPC_CMD_TYPE_CLEAR_CMD_QUEUE 28 /* clear CPC_CMD queue */
70
71#define CPC_CC_TYPE_SJA1000 2 /* Philips basic CAN controller */
72
73#define CPC_CAN_ECODE_ERRFRAME 0x01 /* Ecode type */
74
75/* Overrun types */
76#define CPC_OVR_EVENT_CAN 0x01
77#define CPC_OVR_EVENT_CANSTATE 0x02
78#define CPC_OVR_EVENT_BUSERROR 0x04
79
80/*
81 * If the CAN controller lost a message we indicate it with the highest bit
82 * set in the count field.
83 */
84#define CPC_OVR_HW 0x80
85
86/* Size of the "struct ems_cpc_msg" without the union */
87#define CPC_MSG_HEADER_LEN 11
88#define CPC_CAN_MSG_MIN_SIZE 5
89
90/* Define these values to match your devices */
91#define USB_CPCUSB_VENDOR_ID 0x12D6
92
93#define USB_CPCUSB_ARM7_PRODUCT_ID 0x0444
94
95/* Mode register NXP LPC2119/SJA1000 CAN Controller */
96#define SJA1000_MOD_NORMAL 0x00
97#define SJA1000_MOD_RM 0x01
98
99/* ECC register NXP LPC2119/SJA1000 CAN Controller */
100#define SJA1000_ECC_SEG 0x1F
101#define SJA1000_ECC_DIR 0x20
102#define SJA1000_ECC_ERR 0x06
103#define SJA1000_ECC_BIT 0x00
104#define SJA1000_ECC_FORM 0x40
105#define SJA1000_ECC_STUFF 0x80
106#define SJA1000_ECC_MASK 0xc0
107
108/* Status register content */
109#define SJA1000_SR_BS 0x80
110#define SJA1000_SR_ES 0x40
111
112#define SJA1000_DEFAULT_OUTPUT_CONTROL 0xDA
113
114/*
115 * The device actually uses a 16MHz clock to generate the CAN clock
116 * but it expects SJA1000 bit settings based on 8MHz (is internally
117 * converted).
118 */
119#define EMS_USB_ARM7_CLOCK 8000000
120
/*
 * CAN-Message representation in a CPC_MSG. Message object type is
 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
 * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME.
 *
 * NOTE(review): this struct is exchanged verbatim with the USB device;
 * `id` is read/written without byte-order conversion, which presumably
 * assumes a little-endian host — verify on big-endian platforms.
 */
struct cpc_can_msg {
	u32 id;		/* CAN identifier (11 or 29 bit, per message type) */
	u8 length;	/* number of valid bytes in msg[] */
	u8 msg[8];	/* CAN payload */
};
131
132/* Representation of the CAN parameters for the SJA1000 controller */
133struct cpc_sja1000_params {
134 u8 mode;
135 u8 acc_code0;
136 u8 acc_code1;
137 u8 acc_code2;
138 u8 acc_code3;
139 u8 acc_mask0;
140 u8 acc_mask1;
141 u8 acc_mask2;
142 u8 acc_mask3;
143 u8 btr0;
144 u8 btr1;
145 u8 outp_contr;
146};
147
148/* CAN params message representation */
149struct cpc_can_params {
150 u8 cc_type;
151
152 /* Will support M16C CAN controller in the future */
153 union {
154 struct cpc_sja1000_params sja1000;
155 } cc_params;
156};
157
158/* Structure for confirmed message handling */
159struct cpc_confirm {
160 u8 error; /* error code */
161};
162
163/* Structure for overrun conditions */
164struct cpc_overrun {
165 u8 event;
166 u8 count;
167};
168
169/* SJA1000 CAN errors (compatible to NXP LPC2119) */
170struct cpc_sja1000_can_error {
171 u8 ecc;
172 u8 rxerr;
173 u8 txerr;
174};
175
176/* structure for CAN error conditions */
177struct cpc_can_error {
178 u8 ecode;
179
180 struct {
181 u8 cc_type;
182
183 /* Other controllers may also provide error code capture regs */
184 union {
185 struct cpc_sja1000_can_error sja1000;
186 } regs;
187 } cc;
188};
189
190/*
191 * Structure containing RX/TX error counter. This structure is used to request
192 * the values of the CAN controllers TX and RX error counter.
193 */
194struct cpc_can_err_counter {
195 u8 rx;
196 u8 tx;
197};
198
/*
 * Main message type used between library and application.
 *
 * This is the on-the-wire record exchanged with the CPC firmware, hence
 * the packed attribute: the device expects the 11-byte header (type,
 * length, msgid, ts_sec, ts_nsec = CPC_MSG_HEADER_LEN) followed
 * immediately by `length` bytes of the union.
 */
struct __attribute__ ((packed)) ems_cpc_msg {
	u8 type;	/* type of message */
	u8 length;	/* length of data within union 'msg' */
	u8 msgid;	/* confirmation handle */
	u32 ts_sec;	/* timestamp in seconds */
	u32 ts_nsec;	/* timestamp in nano seconds */

	union {
		u8 generic[64];
		struct cpc_can_msg can_msg;
		struct cpc_can_params can_params;
		struct cpc_confirm confirmation;
		struct cpc_overrun overrun;
		struct cpc_can_error error;
		struct cpc_can_err_counter err_counter;
		u8 can_state;
	} msg;
};
218
/*
 * Table of devices that work with this driver
 * NOTE: This driver supports only CPC-USB/ARM7 (LPC2119) yet.
 */
static struct usb_device_id ems_usb_table[] = {
	{USB_DEVICE(USB_CPCUSB_VENDOR_ID, USB_CPCUSB_ARM7_PRODUCT_ID)},
	{} /* Terminating entry */
};

/* let the hotplug/modprobe machinery auto-load us for matching devices */
MODULE_DEVICE_TABLE(usb, ems_usb_table);
229
230#define RX_BUFFER_SIZE 64
231#define CPC_HEADER_SIZE 4
232#define INTR_IN_BUFFER_SIZE 4
233
234#define MAX_RX_URBS 10
235#define MAX_TX_URBS CAN_ECHO_SKB_MAX
236
237struct ems_usb;
238
/* Per-TX-URB bookkeeping, passed as the urb context to the completion. */
struct ems_tx_urb_context {
	struct ems_usb *dev;	/* owning device */

	u32 echo_index;		/* slot in the can echo skb array;
				 * MAX_TX_URBS marks the context as free */
	u8 dlc;			/* data length, for tx_bytes accounting */
};
245
/* Per-device private state of the CPC-USB adapter. */
struct ems_usb {
	struct can_priv can; /* must be the first member */
	int open_time;

	struct sk_buff *echo_skb[MAX_TX_URBS];

	struct usb_device *udev;
	struct net_device *netdev;

	atomic_t active_tx_urbs;		/* in-flight TX URBs, bounded by MAX_TX_URBS */
	struct usb_anchor tx_submitted;
	struct ems_tx_urb_context tx_contexts[MAX_TX_URBS];

	struct usb_anchor rx_submitted;

	struct urb *intr_urb;			/* interrupt-in URB reporting free slots */

	u8 *tx_msg_buffer;			/* scratch buffer for synchronous commands */

	u8 *intr_in_buffer;
	unsigned int free_slots; /* remember number of available slots */

	struct ems_cpc_msg active_params; /* active controller parameters */
};
270
/*
 * Completion handler for the interrupt-in URB. The device reports the
 * number of free TX slots in byte 1 of the buffer; the URB is
 * resubmitted unless it was unlinked or the device went away.
 */
static void ems_usb_read_interrupt_callback(struct urb *urb)
{
	struct ems_usb *dev = urb->context;
	struct net_device *netdev = dev->netdev;
	int err;

	if (!netif_device_present(netdev))
		return;

	switch (urb->status) {
	case 0:
		dev->free_slots = dev->intr_in_buffer[1];
		break;

	case -ECONNRESET: /* unlink */
	case -ENOENT:
	case -ESHUTDOWN:
		/* URB was killed/unlinked on purpose: do not resubmit */
		return;

	default:
		dev_info(netdev->dev.parent, "Rx interrupt aborted %d\n",
			 urb->status);
		break;
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);

	if (err == -ENODEV)
		netif_device_detach(netdev);
	else if (err)
		dev_err(netdev->dev.parent,
			"failed resubmitting intr urb: %d\n", err);

	return;
}
306
307static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
308{
309 struct can_frame *cf;
310 struct sk_buff *skb;
311 int i;
312 struct net_device_stats *stats = &dev->netdev->stats;
313
314 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
315 if (skb == NULL)
316 return;
317
318 skb->protocol = htons(ETH_P_CAN);
319
320 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
321
322 cf->can_id = msg->msg.can_msg.id;
323 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
324
325 if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME
326 || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
327 cf->can_id |= CAN_EFF_FLAG;
328
329 if (msg->type == CPC_MSG_TYPE_RTR_FRAME
330 || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) {
331 cf->can_id |= CAN_RTR_FLAG;
332 } else {
333 for (i = 0; i < cf->can_dlc; i++)
334 cf->data[i] = msg->msg.can_msg.msg[i];
335 }
336
337 netif_rx(skb);
338
339 stats->rx_packets++;
340 stats->rx_bytes += cf->can_dlc;
341}
342
343static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
344{
345 struct can_frame *cf;
346 struct sk_buff *skb;
347 struct net_device_stats *stats = &dev->netdev->stats;
348
349 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
350 if (skb == NULL)
351 return;
352
353 skb->protocol = htons(ETH_P_CAN);
354
355 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
356 memset(cf, 0, sizeof(struct can_frame));
357
358 cf->can_id = CAN_ERR_FLAG;
359 cf->can_dlc = CAN_ERR_DLC;
360
361 if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
362 u8 state = msg->msg.can_state;
363
364 if (state & SJA1000_SR_BS) {
365 dev->can.state = CAN_STATE_BUS_OFF;
366 cf->can_id |= CAN_ERR_BUSOFF;
367
368 can_bus_off(dev->netdev);
369 } else if (state & SJA1000_SR_ES) {
370 dev->can.state = CAN_STATE_ERROR_WARNING;
371 dev->can.can_stats.error_warning++;
372 } else {
373 dev->can.state = CAN_STATE_ERROR_ACTIVE;
374 dev->can.can_stats.error_passive++;
375 }
376 } else if (msg->type == CPC_MSG_TYPE_CAN_FRAME_ERROR) {
377 u8 ecc = msg->msg.error.cc.regs.sja1000.ecc;
378 u8 txerr = msg->msg.error.cc.regs.sja1000.txerr;
379 u8 rxerr = msg->msg.error.cc.regs.sja1000.rxerr;
380
381 /* bus error interrupt */
382 dev->can.can_stats.bus_error++;
383 stats->rx_errors++;
384
385 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
386
387 switch (ecc & SJA1000_ECC_MASK) {
388 case SJA1000_ECC_BIT:
389 cf->data[2] |= CAN_ERR_PROT_BIT;
390 break;
391 case SJA1000_ECC_FORM:
392 cf->data[2] |= CAN_ERR_PROT_FORM;
393 break;
394 case SJA1000_ECC_STUFF:
395 cf->data[2] |= CAN_ERR_PROT_STUFF;
396 break;
397 default:
398 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
399 cf->data[3] = ecc & SJA1000_ECC_SEG;
400 break;
401 }
402
403 /* Error occured during transmission? */
404 if ((ecc & SJA1000_ECC_DIR) == 0)
405 cf->data[2] |= CAN_ERR_PROT_TX;
406
407 if (dev->can.state == CAN_STATE_ERROR_WARNING ||
408 dev->can.state == CAN_STATE_ERROR_PASSIVE) {
409 cf->data[1] = (txerr > rxerr) ?
410 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
411 }
412 } else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
413 cf->can_id |= CAN_ERR_CRTL;
414 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
415
416 stats->rx_over_errors++;
417 stats->rx_errors++;
418 }
419
420 netif_rx(skb);
421
422 stats->rx_packets++;
423 stats->rx_bytes += cf->can_dlc;
424}
425
/*
 * callback for bulk IN urb
 *
 * One bulk transfer carries a 4-byte header (message count in byte 0,
 * bit 7 = "more data pending") followed by a sequence of ems_cpc_msg
 * records. Each record is dispatched by type, then the URB is
 * resubmitted.
 */
static void ems_usb_read_bulk_callback(struct urb *urb)
{
	struct ems_usb *dev = urb->context;
	struct net_device *netdev;
	int retval;

	netdev = dev->netdev;

	if (!netif_device_present(netdev))
		return;

	switch (urb->status) {
	case 0: /* success */
		break;

	case -ENOENT:
		/* URB was unlinked: do not resubmit */
		return;

	default:
		dev_info(netdev->dev.parent, "Rx URB aborted (%d)\n",
			 urb->status);
		goto resubmit_urb;
	}

	if (urb->actual_length > CPC_HEADER_SIZE) {
		struct ems_cpc_msg *msg;
		u8 *ibuf = urb->transfer_buffer;
		u8 msg_count, again, start;

		msg_count = ibuf[0] & ~0x80;
		again = ibuf[0] & 0x80;

		start = CPC_HEADER_SIZE;

		while (msg_count) {
			msg = (struct ems_cpc_msg *)&ibuf[start];

			switch (msg->type) {
			case CPC_MSG_TYPE_CAN_STATE:
				/* Process CAN state changes */
				ems_usb_rx_err(dev, msg);
				break;

			case CPC_MSG_TYPE_CAN_FRAME:
			case CPC_MSG_TYPE_EXT_CAN_FRAME:
			case CPC_MSG_TYPE_RTR_FRAME:
			case CPC_MSG_TYPE_EXT_RTR_FRAME:
				ems_usb_rx_can_msg(dev, msg);
				break;

			case CPC_MSG_TYPE_CAN_FRAME_ERROR:
				/* Process errorframe */
				ems_usb_rx_err(dev, msg);
				break;

			case CPC_MSG_TYPE_OVERRUN:
				/* Message lost while receiving */
				ems_usb_rx_err(dev, msg);
				break;
			}

			start += CPC_MSG_HEADER_LEN + msg->length;
			msg_count--;

			/*
			 * NOTE(review): the bounds check runs only after
			 * the record has already been parsed; a corrupt
			 * device-supplied length could read past the
			 * buffer before this fires — worth hardening.
			 */
			if (start > urb->transfer_buffer_length) {
				dev_err(netdev->dev.parent, "format error\n");
				break;
			}
		}
	}

resubmit_urb:
	usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
			  urb->transfer_buffer, RX_BUFFER_SIZE,
			  ems_usb_read_bulk_callback, dev);

	retval = usb_submit_urb(urb, GFP_ATOMIC);

	if (retval == -ENODEV)
		netif_device_detach(netdev);
	else if (retval)
		dev_err(netdev->dev.parent,
			"failed resubmitting read bulk urb: %d\n", retval);

	return;
}
515
/*
 * callback for bulk OUT (transmit) urb
 *
 * Frees the DMA buffer, accounts the transmitted frame, loops the echo
 * skb back to the stack and releases the TX context slot.
 */
static void ems_usb_write_bulk_callback(struct urb *urb)
{
	struct ems_tx_urb_context *context = urb->context;
	struct ems_usb *dev;
	struct net_device *netdev;

	BUG_ON(!context);

	dev = context->dev;
	netdev = dev->netdev;

	/* free up our allocated buffer */
	usb_buffer_free(urb->dev, urb->transfer_buffer_length,
			urb->transfer_buffer, urb->transfer_dma);

	atomic_dec(&dev->active_tx_urbs);

	if (!netif_device_present(netdev))
		return;

	if (urb->status)
		dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n",
			 urb->status);

	netdev->trans_start = jiffies;

	/* transmission complete interrupt */
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += context->dlc;

	can_get_echo_skb(netdev, context->echo_index);

	/* Release context; MAX_TX_URBS marks the slot as free */
	context->echo_index = MAX_TX_URBS;

	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}
557
/*
 * Send the given CPC command synchronously
 *
 * Prepends the 4-byte CPC bulk header (zeroed) to @msg in the shared
 * tx_msg_buffer and blocks up to 1s in usb_bulk_msg(). Returns the
 * usb_bulk_msg() result; callers must not run in atomic context.
 */
static int ems_usb_command_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
{
	int actual_length;

	/* Copy payload */
	memcpy(&dev->tx_msg_buffer[CPC_HEADER_SIZE], msg,
	       msg->length + CPC_MSG_HEADER_LEN);

	/* Clear header */
	memset(&dev->tx_msg_buffer[0], 0, CPC_HEADER_SIZE);

	return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
			    &dev->tx_msg_buffer[0],
			    msg->length + CPC_MSG_HEADER_LEN + CPC_HEADER_SIZE,
			    &actual_length, 1000);
}
577
578/*
579 * Change CAN controllers' mode register
580 */
581static int ems_usb_write_mode(struct ems_usb *dev, u8 mode)
582{
583 dev->active_params.msg.can_params.cc_params.sja1000.mode = mode;
584
585 return ems_usb_command_msg(dev, &dev->active_params);
586}
587
588/*
589 * Send a CPC_Control command to change behaviour when interface receives a CAN
590 * message, bus error or CAN state changed notifications.
591 */
592static int ems_usb_control_cmd(struct ems_usb *dev, u8 val)
593{
594 struct ems_cpc_msg cmd;
595
596 cmd.type = CPC_CMD_TYPE_CONTROL;
597 cmd.length = CPC_MSG_HEADER_LEN + 1;
598
599 cmd.msgid = 0;
600
601 cmd.msg.generic[0] = val;
602
603 return ems_usb_command_msg(dev, &cmd);
604}
605
/*
 * Start interface.
 *
 * Submits the rx bulk urbs and the interrupt urb, then enables message,
 * state-change and bus-error forwarding on the device and switches the
 * SJA1000 core to normal mode.  Returns 0 on success or a negative
 * errno; on -ENODEV the netdev is detached.
 */
static int ems_usb_start(struct ems_usb *dev)
{
	struct net_device *netdev = dev->netdev;
	int err, i;

	dev->intr_in_buffer[0] = 0;
	dev->free_slots = 15; /* initial size */

	/* allocate and submit the pool of rx bulk urbs */
	for (i = 0; i < MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf = NULL;

		/* create a URB, and a buffer for it */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			dev_err(netdev->dev.parent,
				"No memory left for URBs\n");
			return -ENOMEM;
		}

		buf = usb_buffer_alloc(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
				       &urb->transfer_dma);
		if (!buf) {
			dev_err(netdev->dev.parent,
				"No memory left for USB buffer\n");
			usb_free_urb(urb);
			return -ENOMEM;
		}

		usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
				  buf, RX_BUFFER_SIZE,
				  ems_usb_read_bulk_callback, dev);
		/* buf is coherent DMA memory; no mapping by the core */
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &dev->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			if (err == -ENODEV)
				netif_device_detach(dev->netdev);

			usb_unanchor_urb(urb);
			usb_buffer_free(dev->udev, RX_BUFFER_SIZE, buf,
					urb->transfer_dma);
			break;
		}

		/* Drop reference, USB core will take care of freeing it */
		usb_free_urb(urb);
	}

	/* Did we submit any URBs */
	if (i == 0) {
		/* err was set by the failed usb_submit_urb() above */
		dev_warn(netdev->dev.parent, "couldn't setup read URBs\n");
		return err;
	}

	/* Warn if we've couldn't transmit all the URBs */
	if (i < MAX_RX_URBS)
		dev_warn(netdev->dev.parent, "rx performance may be slow\n");

	/* Setup and start interrupt URB */
	usb_fill_int_urb(dev->intr_urb, dev->udev,
			 usb_rcvintpipe(dev->udev, 1),
			 dev->intr_in_buffer,
			 INTR_IN_BUFFER_SIZE,
			 ems_usb_read_interrupt_callback, dev, 1);

	err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
	if (err) {
		if (err == -ENODEV)
			netif_device_detach(dev->netdev);

		dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n",
			 err);

		return err;
	}

	/* CPC-USB will transfer received message to host */
	err = ems_usb_control_cmd(dev, CONTR_CAN_MESSAGE | CONTR_CONT_ON);
	if (err)
		goto failed;

	/* CPC-USB will transfer CAN state changes to host */
	err = ems_usb_control_cmd(dev, CONTR_CAN_STATE | CONTR_CONT_ON);
	if (err)
		goto failed;

	/* CPC-USB will transfer bus errors to host */
	err = ems_usb_control_cmd(dev, CONTR_BUS_ERROR | CONTR_CONT_ON);
	if (err)
		goto failed;

	/* leave reset mode; the controller goes on the bus */
	err = ems_usb_write_mode(dev, SJA1000_MOD_NORMAL);
	if (err)
		goto failed;

	dev->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

failed:
	if (err == -ENODEV)
		netif_device_detach(dev->netdev);

	dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err);

	return err;
}
718
719static void unlink_all_urbs(struct ems_usb *dev)
720{
721 int i;
722
723 usb_unlink_urb(dev->intr_urb);
724
725 usb_kill_anchored_urbs(&dev->rx_submitted);
726
727 usb_kill_anchored_urbs(&dev->tx_submitted);
728 atomic_set(&dev->active_tx_urbs, 0);
729
730 for (i = 0; i < MAX_TX_URBS; i++)
731 dev->tx_contexts[i].echo_index = MAX_TX_URBS;
732}
733
734static int ems_usb_open(struct net_device *netdev)
735{
736 struct ems_usb *dev = netdev_priv(netdev);
737 int err;
738
739 err = ems_usb_write_mode(dev, SJA1000_MOD_RM);
740 if (err)
741 return err;
742
743 /* common open */
744 err = open_candev(netdev);
745 if (err)
746 return err;
747
748 /* finally start device */
749 err = ems_usb_start(dev);
750 if (err) {
751 if (err == -ENODEV)
752 netif_device_detach(dev->netdev);
753
754 dev_warn(netdev->dev.parent, "couldn't start device: %d\n",
755 err);
756
757 close_candev(netdev);
758
759 return err;
760 }
761
762 dev->open_time = jiffies;
763
764 netif_start_queue(netdev);
765
766 return 0;
767}
768
769static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev)
770{
771 struct ems_usb *dev = netdev_priv(netdev);
772 struct ems_tx_urb_context *context = NULL;
773 struct net_device_stats *stats = &netdev->stats;
774 struct can_frame *cf = (struct can_frame *)skb->data;
775 struct ems_cpc_msg *msg;
776 struct urb *urb;
777 u8 *buf;
778 int i, err;
779 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
780 + sizeof(struct cpc_can_msg);
781
782 /* create a URB, and a buffer for it, and copy the data to the URB */
783 urb = usb_alloc_urb(0, GFP_ATOMIC);
784 if (!urb) {
785 dev_err(netdev->dev.parent, "No memory left for URBs\n");
786 goto nomem;
787 }
788
789 buf = usb_buffer_alloc(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
790 if (!buf) {
791 dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
792 usb_free_urb(urb);
793 goto nomem;
794 }
795
796 msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
797
798 msg->msg.can_msg.id = cf->can_id & CAN_ERR_MASK;
799 msg->msg.can_msg.length = cf->can_dlc;
800
801 if (cf->can_id & CAN_RTR_FLAG) {
802 msg->type = cf->can_id & CAN_EFF_FLAG ?
803 CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME;
804
805 msg->length = CPC_CAN_MSG_MIN_SIZE;
806 } else {
807 msg->type = cf->can_id & CAN_EFF_FLAG ?
808 CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME;
809
810 for (i = 0; i < cf->can_dlc; i++)
811 msg->msg.can_msg.msg[i] = cf->data[i];
812
813 msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
814 }
815
816 for (i = 0; i < MAX_TX_URBS; i++) {
817 if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
818 context = &dev->tx_contexts[i];
819 break;
820 }
821 }
822
823 /*
824 * May never happen! When this happens we'd more URBs in flight as
825 * allowed (MAX_TX_URBS).
826 */
827 if (!context) {
828 usb_unanchor_urb(urb);
829 usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
830
831 dev_warn(netdev->dev.parent, "couldn't find free context\n");
832
833 return NETDEV_TX_BUSY;
834 }
835
836 context->dev = dev;
837 context->echo_index = i;
838 context->dlc = cf->can_dlc;
839
840 usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
841 size, ems_usb_write_bulk_callback, context);
842 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
843 usb_anchor_urb(urb, &dev->tx_submitted);
844
845 can_put_echo_skb(skb, netdev, context->echo_index);
846
847 atomic_inc(&dev->active_tx_urbs);
848
849 err = usb_submit_urb(urb, GFP_ATOMIC);
850 if (unlikely(err)) {
851 can_free_echo_skb(netdev, context->echo_index);
852
853 usb_unanchor_urb(urb);
854 usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
855 dev_kfree_skb(skb);
856
857 atomic_dec(&dev->active_tx_urbs);
858
859 if (err == -ENODEV) {
860 netif_device_detach(netdev);
861 } else {
862 dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err);
863
864 stats->tx_dropped++;
865 }
866 } else {
867 netdev->trans_start = jiffies;
868
869 /* Slow down tx path */
870 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
871 dev->free_slots < 5) {
872 netif_stop_queue(netdev);
873 }
874 }
875
876 /*
877 * Release our reference to this URB, the USB core will eventually free
878 * it entirely.
879 */
880 usb_free_urb(urb);
881
882 return NETDEV_TX_OK;
883
884nomem:
885 if (skb)
886 dev_kfree_skb(skb);
887
888 stats->tx_dropped++;
889
890 return NETDEV_TX_OK;
891}
892
/*
 * ndo_stop: take the interface down.
 *
 * Cancels all urbs, stops the tx queue, puts the SJA1000 core back
 * into reset mode and performs the common CAN close.  Always returns 0.
 */
static int ems_usb_close(struct net_device *netdev)
{
	struct ems_usb *dev = netdev_priv(netdev);

	/* Stop polling */
	unlink_all_urbs(dev);

	netif_stop_queue(netdev);

	/* Set CAN controller to reset mode */
	if (ems_usb_write_mode(dev, SJA1000_MOD_RM))
		dev_warn(netdev->dev.parent, "couldn't stop device");

	close_candev(netdev);

	/* open_time == 0 makes ems_usb_set_mode() reject further requests */
	dev->open_time = 0;

	return 0;
}
912
/* Network device operations for the CPC-USB CAN interface */
static const struct net_device_ops ems_usb_netdev_ops = {
	.ndo_open = ems_usb_open,
	.ndo_stop = ems_usb_close,
	.ndo_start_xmit = ems_usb_start_xmit,
};
918
/*
 * Bit-timing limits handed to the CAN core; used to derive the BTR0/BTR1
 * values written in ems_usb_set_bittiming().  The ranges correspond to
 * the SJA1000-style register fields (6-bit BRP, 2-bit SJW, 4-bit TSEG1,
 * 3-bit TSEG2) — presumably matching the controller emulated by the
 * CPC-USB firmware; confirm against the device documentation.
 */
static struct can_bittiming_const ems_usb_bittiming_const = {
	.name = "ems_usb",
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};
930
931static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode)
932{
933 struct ems_usb *dev = netdev_priv(netdev);
934
935 if (!dev->open_time)
936 return -EINVAL;
937
938 switch (mode) {
939 case CAN_MODE_START:
940 if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL))
941 dev_warn(netdev->dev.parent, "couldn't start device");
942
943 if (netif_queue_stopped(netdev))
944 netif_wake_queue(netdev);
945 break;
946
947 default:
948 return -EOPNOTSUPP;
949 }
950
951 return 0;
952}
953
/*
 * do_set_bittiming callback: pack the computed bit timing into
 * SJA1000-style BTR0/BTR1 values, cache them in the active parameter
 * set and push the whole set to the device.
 */
static int ems_usb_set_bittiming(struct net_device *netdev)
{
	struct ems_usb *dev = netdev_priv(netdev);
	struct can_bittiming *bt = &dev->can.bittiming;
	u8 btr0, btr1;

	/* BTR0: bits 5..0 = BRP-1, bits 7..6 = SJW-1 */
	btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
	/* BTR1: bits 3..0 = TSEG1-1 (prop_seg + phase_seg1),
	 * bits 6..4 = TSEG2-1 */
	btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
		(((bt->phase_seg2 - 1) & 0x7) << 4);
	/* bit 7 enables triple sampling */
	if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		btr1 |= 0x80;

	dev_info(netdev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
		 btr0, btr1);

	dev->active_params.msg.can_params.cc_params.sja1000.btr0 = btr0;
	dev->active_params.msg.can_params.cc_params.sja1000.btr1 = btr1;

	return ems_usb_command_msg(dev, &dev->active_params);
}
974
975static void init_params_sja1000(struct ems_cpc_msg *msg)
976{
977 struct cpc_sja1000_params *sja1000 =
978 &msg->msg.can_params.cc_params.sja1000;
979
980 msg->type = CPC_CMD_TYPE_CAN_PARAMS;
981 msg->length = sizeof(struct cpc_can_params);
982 msg->msgid = 0;
983
984 msg->msg.can_params.cc_type = CPC_CC_TYPE_SJA1000;
985
986 /* Acceptance filter open */
987 sja1000->acc_code0 = 0x00;
988 sja1000->acc_code1 = 0x00;
989 sja1000->acc_code2 = 0x00;
990 sja1000->acc_code3 = 0x00;
991
992 /* Acceptance filter open */
993 sja1000->acc_mask0 = 0xFF;
994 sja1000->acc_mask1 = 0xFF;
995 sja1000->acc_mask2 = 0xFF;
996 sja1000->acc_mask3 = 0xFF;
997
998 sja1000->btr0 = 0;
999 sja1000->btr1 = 0;
1000
1001 sja1000->outp_contr = SJA1000_DEFAULT_OUTPUT_CONTROL;
1002 sja1000->mode = SJA1000_MOD_RM;
1003}
1004
1005/*
1006 * probe function for new CPC-USB devices
1007 */
1008static int ems_usb_probe(struct usb_interface *intf,
1009 const struct usb_device_id *id)
1010{
1011 struct net_device *netdev;
1012 struct ems_usb *dev;
1013 int i, err = -ENOMEM;
1014
1015 netdev = alloc_candev(sizeof(struct ems_usb));
1016 if (!netdev) {
1017 dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
1018 return -ENOMEM;
1019 }
1020
1021 dev = netdev_priv(netdev);
1022
1023 dev->udev = interface_to_usbdev(intf);
1024 dev->netdev = netdev;
1025
1026 dev->can.state = CAN_STATE_STOPPED;
1027 dev->can.clock.freq = EMS_USB_ARM7_CLOCK;
1028 dev->can.bittiming_const = &ems_usb_bittiming_const;
1029 dev->can.do_set_bittiming = ems_usb_set_bittiming;
1030 dev->can.do_set_mode = ems_usb_set_mode;
1031
1032 netdev->flags |= IFF_ECHO; /* we support local echo */
1033
1034 netdev->netdev_ops = &ems_usb_netdev_ops;
1035
1036 netdev->flags |= IFF_ECHO; /* we support local echo */
1037
1038 init_usb_anchor(&dev->rx_submitted);
1039
1040 init_usb_anchor(&dev->tx_submitted);
1041 atomic_set(&dev->active_tx_urbs, 0);
1042
1043 for (i = 0; i < MAX_TX_URBS; i++)
1044 dev->tx_contexts[i].echo_index = MAX_TX_URBS;
1045
1046 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
1047 if (!dev->intr_urb) {
1048 dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n");
1049 goto cleanup_candev;
1050 }
1051
1052 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
1053 if (!dev->intr_in_buffer) {
1054 dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n");
1055 goto cleanup_intr_urb;
1056 }
1057
1058 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
1059 sizeof(struct ems_cpc_msg), GFP_KERNEL);
1060 if (!dev->tx_msg_buffer) {
1061 dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n");
1062 goto cleanup_intr_in_buffer;
1063 }
1064
1065 usb_set_intfdata(intf, dev);
1066
1067 SET_NETDEV_DEV(netdev, &intf->dev);
1068
1069 init_params_sja1000(&dev->active_params);
1070
1071 err = ems_usb_command_msg(dev, &dev->active_params);
1072 if (err) {
1073 dev_err(netdev->dev.parent,
1074 "couldn't initialize controller: %d\n", err);
1075 goto cleanup_tx_msg_buffer;
1076 }
1077
1078 err = register_candev(netdev);
1079 if (err) {
1080 dev_err(netdev->dev.parent,
1081 "couldn't register CAN device: %d\n", err);
1082 goto cleanup_tx_msg_buffer;
1083 }
1084
1085 return 0;
1086
1087cleanup_tx_msg_buffer:
1088 kfree(dev->tx_msg_buffer);
1089
1090cleanup_intr_in_buffer:
1091 kfree(dev->intr_in_buffer);
1092
1093cleanup_intr_urb:
1094 usb_free_urb(dev->intr_urb);
1095
1096cleanup_candev:
1097 free_candev(netdev);
1098
1099 return err;
1100}
1101
1102/*
1103 * called by the usb core when the device is removed from the system
1104 */
1105static void ems_usb_disconnect(struct usb_interface *intf)
1106{
1107 struct ems_usb *dev = usb_get_intfdata(intf);
1108
1109 usb_set_intfdata(intf, NULL);
1110
1111 if (dev) {
1112 unregister_netdev(dev->netdev);
1113 free_candev(dev->netdev);
1114
1115 unlink_all_urbs(dev);
1116
1117 usb_free_urb(dev->intr_urb);
1118
1119 kfree(dev->intr_in_buffer);
1120 }
1121}
1122
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver ems_usb_driver = {
	.name = "ems_usb",
	.probe = ems_usb_probe,
	.disconnect = ems_usb_disconnect,
	/* ems_usb_table is defined earlier in this file */
	.id_table = ems_usb_table,
};
1130
1131static int __init ems_usb_init(void)
1132{
1133 int err;
1134
1135 printk(KERN_INFO "CPC-USB kernel driver loaded\n");
1136
1137 /* register this driver with the USB subsystem */
1138 err = usb_register(&ems_usb_driver);
1139
1140 if (err) {
1141 err("usb_register failed. Error number %d\n", err);
1142 return err;
1143 }
1144
1145 return 0;
1146}
1147
/* Module exit: deregister this driver with the USB subsystem */
static void __exit ems_usb_exit(void)
{
	/* deregister this driver with the USB subsystem */
	usb_deregister(&ems_usb_driver);
}
1153
/* module entry points */
module_init(ems_usb_init);
module_exit(ems_usb_exit);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index d45eacb76702..3bf1b04f2cab 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -85,8 +85,6 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
85 85
86 cp->uio_dev = iminor(inode); 86 cp->uio_dev = iminor(inode);
87 87
88 cnic_shutdown_bnx2_rx_ring(dev);
89
90 cnic_init_bnx2_tx_ring(dev); 88 cnic_init_bnx2_tx_ring(dev);
91 cnic_init_bnx2_rx_ring(dev); 89 cnic_init_bnx2_rx_ring(dev);
92 90
@@ -98,6 +96,8 @@ static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
98 struct cnic_dev *dev = uinfo->priv; 96 struct cnic_dev *dev = uinfo->priv;
99 struct cnic_local *cp = dev->cnic_priv; 97 struct cnic_local *cp = dev->cnic_priv;
100 98
99 cnic_shutdown_bnx2_rx_ring(dev);
100
101 cp->uio_dev = -1; 101 cp->uio_dev = -1;
102 return 0; 102 return 0;
103} 103}
@@ -2264,9 +2264,9 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
2264 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 2264 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
2265 2265
2266 if (sb_id == 0) 2266 if (sb_id == 0)
2267 val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT; 2267 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
2268 else 2268 else
2269 val = BNX2_L2CTX_STATUSB_NUM(sb_id); 2269 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
2270 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 2270 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
2271 2271
2272 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); 2272 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
@@ -2423,7 +2423,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2423 cp->int_num = 0; 2423 cp->int_num = 0;
2424 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 2424 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2425 u32 sb_id = cp->status_blk_num; 2425 u32 sb_id = cp->status_blk_num;
2426 u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id); 2426 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
2427 2427
2428 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 2428 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
2429 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 2429 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2733 cnic_ulp_init(dev); 2733 cnic_ulp_init(dev);
2734 else if (event == NETDEV_UNREGISTER) 2734 else if (event == NETDEV_UNREGISTER)
2735 cnic_ulp_exit(dev); 2735 cnic_ulp_exit(dev);
2736 else if (event == NETDEV_UP) { 2736
2737 if (event == NETDEV_UP) {
2737 if (cnic_register_netdev(dev) != 0) { 2738 if (cnic_register_netdev(dev) != 0) {
2738 cnic_put(dev); 2739 cnic_put(dev);
2739 goto done; 2740 goto done;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index a49235739eef..d8b09efdcb52 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.0.0" 15#define CNIC_MODULE_VERSION "2.0.1"
16#define CNIC_MODULE_RELDATE "May 21, 2009" 16#define CNIC_MODULE_RELDATE "Oct 01, 2009"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 3e3fab8afb1e..61f9da2b4943 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1109,7 +1109,7 @@ static int external_switch;
1109static int __devinit cpmac_probe(struct platform_device *pdev) 1109static int __devinit cpmac_probe(struct platform_device *pdev)
1110{ 1110{
1111 int rc, phy_id; 1111 int rc, phy_id;
1112 char mdio_bus_id[BUS_ID_SIZE]; 1112 char mdio_bus_id[MII_BUS_ID_SIZE];
1113 struct resource *mem; 1113 struct resource *mem;
1114 struct cpmac_priv *priv; 1114 struct cpmac_priv *priv;
1115 struct net_device *dev; 1115 struct net_device *dev;
@@ -1118,7 +1118,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1118 pdata = pdev->dev.platform_data; 1118 pdata = pdev->dev.platform_data;
1119 1119
1120 if (external_switch || dumb_switch) { 1120 if (external_switch || dumb_switch) {
1121 strncpy(mdio_bus_id, "0", BUS_ID_SIZE); /* fixed phys bus */ 1121 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
1122 phy_id = pdev->id; 1122 phy_id = pdev->id;
1123 } else { 1123 } else {
1124 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { 1124 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
@@ -1126,7 +1126,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1126 continue; 1126 continue;
1127 if (!cpmac_mii->phy_map[phy_id]) 1127 if (!cpmac_mii->phy_map[phy_id])
1128 continue; 1128 continue;
1129 strncpy(mdio_bus_id, cpmac_mii->id, BUS_ID_SIZE); 1129 strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
1130 break; 1130 break;
1131 } 1131 }
1132 } 1132 }
@@ -1167,7 +1167,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1167 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1167 priv->msg_enable = netif_msg_init(debug_level, 0xff);
1168 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); 1168 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
1169 1169
1170 snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 1170 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
1171 1171
1172 priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, 1172 priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
1173 PHY_INTERFACE_MODE_MII); 1173 PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 15c0195ebd31..a24be34a3f7a 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -768,10 +768,24 @@ e100_negotiate(struct net_device* dev)
768 768
769 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); 769 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
770 770
771 /* Renegotiate with link partner */ 771 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
772 if (autoneg_normal) { 772 if (autoneg_normal) {
773 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); 773 /* Renegotiate with link partner */
774 data |= BMCR_ANENABLE | BMCR_ANRESTART; 774 data |= BMCR_ANENABLE | BMCR_ANRESTART;
775 } else {
776 /* Don't negotiate speed or duplex */
777 data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
778
779 /* Set speed and duplex static */
780 if (current_speed_selection == 10)
781 data &= ~BMCR_SPEED100;
782 else
783 data |= BMCR_SPEED100;
784
785 if (current_duplex != full)
786 data &= ~BMCR_FULLDPLX;
787 else
788 data |= BMCR_FULLDPLX;
775 } 789 }
776 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); 790 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
777} 791}
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index d465eaa796c4..f72c56dec33c 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -200,6 +200,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
200/** NOTE:: For DM646x the IN_VECTOR has changed */ 200/** NOTE:: For DM646x the IN_VECTOR has changed */
201#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH) 201#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
202#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH) 202#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH)
203#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
204#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
205
203 206
204/* CPPI bit positions */ 207/* CPPI bit positions */
205#define EMAC_CPPI_SOP_BIT BIT(31) 208#define EMAC_CPPI_SOP_BIT BIT(31)
@@ -330,6 +333,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
330#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) 333#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
331#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02) 334#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02)
332 335
336/* EMAC Stats Clear Mask */
337#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
338
333/** net_buf_obj: EMAC network bufferdata structure 339/** net_buf_obj: EMAC network bufferdata structure
334 * 340 *
335 * EMAC network buffer data structure 341 * EMAC network buffer data structure
@@ -2167,7 +2173,11 @@ static int emac_poll(struct napi_struct *napi, int budget)
2167 emac_int_enable(priv); 2173 emac_int_enable(priv);
2168 } 2174 }
2169 2175
2170 if (unlikely(status & EMAC_DM644X_MAC_IN_VECTOR_HOST_INT)) { 2176 mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
2177 if (priv->version == EMAC_VERSION_2)
2178 mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
2179
2180 if (unlikely(status & mask)) {
2171 u32 ch, cause; 2181 u32 ch, cause;
2172 dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n"); 2182 dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
2173 netif_stop_queue(ndev); 2183 netif_stop_queue(ndev);
@@ -2541,40 +2551,49 @@ static int emac_dev_stop(struct net_device *ndev)
2541static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) 2551static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
2542{ 2552{
2543 struct emac_priv *priv = netdev_priv(ndev); 2553 struct emac_priv *priv = netdev_priv(ndev);
2554 u32 mac_control;
2555 u32 stats_clear_mask;
2544 2556
2545 /* update emac hardware stats and reset the registers*/ 2557 /* update emac hardware stats and reset the registers*/
2546 2558
2559 mac_control = emac_read(EMAC_MACCONTROL);
2560
2561 if (mac_control & EMAC_MACCONTROL_GMIIEN)
2562 stats_clear_mask = EMAC_STATS_CLR_MASK;
2563 else
2564 stats_clear_mask = 0;
2565
2547 priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES); 2566 priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
2548 emac_write(EMAC_RXMCASTFRAMES, EMAC_ALL_MULTI_REG_VALUE); 2567 emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
2549 2568
2550 priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) + 2569 priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
2551 emac_read(EMAC_TXSINGLECOLL) + 2570 emac_read(EMAC_TXSINGLECOLL) +
2552 emac_read(EMAC_TXMULTICOLL)); 2571 emac_read(EMAC_TXMULTICOLL));
2553 emac_write(EMAC_TXCOLLISION, EMAC_ALL_MULTI_REG_VALUE); 2572 emac_write(EMAC_TXCOLLISION, stats_clear_mask);
2554 emac_write(EMAC_TXSINGLECOLL, EMAC_ALL_MULTI_REG_VALUE); 2573 emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
2555 emac_write(EMAC_TXMULTICOLL, EMAC_ALL_MULTI_REG_VALUE); 2574 emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
2556 2575
2557 priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + 2576 priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
2558 emac_read(EMAC_RXJABBER) + 2577 emac_read(EMAC_RXJABBER) +
2559 emac_read(EMAC_RXUNDERSIZED)); 2578 emac_read(EMAC_RXUNDERSIZED));
2560 emac_write(EMAC_RXOVERSIZED, EMAC_ALL_MULTI_REG_VALUE); 2579 emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
2561 emac_write(EMAC_RXJABBER, EMAC_ALL_MULTI_REG_VALUE); 2580 emac_write(EMAC_RXJABBER, stats_clear_mask);
2562 emac_write(EMAC_RXUNDERSIZED, EMAC_ALL_MULTI_REG_VALUE); 2581 emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
2563 2582
2564 priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + 2583 priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
2565 emac_read(EMAC_RXMOFOVERRUNS)); 2584 emac_read(EMAC_RXMOFOVERRUNS));
2566 emac_write(EMAC_RXSOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); 2585 emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
2567 emac_write(EMAC_RXMOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); 2586 emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
2568 2587
2569 priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); 2588 priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
2570 emac_write(EMAC_RXDMAOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); 2589 emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
2571 2590
2572 priv->net_dev_stats.tx_carrier_errors += 2591 priv->net_dev_stats.tx_carrier_errors +=
2573 emac_read(EMAC_TXCARRIERSENSE); 2592 emac_read(EMAC_TXCARRIERSENSE);
2574 emac_write(EMAC_TXCARRIERSENSE, EMAC_ALL_MULTI_REG_VALUE); 2593 emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
2575 2594
2576 priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); 2595 priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
2577 emac_write(EMAC_TXUNDERRUN, EMAC_ALL_MULTI_REG_VALUE); 2596 emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
2578 2597
2579 return &priv->net_dev_stats; 2598 return &priv->net_dev_stats;
2580} 2599}
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 9686c1fa28f1..7a3bdac84abe 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -237,6 +237,7 @@
237 237
238#include <linux/module.h> 238#include <linux/module.h>
239#include <linux/kernel.h> 239#include <linux/kernel.h>
240#include <linux/sched.h>
240#include <linux/string.h> 241#include <linux/string.h>
241#include <linux/errno.h> 242#include <linux/errno.h>
242#include <linux/ioport.h> 243#include <linux/ioport.h>
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index 80817c2edfb3..fb1c924d79b4 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -50,7 +50,7 @@
50#define DM9000_RCSR 0x32 50#define DM9000_RCSR 0x32
51 51
52#define CHIPR_DM9000A 0x19 52#define CHIPR_DM9000A 0x19
53#define CHIPR_DM9000B 0x1B 53#define CHIPR_DM9000B 0x1A
54 54
55#define DM9000_MRCMDX 0xF0 55#define DM9000_MRCMDX 0xF0
56#define DM9000_MRCMD 0xF2 56#define DM9000_MRCMD 0xF2
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 679965c2bb86..5d2f48f02251 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -151,6 +151,7 @@
151#include <linux/moduleparam.h> 151#include <linux/moduleparam.h>
152#include <linux/kernel.h> 152#include <linux/kernel.h>
153#include <linux/types.h> 153#include <linux/types.h>
154#include <linux/sched.h>
154#include <linux/slab.h> 155#include <linux/slab.h>
155#include <linux/delay.h> 156#include <linux/delay.h>
156#include <linux/init.h> 157#include <linux/init.h>
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 1a4f89c66a26..42e2b7e21c29 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -149,7 +149,6 @@ do { \
149 149
150#define AUTO_ALL_MODES 0 150#define AUTO_ALL_MODES 0
151#define E1000_EEPROM_82544_APM 0x0004 151#define E1000_EEPROM_82544_APM 0x0004
152#define E1000_EEPROM_ICH8_APME 0x0004
153#define E1000_EEPROM_APME 0x0400 152#define E1000_EEPROM_APME 0x0400
154 153
155#ifndef E1000_MASTER_SLAVE 154#ifndef E1000_MASTER_SLAVE
@@ -293,7 +292,6 @@ struct e1000_adapter {
293 292
294 u64 hw_csum_err; 293 u64 hw_csum_err;
295 u64 hw_csum_good; 294 u64 hw_csum_good;
296 u64 rx_hdr_split;
297 u32 alloc_rx_buff_failed; 295 u32 alloc_rx_buff_failed;
298 u32 rx_int_delay; 296 u32 rx_int_delay;
299 u32 rx_abs_int_delay; 297 u32 rx_abs_int_delay;
@@ -317,7 +315,6 @@ struct e1000_adapter {
317 struct e1000_rx_ring test_rx_ring; 315 struct e1000_rx_ring test_rx_ring;
318 316
319 int msg_enable; 317 int msg_enable;
320 bool have_msi;
321 318
322 /* to not mess up cache alignment, always add to the bottom */ 319 /* to not mess up cache alignment, always add to the bottom */
323 bool tso_force; 320 bool tso_force;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 27f996a2010f..490b2b7cd3ab 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -82,7 +82,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
82 { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, 82 { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
83 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, 83 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
84 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, 84 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
85 { "rx_header_split", E1000_STAT(rx_hdr_split) },
86 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, 85 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
87 { "tx_smbus", E1000_STAT(stats.mgptc) }, 86 { "tx_smbus", E1000_STAT(stats.mgptc) },
88 { "rx_smbus", E1000_STAT(stats.mgprc) }, 87 { "rx_smbus", E1000_STAT(stats.mgprc) },
@@ -114,8 +113,6 @@ static int e1000_get_settings(struct net_device *netdev,
114 SUPPORTED_1000baseT_Full| 113 SUPPORTED_1000baseT_Full|
115 SUPPORTED_Autoneg | 114 SUPPORTED_Autoneg |
116 SUPPORTED_TP); 115 SUPPORTED_TP);
117 if (hw->phy_type == e1000_phy_ife)
118 ecmd->supported &= ~SUPPORTED_1000baseT_Full;
119 ecmd->advertising = ADVERTISED_TP; 116 ecmd->advertising = ADVERTISED_TP;
120 117
121 if (hw->autoneg == 1) { 118 if (hw->autoneg == 1) {
@@ -178,14 +175,6 @@ static int e1000_set_settings(struct net_device *netdev,
178 struct e1000_adapter *adapter = netdev_priv(netdev); 175 struct e1000_adapter *adapter = netdev_priv(netdev);
179 struct e1000_hw *hw = &adapter->hw; 176 struct e1000_hw *hw = &adapter->hw;
180 177
181 /* When SoL/IDER sessions are active, autoneg/speed/duplex
182 * cannot be changed */
183 if (e1000_check_phy_reset_block(hw)) {
184 DPRINTK(DRV, ERR, "Cannot change link characteristics "
185 "when SoL/IDER is active.\n");
186 return -EINVAL;
187 }
188
189 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 178 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
190 msleep(1); 179 msleep(1);
191 180
@@ -330,10 +319,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
330 else 319 else
331 netdev->features &= ~NETIF_F_TSO; 320 netdev->features &= ~NETIF_F_TSO;
332 321
333 if (data && (adapter->hw.mac_type > e1000_82547_rev_2)) 322 netdev->features &= ~NETIF_F_TSO6;
334 netdev->features |= NETIF_F_TSO6;
335 else
336 netdev->features &= ~NETIF_F_TSO6;
337 323
338 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 324 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
339 adapter->tso_force = true; 325 adapter->tso_force = true;
@@ -441,7 +427,6 @@ static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
441 regs_buff[24] = (u32)phy_data; /* phy local receiver status */ 427 regs_buff[24] = (u32)phy_data; /* phy local receiver status */
442 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 428 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
443 if (hw->mac_type >= e1000_82540 && 429 if (hw->mac_type >= e1000_82540 &&
444 hw->mac_type < e1000_82571 &&
445 hw->media_type == e1000_media_type_copper) { 430 hw->media_type == e1000_media_type_copper) {
446 regs_buff[26] = er32(MANC); 431 regs_buff[26] = er32(MANC);
447 } 432 }
@@ -554,10 +539,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
554 ret_val = e1000_write_eeprom(hw, first_word, 539 ret_val = e1000_write_eeprom(hw, first_word,
555 last_word - first_word + 1, eeprom_buff); 540 last_word - first_word + 1, eeprom_buff);
556 541
557 /* Update the checksum over the first part of the EEPROM if needed 542 /* Update the checksum over the first part of the EEPROM if needed */
558 * and flush shadow RAM for 82573 conrollers */ 543 if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG))
559 if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
560 (hw->mac_type == e1000_82573)))
561 e1000_update_eeprom_checksum(hw); 544 e1000_update_eeprom_checksum(hw);
562 545
563 kfree(eeprom_buff); 546 kfree(eeprom_buff);
@@ -568,31 +551,12 @@ static void e1000_get_drvinfo(struct net_device *netdev,
568 struct ethtool_drvinfo *drvinfo) 551 struct ethtool_drvinfo *drvinfo)
569{ 552{
570 struct e1000_adapter *adapter = netdev_priv(netdev); 553 struct e1000_adapter *adapter = netdev_priv(netdev);
571 struct e1000_hw *hw = &adapter->hw;
572 char firmware_version[32]; 554 char firmware_version[32];
573 u16 eeprom_data;
574 555
575 strncpy(drvinfo->driver, e1000_driver_name, 32); 556 strncpy(drvinfo->driver, e1000_driver_name, 32);
576 strncpy(drvinfo->version, e1000_driver_version, 32); 557 strncpy(drvinfo->version, e1000_driver_version, 32);
577 558
578 /* EEPROM image version # is reported as firmware version # for 559 sprintf(firmware_version, "N/A");
579 * 8257{1|2|3} controllers */
580 e1000_read_eeprom(hw, 5, 1, &eeprom_data);
581 switch (hw->mac_type) {
582 case e1000_82571:
583 case e1000_82572:
584 case e1000_82573:
585 case e1000_80003es2lan:
586 case e1000_ich8lan:
587 sprintf(firmware_version, "%d.%d-%d",
588 (eeprom_data & 0xF000) >> 12,
589 (eeprom_data & 0x0FF0) >> 4,
590 eeprom_data & 0x000F);
591 break;
592 default:
593 sprintf(firmware_version, "N/A");
594 }
595
596 strncpy(drvinfo->fw_version, firmware_version, 32); 560 strncpy(drvinfo->fw_version, firmware_version, 32);
597 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 561 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
598 drvinfo->regdump_len = e1000_get_regs_len(netdev); 562 drvinfo->regdump_len = e1000_get_regs_len(netdev);
@@ -781,21 +745,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
781 /* The status register is Read Only, so a write should fail. 745 /* The status register is Read Only, so a write should fail.
782 * Some bits that get toggled are ignored. 746 * Some bits that get toggled are ignored.
783 */ 747 */
784 switch (hw->mac_type) { 748
785 /* there are several bits on newer hardware that are r/w */ 749 /* there are several bits on newer hardware that are r/w */
786 case e1000_82571: 750 toggle = 0xFFFFF833;
787 case e1000_82572:
788 case e1000_80003es2lan:
789 toggle = 0x7FFFF3FF;
790 break;
791 case e1000_82573:
792 case e1000_ich8lan:
793 toggle = 0x7FFFF033;
794 break;
795 default:
796 toggle = 0xFFFFF833;
797 break;
798 }
799 751
800 before = er32(STATUS); 752 before = er32(STATUS);
801 value = (er32(STATUS) & toggle); 753 value = (er32(STATUS) & toggle);
@@ -810,12 +762,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
810 /* restore previous status */ 762 /* restore previous status */
811 ew32(STATUS, before); 763 ew32(STATUS, before);
812 764
813 if (hw->mac_type != e1000_ich8lan) { 765 REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
814 REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); 766 REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
815 REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); 767 REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
816 REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); 768 REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
817 REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
818 }
819 769
820 REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); 770 REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
821 REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 771 REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -830,8 +780,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
830 780
831 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); 781 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
832 782
833 before = (hw->mac_type == e1000_ich8lan ? 783 before = 0x06DFB3FE;
834 0x06C3B33E : 0x06DFB3FE);
835 REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); 784 REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
836 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); 785 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
837 786
@@ -839,12 +788,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
839 788
840 REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); 789 REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
841 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 790 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
842 if (hw->mac_type != e1000_ich8lan) 791 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
843 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
844 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 792 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
845 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); 793 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
846 value = (hw->mac_type == e1000_ich8lan ? 794 value = E1000_RAR_ENTRIES;
847 E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
848 for (i = 0; i < value; i++) { 795 for (i = 0; i < value; i++) {
849 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 796 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
850 0xFFFFFFFF); 797 0xFFFFFFFF);
@@ -859,8 +806,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
859 806
860 } 807 }
861 808
862 value = (hw->mac_type == e1000_ich8lan ? 809 value = E1000_MC_TBL_SIZE;
863 E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
864 for (i = 0; i < value; i++) 810 for (i = 0; i < value; i++)
865 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); 811 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
866 812
@@ -933,9 +879,6 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
933 /* Test each interrupt */ 879 /* Test each interrupt */
934 for (; i < 10; i++) { 880 for (; i < 10; i++) {
935 881
936 if (hw->mac_type == e1000_ich8lan && i == 8)
937 continue;
938
939 /* Interrupt to test */ 882 /* Interrupt to test */
940 mask = 1 << i; 883 mask = 1 << i;
941 884
@@ -1289,35 +1232,20 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1289 e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); 1232 e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
1290 /* autoneg off */ 1233 /* autoneg off */
1291 e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); 1234 e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
1292 } else if (hw->phy_type == e1000_phy_gg82563) 1235 }
1293 e1000_write_phy_reg(hw,
1294 GG82563_PHY_KMRN_MODE_CTRL,
1295 0x1CC);
1296 1236
1297 ctrl_reg = er32(CTRL); 1237 ctrl_reg = er32(CTRL);
1298 1238
1299 if (hw->phy_type == e1000_phy_ife) { 1239 /* force 1000, set loopback */
1300 /* force 100, set loopback */ 1240 e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
1301 e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
1302 1241
1303 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1242 /* Now set up the MAC to the same speed/duplex as the PHY. */
1304 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 1243 ctrl_reg = er32(CTRL);
1305 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 1244 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1306 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1245 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1307 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1246 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1308 E1000_CTRL_FD); /* Force Duplex to FULL */ 1247 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1309 } else { 1248 E1000_CTRL_FD); /* Force Duplex to FULL */
1310 /* force 1000, set loopback */
1311 e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
1312
1313 /* Now set up the MAC to the same speed/duplex as the PHY. */
1314 ctrl_reg = er32(CTRL);
1315 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1316 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1317 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1318 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1319 E1000_CTRL_FD); /* Force Duplex to FULL */
1320 }
1321 1249
1322 if (hw->media_type == e1000_media_type_copper && 1250 if (hw->media_type == e1000_media_type_copper &&
1323 hw->phy_type == e1000_phy_m88) 1251 hw->phy_type == e1000_phy_m88)
@@ -1373,14 +1301,8 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
1373 case e1000_82541_rev_2: 1301 case e1000_82541_rev_2:
1374 case e1000_82547: 1302 case e1000_82547:
1375 case e1000_82547_rev_2: 1303 case e1000_82547_rev_2:
1376 case e1000_82571:
1377 case e1000_82572:
1378 case e1000_82573:
1379 case e1000_80003es2lan:
1380 case e1000_ich8lan:
1381 return e1000_integrated_phy_loopback(adapter); 1304 return e1000_integrated_phy_loopback(adapter);
1382 break; 1305 break;
1383
1384 default: 1306 default:
1385 /* Default PHY loopback work is to read the MII 1307 /* Default PHY loopback work is to read the MII
1386 * control register and assert bit 14 (loopback mode). 1308 * control register and assert bit 14 (loopback mode).
@@ -1409,14 +1331,6 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
1409 case e1000_82546_rev_3: 1331 case e1000_82546_rev_3:
1410 return e1000_set_phy_loopback(adapter); 1332 return e1000_set_phy_loopback(adapter);
1411 break; 1333 break;
1412 case e1000_82571:
1413 case e1000_82572:
1414#define E1000_SERDES_LB_ON 0x410
1415 e1000_set_phy_loopback(adapter);
1416 ew32(SCTL, E1000_SERDES_LB_ON);
1417 msleep(10);
1418 return 0;
1419 break;
1420 default: 1334 default:
1421 rctl = er32(RCTL); 1335 rctl = er32(RCTL);
1422 rctl |= E1000_RCTL_LBM_TCVR; 1336 rctl |= E1000_RCTL_LBM_TCVR;
@@ -1440,26 +1354,12 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1440 ew32(RCTL, rctl); 1354 ew32(RCTL, rctl);
1441 1355
1442 switch (hw->mac_type) { 1356 switch (hw->mac_type) {
1443 case e1000_82571:
1444 case e1000_82572:
1445 if (hw->media_type == e1000_media_type_fiber ||
1446 hw->media_type == e1000_media_type_internal_serdes) {
1447#define E1000_SERDES_LB_OFF 0x400
1448 ew32(SCTL, E1000_SERDES_LB_OFF);
1449 msleep(10);
1450 break;
1451 }
1452 /* Fall Through */
1453 case e1000_82545: 1357 case e1000_82545:
1454 case e1000_82546: 1358 case e1000_82546:
1455 case e1000_82545_rev_3: 1359 case e1000_82545_rev_3:
1456 case e1000_82546_rev_3: 1360 case e1000_82546_rev_3:
1457 default: 1361 default:
1458 hw->autoneg = true; 1362 hw->autoneg = true;
1459 if (hw->phy_type == e1000_phy_gg82563)
1460 e1000_write_phy_reg(hw,
1461 GG82563_PHY_KMRN_MODE_CTRL,
1462 0x180);
1463 e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); 1363 e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
1464 if (phy_reg & MII_CR_LOOPBACK) { 1364 if (phy_reg & MII_CR_LOOPBACK) {
1465 phy_reg &= ~MII_CR_LOOPBACK; 1365 phy_reg &= ~MII_CR_LOOPBACK;
@@ -1560,17 +1460,6 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1560 1460
1561static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) 1461static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1562{ 1462{
1563 struct e1000_hw *hw = &adapter->hw;
1564
1565 /* PHY loopback cannot be performed if SoL/IDER
1566 * sessions are active */
1567 if (e1000_check_phy_reset_block(hw)) {
1568 DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
1569 "when SoL/IDER is active.\n");
1570 *data = 0;
1571 goto out;
1572 }
1573
1574 *data = e1000_setup_desc_rings(adapter); 1463 *data = e1000_setup_desc_rings(adapter);
1575 if (*data) 1464 if (*data)
1576 goto out; 1465 goto out;
@@ -1592,13 +1481,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1592 *data = 0; 1481 *data = 0;
1593 if (hw->media_type == e1000_media_type_internal_serdes) { 1482 if (hw->media_type == e1000_media_type_internal_serdes) {
1594 int i = 0; 1483 int i = 0;
1595 hw->serdes_link_down = true; 1484 hw->serdes_has_link = false;
1596 1485
1597 /* On some blade server designs, link establishment 1486 /* On some blade server designs, link establishment
1598 * could take as long as 2-3 minutes */ 1487 * could take as long as 2-3 minutes */
1599 do { 1488 do {
1600 e1000_check_for_link(hw); 1489 e1000_check_for_link(hw);
1601 if (!hw->serdes_link_down) 1490 if (hw->serdes_has_link)
1602 return *data; 1491 return *data;
1603 msleep(20); 1492 msleep(20);
1604 } while (i++ < 3750); 1493 } while (i++ < 3750);
@@ -1716,15 +1605,11 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
1716 case E1000_DEV_ID_82545EM_COPPER: 1605 case E1000_DEV_ID_82545EM_COPPER:
1717 case E1000_DEV_ID_82546GB_QUAD_COPPER: 1606 case E1000_DEV_ID_82546GB_QUAD_COPPER:
1718 case E1000_DEV_ID_82546GB_PCIE: 1607 case E1000_DEV_ID_82546GB_PCIE:
1719 case E1000_DEV_ID_82571EB_SERDES_QUAD:
1720 /* these don't support WoL at all */ 1608 /* these don't support WoL at all */
1721 wol->supported = 0; 1609 wol->supported = 0;
1722 break; 1610 break;
1723 case E1000_DEV_ID_82546EB_FIBER: 1611 case E1000_DEV_ID_82546EB_FIBER:
1724 case E1000_DEV_ID_82546GB_FIBER: 1612 case E1000_DEV_ID_82546GB_FIBER:
1725 case E1000_DEV_ID_82571EB_FIBER:
1726 case E1000_DEV_ID_82571EB_SERDES:
1727 case E1000_DEV_ID_82571EB_COPPER:
1728 /* Wake events not supported on port B */ 1613 /* Wake events not supported on port B */
1729 if (er32(STATUS) & E1000_STATUS_FUNC_1) { 1614 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1730 wol->supported = 0; 1615 wol->supported = 0;
@@ -1733,10 +1618,6 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
1733 /* return success for non excluded adapter ports */ 1618 /* return success for non excluded adapter ports */
1734 retval = 0; 1619 retval = 0;
1735 break; 1620 break;
1736 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1737 case E1000_DEV_ID_82571EB_QUAD_FIBER:
1738 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1739 case E1000_DEV_ID_82571PT_QUAD_COPPER:
1740 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1621 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1741 /* quad port adapters only support WoL on port A */ 1622 /* quad port adapters only support WoL on port A */
1742 if (!adapter->quad_port_a) { 1623 if (!adapter->quad_port_a) {
@@ -1872,30 +1753,15 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1872 if (!data) 1753 if (!data)
1873 data = INT_MAX; 1754 data = INT_MAX;
1874 1755
1875 if (hw->mac_type < e1000_82571) { 1756 if (!adapter->blink_timer.function) {
1876 if (!adapter->blink_timer.function) { 1757 init_timer(&adapter->blink_timer);
1877 init_timer(&adapter->blink_timer); 1758 adapter->blink_timer.function = e1000_led_blink_callback;
1878 adapter->blink_timer.function = e1000_led_blink_callback; 1759 adapter->blink_timer.data = (unsigned long)adapter;
1879 adapter->blink_timer.data = (unsigned long)adapter;
1880 }
1881 e1000_setup_led(hw);
1882 mod_timer(&adapter->blink_timer, jiffies);
1883 msleep_interruptible(data * 1000);
1884 del_timer_sync(&adapter->blink_timer);
1885 } else if (hw->phy_type == e1000_phy_ife) {
1886 if (!adapter->blink_timer.function) {
1887 init_timer(&adapter->blink_timer);
1888 adapter->blink_timer.function = e1000_led_blink_callback;
1889 adapter->blink_timer.data = (unsigned long)adapter;
1890 }
1891 mod_timer(&adapter->blink_timer, jiffies);
1892 msleep_interruptible(data * 1000);
1893 del_timer_sync(&adapter->blink_timer);
1894 e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
1895 } else {
1896 e1000_blink_led_start(hw);
1897 msleep_interruptible(data * 1000);
1898 } 1760 }
1761 e1000_setup_led(hw);
1762 mod_timer(&adapter->blink_timer, jiffies);
1763 msleep_interruptible(data * 1000);
1764 del_timer_sync(&adapter->blink_timer);
1899 1765
1900 e1000_led_off(hw); 1766 e1000_led_off(hw);
1901 clear_bit(E1000_LED_ON, &adapter->led_status); 1767 clear_bit(E1000_LED_ON, &adapter->led_status);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 45ac225a7aaa..8d7d87f12827 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -24,88 +24,34 @@
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 26
27*******************************************************************************/ 27 */
28 28
29/* e1000_hw.c 29/* e1000_hw.c
30 * Shared functions for accessing and configuring the MAC 30 * Shared functions for accessing and configuring the MAC
31 */ 31 */
32 32
33
34#include "e1000_hw.h" 33#include "e1000_hw.h"
35 34
36static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
37static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
38static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
39static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
40static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
41static void e1000_release_software_semaphore(struct e1000_hw *hw);
42
43static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
44static s32 e1000_check_downshift(struct e1000_hw *hw); 35static s32 e1000_check_downshift(struct e1000_hw *hw);
45static s32 e1000_check_polarity(struct e1000_hw *hw, 36static s32 e1000_check_polarity(struct e1000_hw *hw,
46 e1000_rev_polarity *polarity); 37 e1000_rev_polarity *polarity);
47static void e1000_clear_hw_cntrs(struct e1000_hw *hw); 38static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
48static void e1000_clear_vfta(struct e1000_hw *hw); 39static void e1000_clear_vfta(struct e1000_hw *hw);
49static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
50static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, 40static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
51 bool link_up); 41 bool link_up);
52static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); 42static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
53static s32 e1000_detect_gig_phy(struct e1000_hw *hw); 43static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
54static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
55static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); 44static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
56static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, 45static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
57 u16 *max_length); 46 u16 *max_length);
58static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
59static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); 47static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
60static s32 e1000_get_software_flag(struct e1000_hw *hw);
61static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
62static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
63static s32 e1000_id_led_init(struct e1000_hw *hw); 48static s32 e1000_id_led_init(struct e1000_hw *hw);
64static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
65 u32 cnf_base_addr,
66 u32 cnf_size);
67static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
68static void e1000_init_rx_addrs(struct e1000_hw *hw); 49static void e1000_init_rx_addrs(struct e1000_hw *hw);
69static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
70static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
71static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
72static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
73static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
74 u16 offset, u8 *sum);
75static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
76 struct e1000_host_mng_command_header
77 *hdr);
78static s32 e1000_mng_write_commit(struct e1000_hw *hw);
79static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
80 struct e1000_phy_info *phy_info);
81static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, 50static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
82 struct e1000_phy_info *phy_info); 51 struct e1000_phy_info *phy_info);
83static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
84 u16 *data);
85static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
86 u16 *data);
87static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
88static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, 52static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
89 struct e1000_phy_info *phy_info); 53 struct e1000_phy_info *phy_info);
90static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
91static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
92static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
93 u8 byte);
94static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
95static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
96static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
97 u16 *data);
98static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
99 u16 data);
100static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
101 u16 *data);
102static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
103 u16 *data);
104static void e1000_release_software_flag(struct e1000_hw *hw);
105static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); 54static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
106static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
107static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
108static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
109static s32 e1000_wait_autoneg(struct e1000_hw *hw); 55static s32 e1000_wait_autoneg(struct e1000_hw *hw);
110static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); 56static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
111static s32 e1000_set_phy_type(struct e1000_hw *hw); 57static s32 e1000_set_phy_type(struct e1000_hw *hw);
@@ -117,12 +63,11 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
117static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); 63static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
118static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); 64static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
119static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); 65static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
120static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, 66static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count);
121 u16 count);
122static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); 67static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
123static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); 68static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
124static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, 69static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
125 u16 words, u16 *data); 70 u16 words, u16 *data);
126static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, 71static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
127 u16 words, u16 *data); 72 u16 words, u16 *data);
128static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); 73static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
@@ -131,7 +76,7 @@ static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
131static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); 76static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
132static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, 77static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
133 u16 phy_data); 78 u16 phy_data);
134static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr, 79static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
135 u16 *phy_data); 80 u16 *phy_data);
136static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); 81static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
137static s32 e1000_acquire_eeprom(struct e1000_hw *hw); 82static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
@@ -140,188 +85,164 @@ static void e1000_standby_eeprom(struct e1000_hw *hw);
140static s32 e1000_set_vco_speed(struct e1000_hw *hw); 85static s32 e1000_set_vco_speed(struct e1000_hw *hw);
141static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); 86static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
142static s32 e1000_set_phy_mode(struct e1000_hw *hw); 87static s32 e1000_set_phy_mode(struct e1000_hw *hw);
143static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer); 88static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
144static u8 e1000_calculate_mng_checksum(char *buffer, u32 length); 89 u16 *data);
145static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex); 90static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
146static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw); 91 u16 *data);
147static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
148static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
149 92
150/* IGP cable length table */ 93/* IGP cable length table */
151static const 94static const
152u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = 95u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
153 { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 96 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
154 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 97 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
155 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, 98 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
156 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, 99 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
157 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, 100 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
158 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 101 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100,
159 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 102 100,
160 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; 103 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
161 104 110, 110,
162static const 105 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120,
163u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = 106 120, 120
164 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 107};
165 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
166 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
167 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
168 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
169 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
170 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
171 104, 109, 114, 118, 121, 124};
172 108
173static DEFINE_SPINLOCK(e1000_eeprom_lock); 109static DEFINE_SPINLOCK(e1000_eeprom_lock);
174 110
175/****************************************************************************** 111/**
176 * Set the phy type member in the hw struct. 112 * e1000_set_phy_type - Set the phy type member in the hw struct.
177 * 113 * @hw: Struct containing variables accessed by shared code
178 * hw - Struct containing variables accessed by shared code 114 */
179 *****************************************************************************/
180static s32 e1000_set_phy_type(struct e1000_hw *hw) 115static s32 e1000_set_phy_type(struct e1000_hw *hw)
181{ 116{
182 DEBUGFUNC("e1000_set_phy_type"); 117 DEBUGFUNC("e1000_set_phy_type");
183
184 if (hw->mac_type == e1000_undefined)
185 return -E1000_ERR_PHY_TYPE;
186
187 switch (hw->phy_id) {
188 case M88E1000_E_PHY_ID:
189 case M88E1000_I_PHY_ID:
190 case M88E1011_I_PHY_ID:
191 case M88E1111_I_PHY_ID:
192 hw->phy_type = e1000_phy_m88;
193 break;
194 case IGP01E1000_I_PHY_ID:
195 if (hw->mac_type == e1000_82541 ||
196 hw->mac_type == e1000_82541_rev_2 ||
197 hw->mac_type == e1000_82547 ||
198 hw->mac_type == e1000_82547_rev_2) {
199 hw->phy_type = e1000_phy_igp;
200 break;
201 }
202 case IGP03E1000_E_PHY_ID:
203 hw->phy_type = e1000_phy_igp_3;
204 break;
205 case IFE_E_PHY_ID:
206 case IFE_PLUS_E_PHY_ID:
207 case IFE_C_E_PHY_ID:
208 hw->phy_type = e1000_phy_ife;
209 break;
210 case GG82563_E_PHY_ID:
211 if (hw->mac_type == e1000_80003es2lan) {
212 hw->phy_type = e1000_phy_gg82563;
213 break;
214 }
215 /* Fall Through */
216 default:
217 /* Should never have loaded on this device */
218 hw->phy_type = e1000_phy_undefined;
219 return -E1000_ERR_PHY_TYPE;
220 }
221
222 return E1000_SUCCESS;
223}
224
225/******************************************************************************
226 * IGP phy init script - initializes the GbE PHY
227 *
228 * hw - Struct containing variables accessed by shared code
229 *****************************************************************************/
230static void e1000_phy_init_script(struct e1000_hw *hw)
231{
232 u32 ret_val;
233 u16 phy_saved_data;
234
235 DEBUGFUNC("e1000_phy_init_script");
236
237 if (hw->phy_init_script) {
238 msleep(20);
239
240 /* Save off the current value of register 0x2F5B to be restored at
241 * the end of this routine. */
242 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
243
244 /* Disabled the PHY transmitter */
245 e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
246 118
247 msleep(20); 119 if (hw->mac_type == e1000_undefined)
120 return -E1000_ERR_PHY_TYPE;
248 121
249 e1000_write_phy_reg(hw,0x0000,0x0140); 122 switch (hw->phy_id) {
250 123 case M88E1000_E_PHY_ID:
251 msleep(5); 124 case M88E1000_I_PHY_ID:
252 125 case M88E1011_I_PHY_ID:
253 switch (hw->mac_type) { 126 case M88E1111_I_PHY_ID:
254 case e1000_82541: 127 hw->phy_type = e1000_phy_m88;
255 case e1000_82547: 128 break;
256 e1000_write_phy_reg(hw, 0x1F95, 0x0001); 129 case IGP01E1000_I_PHY_ID:
257 130 if (hw->mac_type == e1000_82541 ||
258 e1000_write_phy_reg(hw, 0x1F71, 0xBD21); 131 hw->mac_type == e1000_82541_rev_2 ||
259 132 hw->mac_type == e1000_82547 ||
260 e1000_write_phy_reg(hw, 0x1F79, 0x0018); 133 hw->mac_type == e1000_82547_rev_2) {
261 134 hw->phy_type = e1000_phy_igp;
262 e1000_write_phy_reg(hw, 0x1F30, 0x1600); 135 break;
263 136 }
264 e1000_write_phy_reg(hw, 0x1F31, 0x0014); 137 default:
265 138 /* Should never have loaded on this device */
266 e1000_write_phy_reg(hw, 0x1F32, 0x161C); 139 hw->phy_type = e1000_phy_undefined;
267 140 return -E1000_ERR_PHY_TYPE;
268 e1000_write_phy_reg(hw, 0x1F94, 0x0003); 141 }
269
270 e1000_write_phy_reg(hw, 0x1F96, 0x003F);
271
272 e1000_write_phy_reg(hw, 0x2010, 0x0008);
273 break;
274
275 case e1000_82541_rev_2:
276 case e1000_82547_rev_2:
277 e1000_write_phy_reg(hw, 0x1F73, 0x0099);
278 break;
279 default:
280 break;
281 }
282
283 e1000_write_phy_reg(hw, 0x0000, 0x3300);
284
285 msleep(20);
286
287 /* Now enable the transmitter */
288 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
289
290 if (hw->mac_type == e1000_82547) {
291 u16 fused, fine, coarse;
292
293 /* Move to analog registers page */
294 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
295
296 if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
297 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
298 142
299 fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; 143 return E1000_SUCCESS;
300 coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; 144}
301 145
302 if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { 146/**
303 coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10; 147 * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY
304 fine -= IGP01E1000_ANALOG_FUSE_FINE_1; 148 * @hw: Struct containing variables accessed by shared code
305 } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH) 149 */
306 fine -= IGP01E1000_ANALOG_FUSE_FINE_10; 150static void e1000_phy_init_script(struct e1000_hw *hw)
151{
152 u32 ret_val;
153 u16 phy_saved_data;
154
155 DEBUGFUNC("e1000_phy_init_script");
156
157 if (hw->phy_init_script) {
158 msleep(20);
159
160 /* Save off the current value of register 0x2F5B to be restored at
161 * the end of this routine. */
162 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
163
164 /* Disabled the PHY transmitter */
165 e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
166 msleep(20);
167
168 e1000_write_phy_reg(hw, 0x0000, 0x0140);
169 msleep(5);
170
171 switch (hw->mac_type) {
172 case e1000_82541:
173 case e1000_82547:
174 e1000_write_phy_reg(hw, 0x1F95, 0x0001);
175 e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
176 e1000_write_phy_reg(hw, 0x1F79, 0x0018);
177 e1000_write_phy_reg(hw, 0x1F30, 0x1600);
178 e1000_write_phy_reg(hw, 0x1F31, 0x0014);
179 e1000_write_phy_reg(hw, 0x1F32, 0x161C);
180 e1000_write_phy_reg(hw, 0x1F94, 0x0003);
181 e1000_write_phy_reg(hw, 0x1F96, 0x003F);
182 e1000_write_phy_reg(hw, 0x2010, 0x0008);
183 break;
307 184
308 fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | 185 case e1000_82541_rev_2:
309 (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | 186 case e1000_82547_rev_2:
310 (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK); 187 e1000_write_phy_reg(hw, 0x1F73, 0x0099);
188 break;
189 default:
190 break;
191 }
311 192
312 e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused); 193 e1000_write_phy_reg(hw, 0x0000, 0x3300);
313 e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS, 194 msleep(20);
314 IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); 195
315 } 196 /* Now enable the transmitter */
316 } 197 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
317 } 198
199 if (hw->mac_type == e1000_82547) {
200 u16 fused, fine, coarse;
201
202 /* Move to analog registers page */
203 e1000_read_phy_reg(hw,
204 IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
205 &fused);
206
207 if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
208 e1000_read_phy_reg(hw,
209 IGP01E1000_ANALOG_FUSE_STATUS,
210 &fused);
211
212 fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
213 coarse =
214 fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
215
216 if (coarse >
217 IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
218 coarse -=
219 IGP01E1000_ANALOG_FUSE_COARSE_10;
220 fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
221 } else if (coarse ==
222 IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
223 fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
224
225 fused =
226 (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
227 (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
228 (coarse &
229 IGP01E1000_ANALOG_FUSE_COARSE_MASK);
230
231 e1000_write_phy_reg(hw,
232 IGP01E1000_ANALOG_FUSE_CONTROL,
233 fused);
234 e1000_write_phy_reg(hw,
235 IGP01E1000_ANALOG_FUSE_BYPASS,
236 IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
237 }
238 }
239 }
318} 240}
319 241
320/****************************************************************************** 242/**
321 * Set the mac type member in the hw struct. 243 * e1000_set_mac_type - Set the mac type member in the hw struct.
322 * 244 * @hw: Struct containing variables accessed by shared code
323 * hw - Struct containing variables accessed by shared code 245 */
324 *****************************************************************************/
325s32 e1000_set_mac_type(struct e1000_hw *hw) 246s32 e1000_set_mac_type(struct e1000_hw *hw)
326{ 247{
327 DEBUGFUNC("e1000_set_mac_type"); 248 DEBUGFUNC("e1000_set_mac_type");
@@ -397,61 +318,12 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
397 case E1000_DEV_ID_82547GI: 318 case E1000_DEV_ID_82547GI:
398 hw->mac_type = e1000_82547_rev_2; 319 hw->mac_type = e1000_82547_rev_2;
399 break; 320 break;
400 case E1000_DEV_ID_82571EB_COPPER:
401 case E1000_DEV_ID_82571EB_FIBER:
402 case E1000_DEV_ID_82571EB_SERDES:
403 case E1000_DEV_ID_82571EB_SERDES_DUAL:
404 case E1000_DEV_ID_82571EB_SERDES_QUAD:
405 case E1000_DEV_ID_82571EB_QUAD_COPPER:
406 case E1000_DEV_ID_82571PT_QUAD_COPPER:
407 case E1000_DEV_ID_82571EB_QUAD_FIBER:
408 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
409 hw->mac_type = e1000_82571;
410 break;
411 case E1000_DEV_ID_82572EI_COPPER:
412 case E1000_DEV_ID_82572EI_FIBER:
413 case E1000_DEV_ID_82572EI_SERDES:
414 case E1000_DEV_ID_82572EI:
415 hw->mac_type = e1000_82572;
416 break;
417 case E1000_DEV_ID_82573E:
418 case E1000_DEV_ID_82573E_IAMT:
419 case E1000_DEV_ID_82573L:
420 hw->mac_type = e1000_82573;
421 break;
422 case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
423 case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
424 case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
425 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
426 hw->mac_type = e1000_80003es2lan;
427 break;
428 case E1000_DEV_ID_ICH8_IGP_M_AMT:
429 case E1000_DEV_ID_ICH8_IGP_AMT:
430 case E1000_DEV_ID_ICH8_IGP_C:
431 case E1000_DEV_ID_ICH8_IFE:
432 case E1000_DEV_ID_ICH8_IFE_GT:
433 case E1000_DEV_ID_ICH8_IFE_G:
434 case E1000_DEV_ID_ICH8_IGP_M:
435 hw->mac_type = e1000_ich8lan;
436 break;
437 default: 321 default:
438 /* Should never have loaded on this device */ 322 /* Should never have loaded on this device */
439 return -E1000_ERR_MAC_TYPE; 323 return -E1000_ERR_MAC_TYPE;
440 } 324 }
441 325
442 switch (hw->mac_type) { 326 switch (hw->mac_type) {
443 case e1000_ich8lan:
444 hw->swfwhw_semaphore_present = true;
445 hw->asf_firmware_present = true;
446 break;
447 case e1000_80003es2lan:
448 hw->swfw_sync_present = true;
449 /* fall through */
450 case e1000_82571:
451 case e1000_82572:
452 case e1000_82573:
453 hw->eeprom_semaphore_present = true;
454 /* fall through */
455 case e1000_82541: 327 case e1000_82541:
456 case e1000_82547: 328 case e1000_82547:
457 case e1000_82541_rev_2: 329 case e1000_82541_rev_2:
@@ -468,6058 +340,4500 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
468 if (hw->mac_type == e1000_82543) 340 if (hw->mac_type == e1000_82543)
469 hw->bad_tx_carr_stats_fd = true; 341 hw->bad_tx_carr_stats_fd = true;
470 342
471 /* capable of receiving management packets to the host */
472 if (hw->mac_type >= e1000_82571)
473 hw->has_manc2h = true;
474
475 /* In rare occasions, ESB2 systems would end up started without
476 * the RX unit being turned on.
477 */
478 if (hw->mac_type == e1000_80003es2lan)
479 hw->rx_needs_kicking = true;
480
481 if (hw->mac_type > e1000_82544) 343 if (hw->mac_type > e1000_82544)
482 hw->has_smbus = true; 344 hw->has_smbus = true;
483 345
484 return E1000_SUCCESS; 346 return E1000_SUCCESS;
485} 347}
486 348
487/***************************************************************************** 349/**
488 * Set media type and TBI compatibility. 350 * e1000_set_media_type - Set media type and TBI compatibility.
489 * 351 * @hw: Struct containing variables accessed by shared code
490 * hw - Struct containing variables accessed by shared code 352 */
491 * **************************************************************************/
492void e1000_set_media_type(struct e1000_hw *hw) 353void e1000_set_media_type(struct e1000_hw *hw)
493{ 354{
494 u32 status; 355 u32 status;
495 356
496 DEBUGFUNC("e1000_set_media_type"); 357 DEBUGFUNC("e1000_set_media_type");
497 358
498 if (hw->mac_type != e1000_82543) { 359 if (hw->mac_type != e1000_82543) {
499 /* tbi_compatibility is only valid on 82543 */ 360 /* tbi_compatibility is only valid on 82543 */
500 hw->tbi_compatibility_en = false; 361 hw->tbi_compatibility_en = false;
501 } 362 }
502 363
503 switch (hw->device_id) { 364 switch (hw->device_id) {
504 case E1000_DEV_ID_82545GM_SERDES: 365 case E1000_DEV_ID_82545GM_SERDES:
505 case E1000_DEV_ID_82546GB_SERDES: 366 case E1000_DEV_ID_82546GB_SERDES:
506 case E1000_DEV_ID_82571EB_SERDES: 367 hw->media_type = e1000_media_type_internal_serdes;
507 case E1000_DEV_ID_82571EB_SERDES_DUAL: 368 break;
508 case E1000_DEV_ID_82571EB_SERDES_QUAD: 369 default:
509 case E1000_DEV_ID_82572EI_SERDES: 370 switch (hw->mac_type) {
510 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: 371 case e1000_82542_rev2_0:
511 hw->media_type = e1000_media_type_internal_serdes; 372 case e1000_82542_rev2_1:
512 break; 373 hw->media_type = e1000_media_type_fiber;
513 default: 374 break;
514 switch (hw->mac_type) { 375 default:
515 case e1000_82542_rev2_0: 376 status = er32(STATUS);
516 case e1000_82542_rev2_1: 377 if (status & E1000_STATUS_TBIMODE) {
517 hw->media_type = e1000_media_type_fiber; 378 hw->media_type = e1000_media_type_fiber;
518 break; 379 /* tbi_compatibility not valid on fiber */
519 case e1000_ich8lan: 380 hw->tbi_compatibility_en = false;
520 case e1000_82573: 381 } else {
521 /* The STATUS_TBIMODE bit is reserved or reused for the this 382 hw->media_type = e1000_media_type_copper;
522 * device. 383 }
523 */ 384 break;
524 hw->media_type = e1000_media_type_copper; 385 }
525 break; 386 }
526 default:
527 status = er32(STATUS);
528 if (status & E1000_STATUS_TBIMODE) {
529 hw->media_type = e1000_media_type_fiber;
530 /* tbi_compatibility not valid on fiber */
531 hw->tbi_compatibility_en = false;
532 } else {
533 hw->media_type = e1000_media_type_copper;
534 }
535 break;
536 }
537 }
538} 387}
539 388
540/****************************************************************************** 389/**
541 * Reset the transmit and receive units; mask and clear all interrupts. 390 * e1000_reset_hw: reset the hardware completely
391 * @hw: Struct containing variables accessed by shared code
542 * 392 *
543 * hw - Struct containing variables accessed by shared code 393 * Reset the transmit and receive units; mask and clear all interrupts.
544 *****************************************************************************/ 394 */
545s32 e1000_reset_hw(struct e1000_hw *hw) 395s32 e1000_reset_hw(struct e1000_hw *hw)
546{ 396{
547 u32 ctrl; 397 u32 ctrl;
548 u32 ctrl_ext; 398 u32 ctrl_ext;
549 u32 icr; 399 u32 icr;
550 u32 manc; 400 u32 manc;
551 u32 led_ctrl; 401 u32 led_ctrl;
552 u32 timeout; 402 s32 ret_val;
553 u32 extcnf_ctrl;
554 s32 ret_val;
555
556 DEBUGFUNC("e1000_reset_hw");
557
558 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
559 if (hw->mac_type == e1000_82542_rev2_0) {
560 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
561 e1000_pci_clear_mwi(hw);
562 }
563
564 if (hw->bus_type == e1000_bus_type_pci_express) {
565 /* Prevent the PCI-E bus from sticking if there is no TLP connection
566 * on the last TLP read/write transaction when MAC is reset.
567 */
568 if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
569 DEBUGOUT("PCI-E Master disable polling has failed.\n");
570 }
571 }
572
573 /* Clear interrupt mask to stop board from generating interrupts */
574 DEBUGOUT("Masking off all interrupts\n");
575 ew32(IMC, 0xffffffff);
576
577 /* Disable the Transmit and Receive units. Then delay to allow
578 * any pending transactions to complete before we hit the MAC with
579 * the global reset.
580 */
581 ew32(RCTL, 0);
582 ew32(TCTL, E1000_TCTL_PSP);
583 E1000_WRITE_FLUSH();
584
585 /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
586 hw->tbi_compatibility_on = false;
587
588 /* Delay to allow any outstanding PCI transactions to complete before
589 * resetting the device
590 */
591 msleep(10);
592
593 ctrl = er32(CTRL);
594
595 /* Must reset the PHY before resetting the MAC */
596 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
597 ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
598 msleep(5);
599 }
600
601 /* Must acquire the MDIO ownership before MAC reset.
602 * Ownership defaults to firmware after a reset. */
603 if (hw->mac_type == e1000_82573) {
604 timeout = 10;
605
606 extcnf_ctrl = er32(EXTCNF_CTRL);
607 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
608
609 do {
610 ew32(EXTCNF_CTRL, extcnf_ctrl);
611 extcnf_ctrl = er32(EXTCNF_CTRL);
612
613 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
614 break;
615 else
616 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
617
618 msleep(2);
619 timeout--;
620 } while (timeout);
621 }
622
623 /* Workaround for ICH8 bit corruption issue in FIFO memory */
624 if (hw->mac_type == e1000_ich8lan) {
625 /* Set Tx and Rx buffer allocation to 8k apiece. */
626 ew32(PBA, E1000_PBA_8K);
627 /* Set Packet Buffer Size to 16k. */
628 ew32(PBS, E1000_PBS_16K);
629 }
630
631 /* Issue a global reset to the MAC. This will reset the chip's
632 * transmit, receive, DMA, and link units. It will not effect
633 * the current PCI configuration. The global reset bit is self-
634 * clearing, and should clear within a microsecond.
635 */
636 DEBUGOUT("Issuing a global reset to MAC\n");
637
638 switch (hw->mac_type) {
639 case e1000_82544:
640 case e1000_82540:
641 case e1000_82545:
642 case e1000_82546:
643 case e1000_82541:
644 case e1000_82541_rev_2:
645 /* These controllers can't ack the 64-bit write when issuing the
646 * reset, so use IO-mapping as a workaround to issue the reset */
647 E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
648 break;
649 case e1000_82545_rev_3:
650 case e1000_82546_rev_3:
651 /* Reset is performed on a shadow of the control register */
652 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
653 break;
654 case e1000_ich8lan:
655 if (!hw->phy_reset_disable &&
656 e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
657 /* e1000_ich8lan PHY HW reset requires MAC CORE reset
658 * at the same time to make sure the interface between
659 * MAC and the external PHY is reset.
660 */
661 ctrl |= E1000_CTRL_PHY_RST;
662 }
663
664 e1000_get_software_flag(hw);
665 ew32(CTRL, (ctrl | E1000_CTRL_RST));
666 msleep(5);
667 break;
668 default:
669 ew32(CTRL, (ctrl | E1000_CTRL_RST));
670 break;
671 }
672
673 /* After MAC reset, force reload of EEPROM to restore power-on settings to
674 * device. Later controllers reload the EEPROM automatically, so just wait
675 * for reload to complete.
676 */
677 switch (hw->mac_type) {
678 case e1000_82542_rev2_0:
679 case e1000_82542_rev2_1:
680 case e1000_82543:
681 case e1000_82544:
682 /* Wait for reset to complete */
683 udelay(10);
684 ctrl_ext = er32(CTRL_EXT);
685 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
686 ew32(CTRL_EXT, ctrl_ext);
687 E1000_WRITE_FLUSH();
688 /* Wait for EEPROM reload */
689 msleep(2);
690 break;
691 case e1000_82541:
692 case e1000_82541_rev_2:
693 case e1000_82547:
694 case e1000_82547_rev_2:
695 /* Wait for EEPROM reload */
696 msleep(20);
697 break;
698 case e1000_82573:
699 if (!e1000_is_onboard_nvm_eeprom(hw)) {
700 udelay(10);
701 ctrl_ext = er32(CTRL_EXT);
702 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
703 ew32(CTRL_EXT, ctrl_ext);
704 E1000_WRITE_FLUSH();
705 }
706 /* fall through */
707 default:
708 /* Auto read done will delay 5ms or poll based on mac type */
709 ret_val = e1000_get_auto_rd_done(hw);
710 if (ret_val)
711 return ret_val;
712 break;
713 }
714
715 /* Disable HW ARPs on ASF enabled adapters */
716 if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
717 manc = er32(MANC);
718 manc &= ~(E1000_MANC_ARP_EN);
719 ew32(MANC, manc);
720 }
721
722 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
723 e1000_phy_init_script(hw);
724
725 /* Configure activity LED after PHY reset */
726 led_ctrl = er32(LEDCTL);
727 led_ctrl &= IGP_ACTIVITY_LED_MASK;
728 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
729 ew32(LEDCTL, led_ctrl);
730 }
731
732 /* Clear interrupt mask to stop board from generating interrupts */
733 DEBUGOUT("Masking off all interrupts\n");
734 ew32(IMC, 0xffffffff);
735
736 /* Clear any pending interrupt events. */
737 icr = er32(ICR);
738
739 /* If MWI was previously enabled, reenable it. */
740 if (hw->mac_type == e1000_82542_rev2_0) {
741 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
742 e1000_pci_set_mwi(hw);
743 }
744
745 if (hw->mac_type == e1000_ich8lan) {
746 u32 kab = er32(KABGTXD);
747 kab |= E1000_KABGTXD_BGSQLBIAS;
748 ew32(KABGTXD, kab);
749 }
750
751 return E1000_SUCCESS;
752}
753 403
754/****************************************************************************** 404 DEBUGFUNC("e1000_reset_hw");
755 * 405
756 * Initialize a number of hardware-dependent bits 406 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
757 * 407 if (hw->mac_type == e1000_82542_rev2_0) {
758 * hw: Struct containing variables accessed by shared code 408 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
759 * 409 e1000_pci_clear_mwi(hw);
760 * This function contains hardware limitation workarounds for PCI-E adapters 410 }
761 * 411
762 *****************************************************************************/ 412 /* Clear interrupt mask to stop board from generating interrupts */
763static void e1000_initialize_hardware_bits(struct e1000_hw *hw) 413 DEBUGOUT("Masking off all interrupts\n");
764{ 414 ew32(IMC, 0xffffffff);
765 if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) { 415
766 /* Settings common to all PCI-express silicon */ 416 /* Disable the Transmit and Receive units. Then delay to allow
767 u32 reg_ctrl, reg_ctrl_ext; 417 * any pending transactions to complete before we hit the MAC with
768 u32 reg_tarc0, reg_tarc1; 418 * the global reset.
769 u32 reg_tctl; 419 */
770 u32 reg_txdctl, reg_txdctl1; 420 ew32(RCTL, 0);
771 421 ew32(TCTL, E1000_TCTL_PSP);
772 /* link autonegotiation/sync workarounds */ 422 E1000_WRITE_FLUSH();
773 reg_tarc0 = er32(TARC0); 423
774 reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)); 424 /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
775 425 hw->tbi_compatibility_on = false;
776 /* Enable not-done TX descriptor counting */ 426
777 reg_txdctl = er32(TXDCTL); 427 /* Delay to allow any outstanding PCI transactions to complete before
778 reg_txdctl |= E1000_TXDCTL_COUNT_DESC; 428 * resetting the device
779 ew32(TXDCTL, reg_txdctl); 429 */
780 reg_txdctl1 = er32(TXDCTL1); 430 msleep(10);
781 reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC; 431
782 ew32(TXDCTL1, reg_txdctl1); 432 ctrl = er32(CTRL);
783 433
784 switch (hw->mac_type) { 434 /* Must reset the PHY before resetting the MAC */
785 case e1000_82571: 435 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
786 case e1000_82572: 436 ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
787 /* Clear PHY TX compatible mode bits */ 437 msleep(5);
788 reg_tarc1 = er32(TARC1); 438 }
789 reg_tarc1 &= ~((1 << 30)|(1 << 29)); 439
790 440 /* Issue a global reset to the MAC. This will reset the chip's
791 /* link autonegotiation/sync workarounds */ 441 * transmit, receive, DMA, and link units. It will not effect
792 reg_tarc0 |= ((1 << 26)|(1 << 25)|(1 << 24)|(1 << 23)); 442 * the current PCI configuration. The global reset bit is self-
793 443 * clearing, and should clear within a microsecond.
794 /* TX ring control fixes */ 444 */
795 reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24)); 445 DEBUGOUT("Issuing a global reset to MAC\n");
796 446
797 /* Multiple read bit is reversed polarity */ 447 switch (hw->mac_type) {
798 reg_tctl = er32(TCTL); 448 case e1000_82544:
799 if (reg_tctl & E1000_TCTL_MULR) 449 case e1000_82540:
800 reg_tarc1 &= ~(1 << 28); 450 case e1000_82545:
801 else 451 case e1000_82546:
802 reg_tarc1 |= (1 << 28); 452 case e1000_82541:
803 453 case e1000_82541_rev_2:
804 ew32(TARC1, reg_tarc1); 454 /* These controllers can't ack the 64-bit write when issuing the
805 break; 455 * reset, so use IO-mapping as a workaround to issue the reset */
806 case e1000_82573: 456 E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
807 reg_ctrl_ext = er32(CTRL_EXT); 457 break;
808 reg_ctrl_ext &= ~(1 << 23); 458 case e1000_82545_rev_3:
809 reg_ctrl_ext |= (1 << 22); 459 case e1000_82546_rev_3:
810 460 /* Reset is performed on a shadow of the control register */
811 /* TX byte count fix */ 461 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
812 reg_ctrl = er32(CTRL); 462 break;
813 reg_ctrl &= ~(1 << 29); 463 default:
814 464 ew32(CTRL, (ctrl | E1000_CTRL_RST));
815 ew32(CTRL_EXT, reg_ctrl_ext); 465 break;
816 ew32(CTRL, reg_ctrl); 466 }
817 break; 467
818 case e1000_80003es2lan: 468 /* After MAC reset, force reload of EEPROM to restore power-on settings to
819 /* improve small packet performace for fiber/serdes */ 469 * device. Later controllers reload the EEPROM automatically, so just wait
820 if ((hw->media_type == e1000_media_type_fiber) || 470 * for reload to complete.
821 (hw->media_type == e1000_media_type_internal_serdes)) { 471 */
822 reg_tarc0 &= ~(1 << 20); 472 switch (hw->mac_type) {
823 } 473 case e1000_82542_rev2_0:
824 474 case e1000_82542_rev2_1:
825 /* Multiple read bit is reversed polarity */ 475 case e1000_82543:
826 reg_tctl = er32(TCTL); 476 case e1000_82544:
827 reg_tarc1 = er32(TARC1); 477 /* Wait for reset to complete */
828 if (reg_tctl & E1000_TCTL_MULR) 478 udelay(10);
829 reg_tarc1 &= ~(1 << 28); 479 ctrl_ext = er32(CTRL_EXT);
830 else 480 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
831 reg_tarc1 |= (1 << 28); 481 ew32(CTRL_EXT, ctrl_ext);
832 482 E1000_WRITE_FLUSH();
833 ew32(TARC1, reg_tarc1); 483 /* Wait for EEPROM reload */
834 break; 484 msleep(2);
835 case e1000_ich8lan: 485 break;
836 /* Reduce concurrent DMA requests to 3 from 4 */ 486 case e1000_82541:
837 if ((hw->revision_id < 3) || 487 case e1000_82541_rev_2:
838 ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) && 488 case e1000_82547:
839 (hw->device_id != E1000_DEV_ID_ICH8_IGP_M))) 489 case e1000_82547_rev_2:
840 reg_tarc0 |= ((1 << 29)|(1 << 28)); 490 /* Wait for EEPROM reload */
841 491 msleep(20);
842 reg_ctrl_ext = er32(CTRL_EXT); 492 break;
843 reg_ctrl_ext |= (1 << 22); 493 default:
844 ew32(CTRL_EXT, reg_ctrl_ext); 494 /* Auto read done will delay 5ms or poll based on mac type */
845 495 ret_val = e1000_get_auto_rd_done(hw);
846 /* workaround TX hang with TSO=on */ 496 if (ret_val)
847 reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)); 497 return ret_val;
848 498 break;
849 /* Multiple read bit is reversed polarity */ 499 }
850 reg_tctl = er32(TCTL); 500
851 reg_tarc1 = er32(TARC1); 501 /* Disable HW ARPs on ASF enabled adapters */
852 if (reg_tctl & E1000_TCTL_MULR) 502 if (hw->mac_type >= e1000_82540) {
853 reg_tarc1 &= ~(1 << 28); 503 manc = er32(MANC);
854 else 504 manc &= ~(E1000_MANC_ARP_EN);
855 reg_tarc1 |= (1 << 28); 505 ew32(MANC, manc);
856 506 }
857 /* workaround TX hang with TSO=on */ 507
858 reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24)); 508 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
859 509 e1000_phy_init_script(hw);
860 ew32(TARC1, reg_tarc1); 510
861 break; 511 /* Configure activity LED after PHY reset */
862 default: 512 led_ctrl = er32(LEDCTL);
863 break; 513 led_ctrl &= IGP_ACTIVITY_LED_MASK;
864 } 514 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
865 515 ew32(LEDCTL, led_ctrl);
866 ew32(TARC0, reg_tarc0); 516 }
867 } 517
518 /* Clear interrupt mask to stop board from generating interrupts */
519 DEBUGOUT("Masking off all interrupts\n");
520 ew32(IMC, 0xffffffff);
521
522 /* Clear any pending interrupt events. */
523 icr = er32(ICR);
524
525 /* If MWI was previously enabled, reenable it. */
526 if (hw->mac_type == e1000_82542_rev2_0) {
527 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
528 e1000_pci_set_mwi(hw);
529 }
530
531 return E1000_SUCCESS;
868} 532}
869 533
870/****************************************************************************** 534/**
871 * Performs basic configuration of the adapter. 535 * e1000_init_hw: Performs basic configuration of the adapter.
872 * 536 * @hw: Struct containing variables accessed by shared code
873 * hw - Struct containing variables accessed by shared code
874 * 537 *
875 * Assumes that the controller has previously been reset and is in a 538 * Assumes that the controller has previously been reset and is in a
876 * post-reset uninitialized state. Initializes the receive address registers, 539 * post-reset uninitialized state. Initializes the receive address registers,
877 * multicast table, and VLAN filter table. Calls routines to setup link 540 * multicast table, and VLAN filter table. Calls routines to setup link
878 * configuration and flow control settings. Clears all on-chip counters. Leaves 541 * configuration and flow control settings. Clears all on-chip counters. Leaves
879 * the transmit and receive units disabled and uninitialized. 542 * the transmit and receive units disabled and uninitialized.
880 *****************************************************************************/ 543 */
881s32 e1000_init_hw(struct e1000_hw *hw) 544s32 e1000_init_hw(struct e1000_hw *hw)
882{ 545{
883 u32 ctrl; 546 u32 ctrl;
884 u32 i; 547 u32 i;
885 s32 ret_val; 548 s32 ret_val;
886 u32 mta_size; 549 u32 mta_size;
887 u32 reg_data; 550 u32 ctrl_ext;
888 u32 ctrl_ext; 551
889 552 DEBUGFUNC("e1000_init_hw");
890 DEBUGFUNC("e1000_init_hw"); 553
891 554 /* Initialize Identification LED */
892 /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */ 555 ret_val = e1000_id_led_init(hw);
893 if ((hw->mac_type == e1000_ich8lan) && 556 if (ret_val) {
894 ((hw->revision_id < 3) || 557 DEBUGOUT("Error Initializing Identification LED\n");
895 ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) && 558 return ret_val;
896 (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) { 559 }
897 reg_data = er32(STATUS); 560
898 reg_data &= ~0x80000000; 561 /* Set the media type and TBI compatibility */
899 ew32(STATUS, reg_data); 562 e1000_set_media_type(hw);
900 } 563
901 564 /* Disabling VLAN filtering. */
902 /* Initialize Identification LED */ 565 DEBUGOUT("Initializing the IEEE VLAN\n");
903 ret_val = e1000_id_led_init(hw); 566 if (hw->mac_type < e1000_82545_rev_3)
904 if (ret_val) { 567 ew32(VET, 0);
905 DEBUGOUT("Error Initializing Identification LED\n"); 568 e1000_clear_vfta(hw);
906 return ret_val; 569
907 } 570 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
908 571 if (hw->mac_type == e1000_82542_rev2_0) {
909 /* Set the media type and TBI compatibility */ 572 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
910 e1000_set_media_type(hw); 573 e1000_pci_clear_mwi(hw);
911 574 ew32(RCTL, E1000_RCTL_RST);
912 /* Must be called after e1000_set_media_type because media_type is used */ 575 E1000_WRITE_FLUSH();
913 e1000_initialize_hardware_bits(hw); 576 msleep(5);
914 577 }
915 /* Disabling VLAN filtering. */ 578
916 DEBUGOUT("Initializing the IEEE VLAN\n"); 579 /* Setup the receive address. This involves initializing all of the Receive
917 /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */ 580 * Address Registers (RARs 0 - 15).
918 if (hw->mac_type != e1000_ich8lan) { 581 */
919 if (hw->mac_type < e1000_82545_rev_3) 582 e1000_init_rx_addrs(hw);
920 ew32(VET, 0); 583
921 e1000_clear_vfta(hw); 584 /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
922 } 585 if (hw->mac_type == e1000_82542_rev2_0) {
923 586 ew32(RCTL, 0);
924 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ 587 E1000_WRITE_FLUSH();
925 if (hw->mac_type == e1000_82542_rev2_0) { 588 msleep(1);
926 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 589 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
927 e1000_pci_clear_mwi(hw); 590 e1000_pci_set_mwi(hw);
928 ew32(RCTL, E1000_RCTL_RST); 591 }
929 E1000_WRITE_FLUSH(); 592
930 msleep(5); 593 /* Zero out the Multicast HASH table */
931 } 594 DEBUGOUT("Zeroing the MTA\n");
932 595 mta_size = E1000_MC_TBL_SIZE;
933 /* Setup the receive address. This involves initializing all of the Receive 596 for (i = 0; i < mta_size; i++) {
934 * Address Registers (RARs 0 - 15). 597 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
935 */ 598 /* use write flush to prevent Memory Write Block (MWB) from
936 e1000_init_rx_addrs(hw); 599 * occurring when accessing our register space */
937 600 E1000_WRITE_FLUSH();
938 /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ 601 }
939 if (hw->mac_type == e1000_82542_rev2_0) { 602
940 ew32(RCTL, 0); 603 /* Set the PCI priority bit correctly in the CTRL register. This
941 E1000_WRITE_FLUSH(); 604 * determines if the adapter gives priority to receives, or if it
942 msleep(1); 605 * gives equal priority to transmits and receives. Valid only on
943 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 606 * 82542 and 82543 silicon.
944 e1000_pci_set_mwi(hw); 607 */
945 } 608 if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
946 609 ctrl = er32(CTRL);
947 /* Zero out the Multicast HASH table */ 610 ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
948 DEBUGOUT("Zeroing the MTA\n"); 611 }
949 mta_size = E1000_MC_TBL_SIZE; 612
950 if (hw->mac_type == e1000_ich8lan) 613 switch (hw->mac_type) {
951 mta_size = E1000_MC_TBL_SIZE_ICH8LAN; 614 case e1000_82545_rev_3:
952 for (i = 0; i < mta_size; i++) { 615 case e1000_82546_rev_3:
953 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 616 break;
954 /* use write flush to prevent Memory Write Block (MWB) from 617 default:
955 * occuring when accessing our register space */ 618 /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
956 E1000_WRITE_FLUSH(); 619 if (hw->bus_type == e1000_bus_type_pcix
957 } 620 && e1000_pcix_get_mmrbc(hw) > 2048)
958 621 e1000_pcix_set_mmrbc(hw, 2048);
959 /* Set the PCI priority bit correctly in the CTRL register. This 622 break;
960 * determines if the adapter gives priority to receives, or if it 623 }
961 * gives equal priority to transmits and receives. Valid only on 624
962 * 82542 and 82543 silicon. 625 /* Call a subroutine to configure the link and setup flow control. */
963 */ 626 ret_val = e1000_setup_link(hw);
964 if (hw->dma_fairness && hw->mac_type <= e1000_82543) { 627
965 ctrl = er32(CTRL); 628 /* Set the transmit descriptor write-back policy */
966 ew32(CTRL, ctrl | E1000_CTRL_PRIOR); 629 if (hw->mac_type > e1000_82544) {
967 } 630 ctrl = er32(TXDCTL);
968 631 ctrl =
969 switch (hw->mac_type) { 632 (ctrl & ~E1000_TXDCTL_WTHRESH) |
970 case e1000_82545_rev_3: 633 E1000_TXDCTL_FULL_TX_DESC_WB;
971 case e1000_82546_rev_3: 634 ew32(TXDCTL, ctrl);
972 break; 635 }
973 default: 636
974 /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ 637 /* Clear all of the statistics registers (clear on read). It is
975 if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048) 638 * important that we do this after we have tried to establish link
976 e1000_pcix_set_mmrbc(hw, 2048); 639 * because the symbol error count will increment wildly if there
977 break; 640 * is no link.
978 } 641 */
979 642 e1000_clear_hw_cntrs(hw);
980 /* More time needed for PHY to initialize */ 643
981 if (hw->mac_type == e1000_ich8lan) 644 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
982 msleep(15); 645 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
983 646 ctrl_ext = er32(CTRL_EXT);
984 /* Call a subroutine to configure the link and setup flow control. */ 647 /* Relaxed ordering must be disabled to avoid a parity
985 ret_val = e1000_setup_link(hw); 648 * error crash in a PCI slot. */
986 649 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
987 /* Set the transmit descriptor write-back policy */ 650 ew32(CTRL_EXT, ctrl_ext);
988 if (hw->mac_type > e1000_82544) { 651 }
989 ctrl = er32(TXDCTL); 652
990 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; 653 return ret_val;
991 ew32(TXDCTL, ctrl);
992 }
993
994 if (hw->mac_type == e1000_82573) {
995 e1000_enable_tx_pkt_filtering(hw);
996 }
997
998 switch (hw->mac_type) {
999 default:
1000 break;
1001 case e1000_80003es2lan:
1002 /* Enable retransmit on late collisions */
1003 reg_data = er32(TCTL);
1004 reg_data |= E1000_TCTL_RTLC;
1005 ew32(TCTL, reg_data);
1006
1007 /* Configure Gigabit Carry Extend Padding */
1008 reg_data = er32(TCTL_EXT);
1009 reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
1010 reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
1011 ew32(TCTL_EXT, reg_data);
1012
1013 /* Configure Transmit Inter-Packet Gap */
1014 reg_data = er32(TIPG);
1015 reg_data &= ~E1000_TIPG_IPGT_MASK;
1016 reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
1017 ew32(TIPG, reg_data);
1018
1019 reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
1020 reg_data &= ~0x00100000;
1021 E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
1022 /* Fall through */
1023 case e1000_82571:
1024 case e1000_82572:
1025 case e1000_ich8lan:
1026 ctrl = er32(TXDCTL1);
1027 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
1028 ew32(TXDCTL1, ctrl);
1029 break;
1030 }
1031
1032
1033 if (hw->mac_type == e1000_82573) {
1034 u32 gcr = er32(GCR);
1035 gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
1036 ew32(GCR, gcr);
1037 }
1038
1039 /* Clear all of the statistics registers (clear on read). It is
1040 * important that we do this after we have tried to establish link
1041 * because the symbol error count will increment wildly if there
1042 * is no link.
1043 */
1044 e1000_clear_hw_cntrs(hw);
1045
1046 /* ICH8 No-snoop bits are opposite polarity.
1047 * Set to snoop by default after reset. */
1048 if (hw->mac_type == e1000_ich8lan)
1049 e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
1050
1051 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
1052 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
1053 ctrl_ext = er32(CTRL_EXT);
1054 /* Relaxed ordering must be disabled to avoid a parity
1055 * error crash in a PCI slot. */
1056 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
1057 ew32(CTRL_EXT, ctrl_ext);
1058 }
1059
1060 return ret_val;
1061} 654}
1062 655
1063/****************************************************************************** 656/**
1064 * Adjust SERDES output amplitude based on EEPROM setting. 657 * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting.
1065 * 658 * @hw: Struct containing variables accessed by shared code.
1066 * hw - Struct containing variables accessed by shared code. 659 */
1067 *****************************************************************************/
1068static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) 660static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
1069{ 661{
1070 u16 eeprom_data; 662 u16 eeprom_data;
1071 s32 ret_val; 663 s32 ret_val;
1072 664
1073 DEBUGFUNC("e1000_adjust_serdes_amplitude"); 665 DEBUGFUNC("e1000_adjust_serdes_amplitude");
1074 666
1075 if (hw->media_type != e1000_media_type_internal_serdes) 667 if (hw->media_type != e1000_media_type_internal_serdes)
1076 return E1000_SUCCESS; 668 return E1000_SUCCESS;
1077 669
1078 switch (hw->mac_type) { 670 switch (hw->mac_type) {
1079 case e1000_82545_rev_3: 671 case e1000_82545_rev_3:
1080 case e1000_82546_rev_3: 672 case e1000_82546_rev_3:
1081 break; 673 break;
1082 default: 674 default:
1083 return E1000_SUCCESS; 675 return E1000_SUCCESS;
1084 } 676 }
1085 677
1086 ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data); 678 ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1,
1087 if (ret_val) { 679 &eeprom_data);
1088 return ret_val; 680 if (ret_val) {
1089 } 681 return ret_val;
1090 682 }
1091 if (eeprom_data != EEPROM_RESERVED_WORD) { 683
1092 /* Adjust SERDES output amplitude only. */ 684 if (eeprom_data != EEPROM_RESERVED_WORD) {
1093 eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; 685 /* Adjust SERDES output amplitude only. */
1094 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); 686 eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
1095 if (ret_val) 687 ret_val =
1096 return ret_val; 688 e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
1097 } 689 if (ret_val)
1098 690 return ret_val;
1099 return E1000_SUCCESS; 691 }
692
693 return E1000_SUCCESS;
1100} 694}
1101 695
1102/****************************************************************************** 696/**
1103 * Configures flow control and link settings. 697 * e1000_setup_link - Configures flow control and link settings.
1104 * 698 * @hw: Struct containing variables accessed by shared code
1105 * hw - Struct containing variables accessed by shared code
1106 * 699 *
1107 * Determines which flow control settings to use. Calls the apropriate media- 700 * Determines which flow control settings to use. Calls the appropriate media-
1108 * specific link configuration function. Configures the flow control settings. 701 * specific link configuration function. Configures the flow control settings.
1109 * Assuming the adapter has a valid link partner, a valid link should be 702 * Assuming the adapter has a valid link partner, a valid link should be
1110 * established. Assumes the hardware has previously been reset and the 703 * established. Assumes the hardware has previously been reset and the
1111 * transmitter and receiver are not enabled. 704 * transmitter and receiver are not enabled.
1112 *****************************************************************************/ 705 */
1113s32 e1000_setup_link(struct e1000_hw *hw) 706s32 e1000_setup_link(struct e1000_hw *hw)
1114{ 707{
1115 u32 ctrl_ext; 708 u32 ctrl_ext;
1116 s32 ret_val; 709 s32 ret_val;
1117 u16 eeprom_data; 710 u16 eeprom_data;
1118 711
1119 DEBUGFUNC("e1000_setup_link"); 712 DEBUGFUNC("e1000_setup_link");
1120 713
1121 /* In the case of the phy reset being blocked, we already have a link. 714 /* Read and store word 0x0F of the EEPROM. This word contains bits
1122 * We do not have to set it up again. */ 715 * that determine the hardware's default PAUSE (flow control) mode,
1123 if (e1000_check_phy_reset_block(hw)) 716 * a bit that determines whether the HW defaults to enabling or
1124 return E1000_SUCCESS; 717 * disabling auto-negotiation, and the direction of the
1125 718 * SW defined pins. If there is no SW over-ride of the flow
1126 /* Read and store word 0x0F of the EEPROM. This word contains bits 719 * control setting, then the variable hw->fc will
1127 * that determine the hardware's default PAUSE (flow control) mode, 720 * be initialized based on a value in the EEPROM.
1128 * a bit that determines whether the HW defaults to enabling or 721 */
1129 * disabling auto-negotiation, and the direction of the 722 if (hw->fc == E1000_FC_DEFAULT) {
1130 * SW defined pins. If there is no SW over-ride of the flow 723 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1131 * control setting, then the variable hw->fc will 724 1, &eeprom_data);
1132 * be initialized based on a value in the EEPROM. 725 if (ret_val) {
1133 */ 726 DEBUGOUT("EEPROM Read Error\n");
1134 if (hw->fc == E1000_FC_DEFAULT) { 727 return -E1000_ERR_EEPROM;
1135 switch (hw->mac_type) { 728 }
1136 case e1000_ich8lan: 729 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
1137 case e1000_82573: 730 hw->fc = E1000_FC_NONE;
1138 hw->fc = E1000_FC_FULL; 731 else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
1139 break; 732 EEPROM_WORD0F_ASM_DIR)
1140 default: 733 hw->fc = E1000_FC_TX_PAUSE;
1141 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 734 else
1142 1, &eeprom_data); 735 hw->fc = E1000_FC_FULL;
1143 if (ret_val) { 736 }
1144 DEBUGOUT("EEPROM Read Error\n"); 737
1145 return -E1000_ERR_EEPROM; 738 /* We want to save off the original Flow Control configuration just
1146 } 739 * in case we get disconnected and then reconnected into a different
1147 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) 740 * hub or switch with different Flow Control capabilities.
1148 hw->fc = E1000_FC_NONE; 741 */
1149 else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 742 if (hw->mac_type == e1000_82542_rev2_0)
1150 EEPROM_WORD0F_ASM_DIR) 743 hw->fc &= (~E1000_FC_TX_PAUSE);
1151 hw->fc = E1000_FC_TX_PAUSE; 744
1152 else 745 if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
1153 hw->fc = E1000_FC_FULL; 746 hw->fc &= (~E1000_FC_RX_PAUSE);
1154 break; 747
1155 } 748 hw->original_fc = hw->fc;
1156 } 749
1157 750 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
1158 /* We want to save off the original Flow Control configuration just 751
1159 * in case we get disconnected and then reconnected into a different 752 /* Take the 4 bits from EEPROM word 0x0F that determine the initial
1160 * hub or switch with different Flow Control capabilities. 753 * polarity value for the SW controlled pins, and setup the
1161 */ 754 * Extended Device Control reg with that info.
1162 if (hw->mac_type == e1000_82542_rev2_0) 755 * This is needed because one of the SW controlled pins is used for
1163 hw->fc &= (~E1000_FC_TX_PAUSE); 756 * signal detection. So this should be done before e1000_setup_pcs_link()
1164 757 * or e1000_phy_setup() is called.
1165 if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) 758 */
1166 hw->fc &= (~E1000_FC_RX_PAUSE); 759 if (hw->mac_type == e1000_82543) {
1167 760 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1168 hw->original_fc = hw->fc; 761 1, &eeprom_data);
1169 762 if (ret_val) {
1170 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc); 763 DEBUGOUT("EEPROM Read Error\n");
1171 764 return -E1000_ERR_EEPROM;
1172 /* Take the 4 bits from EEPROM word 0x0F that determine the initial 765 }
1173 * polarity value for the SW controlled pins, and setup the 766 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
1174 * Extended Device Control reg with that info. 767 SWDPIO__EXT_SHIFT);
1175 * This is needed because one of the SW controlled pins is used for 768 ew32(CTRL_EXT, ctrl_ext);
1176 * signal detection. So this should be done before e1000_setup_pcs_link() 769 }
1177 * or e1000_phy_setup() is called. 770
1178 */ 771 /* Call the necessary subroutine to configure the link. */
1179 if (hw->mac_type == e1000_82543) { 772 ret_val = (hw->media_type == e1000_media_type_copper) ?
1180 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 773 e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
1181 1, &eeprom_data); 774
1182 if (ret_val) { 775 /* Initialize the flow control address, type, and PAUSE timer
1183 DEBUGOUT("EEPROM Read Error\n"); 776 * registers to their default values. This is done even if flow
1184 return -E1000_ERR_EEPROM; 777 * control is disabled, because it does not hurt anything to
1185 } 778 * initialize these registers.
1186 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << 779 */
1187 SWDPIO__EXT_SHIFT); 780 DEBUGOUT
1188 ew32(CTRL_EXT, ctrl_ext); 781 ("Initializing the Flow Control address, type and timer regs\n");
1189 } 782
1190 783 ew32(FCT, FLOW_CONTROL_TYPE);
1191 /* Call the necessary subroutine to configure the link. */ 784 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
1192 ret_val = (hw->media_type == e1000_media_type_copper) ? 785 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
1193 e1000_setup_copper_link(hw) : 786
1194 e1000_setup_fiber_serdes_link(hw); 787 ew32(FCTTV, hw->fc_pause_time);
1195 788
1196 /* Initialize the flow control address, type, and PAUSE timer 789 /* Set the flow control receive threshold registers. Normally,
1197 * registers to their default values. This is done even if flow 790 * these registers will be set to a default threshold that may be
1198 * control is disabled, because it does not hurt anything to 791 * adjusted later by the driver's runtime code. However, if the
1199 * initialize these registers. 792 * ability to transmit pause frames in not enabled, then these
1200 */ 793 * registers will be set to 0.
1201 DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); 794 */
1202 795 if (!(hw->fc & E1000_FC_TX_PAUSE)) {
1203 /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */ 796 ew32(FCRTL, 0);
1204 if (hw->mac_type != e1000_ich8lan) { 797 ew32(FCRTH, 0);
1205 ew32(FCT, FLOW_CONTROL_TYPE); 798 } else {
1206 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); 799 /* We need to set up the Receive Threshold high and low water marks
1207 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); 800 * as well as (optionally) enabling the transmission of XON frames.
1208 } 801 */
1209 802 if (hw->fc_send_xon) {
1210 ew32(FCTTV, hw->fc_pause_time); 803 ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
1211 804 ew32(FCRTH, hw->fc_high_water);
1212 /* Set the flow control receive threshold registers. Normally, 805 } else {
1213 * these registers will be set to a default threshold that may be 806 ew32(FCRTL, hw->fc_low_water);
1214 * adjusted later by the driver's runtime code. However, if the 807 ew32(FCRTH, hw->fc_high_water);
1215 * ability to transmit pause frames in not enabled, then these 808 }
1216 * registers will be set to 0. 809 }
1217 */ 810 return ret_val;
1218 if (!(hw->fc & E1000_FC_TX_PAUSE)) {
1219 ew32(FCRTL, 0);
1220 ew32(FCRTH, 0);
1221 } else {
1222 /* We need to set up the Receive Threshold high and low water marks
1223 * as well as (optionally) enabling the transmission of XON frames.
1224 */
1225 if (hw->fc_send_xon) {
1226 ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
1227 ew32(FCRTH, hw->fc_high_water);
1228 } else {
1229 ew32(FCRTL, hw->fc_low_water);
1230 ew32(FCRTH, hw->fc_high_water);
1231 }
1232 }
1233 return ret_val;
1234} 811}
1235 812
1236/****************************************************************************** 813/**
1237 * Sets up link for a fiber based or serdes based adapter 814 * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
1238 * 815 * @hw: Struct containing variables accessed by shared code
1239 * hw - Struct containing variables accessed by shared code
1240 * 816 *
1241 * Manipulates Physical Coding Sublayer functions in order to configure 817 * Manipulates Physical Coding Sublayer functions in order to configure
1242 * link. Assumes the hardware has been previously reset and the transmitter 818 * link. Assumes the hardware has been previously reset and the transmitter
1243 * and receiver are not enabled. 819 * and receiver are not enabled.
1244 *****************************************************************************/ 820 */
1245static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) 821static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
1246{ 822{
1247 u32 ctrl; 823 u32 ctrl;
1248 u32 status; 824 u32 status;
1249 u32 txcw = 0; 825 u32 txcw = 0;
1250 u32 i; 826 u32 i;
1251 u32 signal = 0; 827 u32 signal = 0;
1252 s32 ret_val; 828 s32 ret_val;
1253 829
1254 DEBUGFUNC("e1000_setup_fiber_serdes_link"); 830 DEBUGFUNC("e1000_setup_fiber_serdes_link");
1255 831
1256 /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists 832 /* On adapters with a MAC newer than 82544, SWDP 1 will be
1257 * until explicitly turned off or a power cycle is performed. A read to 833 * set when the optics detect a signal. On older adapters, it will be
1258 * the register does not indicate its status. Therefore, we ensure 834 * cleared when there is a signal. This applies to fiber media only.
1259 * loopback mode is disabled during initialization. 835 * If we're on serdes media, adjust the output amplitude to value
1260 */ 836 * set in the EEPROM.
1261 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) 837 */
1262 ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK); 838 ctrl = er32(CTRL);
1263 839 if (hw->media_type == e1000_media_type_fiber)
1264 /* On adapters with a MAC newer than 82544, SWDP 1 will be 840 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
1265 * set when the optics detect a signal. On older adapters, it will be 841
1266 * cleared when there is a signal. This applies to fiber media only. 842 ret_val = e1000_adjust_serdes_amplitude(hw);
1267 * If we're on serdes media, adjust the output amplitude to value 843 if (ret_val)
1268 * set in the EEPROM. 844 return ret_val;
1269 */ 845
1270 ctrl = er32(CTRL); 846 /* Take the link out of reset */
1271 if (hw->media_type == e1000_media_type_fiber) 847 ctrl &= ~(E1000_CTRL_LRST);
1272 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; 848
1273 849 /* Adjust VCO speed to improve BER performance */
1274 ret_val = e1000_adjust_serdes_amplitude(hw); 850 ret_val = e1000_set_vco_speed(hw);
1275 if (ret_val) 851 if (ret_val)
1276 return ret_val; 852 return ret_val;
1277 853
1278 /* Take the link out of reset */ 854 e1000_config_collision_dist(hw);
1279 ctrl &= ~(E1000_CTRL_LRST); 855
1280 856 /* Check for a software override of the flow control settings, and setup
1281 /* Adjust VCO speed to improve BER performance */ 857 * the device accordingly. If auto-negotiation is enabled, then software
1282 ret_val = e1000_set_vco_speed(hw); 858 * will have to set the "PAUSE" bits to the correct value in the Tranmsit
1283 if (ret_val) 859 * Config Word Register (TXCW) and re-start auto-negotiation. However, if
1284 return ret_val; 860 * auto-negotiation is disabled, then software will have to manually
1285 861 * configure the two flow control enable bits in the CTRL register.
1286 e1000_config_collision_dist(hw); 862 *
1287 863 * The possible values of the "fc" parameter are:
1288 /* Check for a software override of the flow control settings, and setup 864 * 0: Flow control is completely disabled
1289 * the device accordingly. If auto-negotiation is enabled, then software 865 * 1: Rx flow control is enabled (we can receive pause frames, but
1290 * will have to set the "PAUSE" bits to the correct value in the Tranmsit 866 * not send pause frames).
1291 * Config Word Register (TXCW) and re-start auto-negotiation. However, if 867 * 2: Tx flow control is enabled (we can send pause frames but we do
1292 * auto-negotiation is disabled, then software will have to manually 868 * not support receiving pause frames).
1293 * configure the two flow control enable bits in the CTRL register. 869 * 3: Both Rx and TX flow control (symmetric) are enabled.
1294 * 870 */
1295 * The possible values of the "fc" parameter are: 871 switch (hw->fc) {
1296 * 0: Flow control is completely disabled 872 case E1000_FC_NONE:
1297 * 1: Rx flow control is enabled (we can receive pause frames, but 873 /* Flow control is completely disabled by a software over-ride. */
1298 * not send pause frames). 874 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
1299 * 2: Tx flow control is enabled (we can send pause frames but we do 875 break;
1300 * not support receiving pause frames). 876 case E1000_FC_RX_PAUSE:
1301 * 3: Both Rx and TX flow control (symmetric) are enabled. 877 /* RX Flow control is enabled and TX Flow control is disabled by a
1302 */ 878 * software over-ride. Since there really isn't a way to advertise
1303 switch (hw->fc) { 879 * that we are capable of RX Pause ONLY, we will advertise that we
1304 case E1000_FC_NONE: 880 * support both symmetric and asymmetric RX PAUSE. Later, we will
1305 /* Flow control is completely disabled by a software over-ride. */ 881 * disable the adapter's ability to send PAUSE frames.
1306 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 882 */
1307 break; 883 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
1308 case E1000_FC_RX_PAUSE: 884 break;
1309 /* RX Flow control is enabled and TX Flow control is disabled by a 885 case E1000_FC_TX_PAUSE:
1310 * software over-ride. Since there really isn't a way to advertise 886 /* TX Flow control is enabled, and RX Flow control is disabled, by a
1311 * that we are capable of RX Pause ONLY, we will advertise that we 887 * software over-ride.
1312 * support both symmetric and asymmetric RX PAUSE. Later, we will 888 */
1313 * disable the adapter's ability to send PAUSE frames. 889 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
1314 */ 890 break;
1315 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 891 case E1000_FC_FULL:
1316 break; 892 /* Flow control (both RX and TX) is enabled by a software over-ride. */
1317 case E1000_FC_TX_PAUSE: 893 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
1318 /* TX Flow control is enabled, and RX Flow control is disabled, by a 894 break;
1319 * software over-ride. 895 default:
1320 */ 896 DEBUGOUT("Flow control param set incorrectly\n");
1321 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); 897 return -E1000_ERR_CONFIG;
1322 break; 898 break;
1323 case E1000_FC_FULL: 899 }
1324 /* Flow control (both RX and TX) is enabled by a software over-ride. */ 900
1325 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 901 /* Since auto-negotiation is enabled, take the link out of reset (the link
1326 break; 902 * will be in reset, because we previously reset the chip). This will
1327 default: 903 * restart auto-negotiation. If auto-negotiation is successful then the
1328 DEBUGOUT("Flow control param set incorrectly\n"); 904 * link-up status bit will be set and the flow control enable bits (RFCE
1329 return -E1000_ERR_CONFIG; 905 * and TFCE) will be set according to their negotiated value.
1330 break; 906 */
1331 } 907 DEBUGOUT("Auto-negotiation enabled\n");
1332 908
1333 /* Since auto-negotiation is enabled, take the link out of reset (the link 909 ew32(TXCW, txcw);
1334 * will be in reset, because we previously reset the chip). This will 910 ew32(CTRL, ctrl);
1335 * restart auto-negotiation. If auto-neogtiation is successful then the 911 E1000_WRITE_FLUSH();
1336 * link-up status bit will be set and the flow control enable bits (RFCE 912
1337 * and TFCE) will be set according to their negotiated value. 913 hw->txcw = txcw;
1338 */ 914 msleep(1);
1339 DEBUGOUT("Auto-negotiation enabled\n"); 915
1340 916 /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
1341 ew32(TXCW, txcw); 917 * indication in the Device Status Register. Time-out if a link isn't
1342 ew32(CTRL, ctrl); 918 * seen in 500 milliseconds seconds (Auto-negotiation should complete in
1343 E1000_WRITE_FLUSH(); 919 * less than 500 milliseconds even if the other end is doing it in SW).
1344 920 * For internal serdes, we just assume a signal is present, then poll.
1345 hw->txcw = txcw; 921 */
1346 msleep(1); 922 if (hw->media_type == e1000_media_type_internal_serdes ||
1347 923 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
1348 /* If we have a signal (the cable is plugged in) then poll for a "Link-Up" 924 DEBUGOUT("Looking for Link\n");
1349 * indication in the Device Status Register. Time-out if a link isn't 925 for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
1350 * seen in 500 milliseconds seconds (Auto-negotiation should complete in 926 msleep(10);
1351 * less than 500 milliseconds even if the other end is doing it in SW). 927 status = er32(STATUS);
1352 * For internal serdes, we just assume a signal is present, then poll. 928 if (status & E1000_STATUS_LU)
1353 */ 929 break;
1354 if (hw->media_type == e1000_media_type_internal_serdes || 930 }
1355 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { 931 if (i == (LINK_UP_TIMEOUT / 10)) {
1356 DEBUGOUT("Looking for Link\n"); 932 DEBUGOUT("Never got a valid link from auto-neg!!!\n");
1357 for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { 933 hw->autoneg_failed = 1;
1358 msleep(10); 934 /* AutoNeg failed to achieve a link, so we'll call
1359 status = er32(STATUS); 935 * e1000_check_for_link. This routine will force the link up if
1360 if (status & E1000_STATUS_LU) break; 936 * we detect a signal. This will allow us to communicate with
1361 } 937 * non-autonegotiating link partners.
1362 if (i == (LINK_UP_TIMEOUT / 10)) { 938 */
1363 DEBUGOUT("Never got a valid link from auto-neg!!!\n"); 939 ret_val = e1000_check_for_link(hw);
1364 hw->autoneg_failed = 1; 940 if (ret_val) {
1365 /* AutoNeg failed to achieve a link, so we'll call 941 DEBUGOUT("Error while checking for link\n");
1366 * e1000_check_for_link. This routine will force the link up if 942 return ret_val;
1367 * we detect a signal. This will allow us to communicate with 943 }
1368 * non-autonegotiating link partners. 944 hw->autoneg_failed = 0;
1369 */ 945 } else {
1370 ret_val = e1000_check_for_link(hw); 946 hw->autoneg_failed = 0;
1371 if (ret_val) { 947 DEBUGOUT("Valid Link Found\n");
1372 DEBUGOUT("Error while checking for link\n"); 948 }
1373 return ret_val; 949 } else {
1374 } 950 DEBUGOUT("No Signal Detected\n");
1375 hw->autoneg_failed = 0; 951 }
1376 } else { 952 return E1000_SUCCESS;
1377 hw->autoneg_failed = 0;
1378 DEBUGOUT("Valid Link Found\n");
1379 }
1380 } else {
1381 DEBUGOUT("No Signal Detected\n");
1382 }
1383 return E1000_SUCCESS;
1384} 953}
1385 954
1386/****************************************************************************** 955/**
1387* Make sure we have a valid PHY and change PHY mode before link setup. 956 * e1000_copper_link_preconfig - early configuration for copper
1388* 957 * @hw: Struct containing variables accessed by shared code
1389* hw - Struct containing variables accessed by shared code 958 *
1390******************************************************************************/ 959 * Make sure we have a valid PHY and change PHY mode before link setup.
960 */
1391static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) 961static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
1392{ 962{
1393 u32 ctrl; 963 u32 ctrl;
1394 s32 ret_val; 964 s32 ret_val;
1395 u16 phy_data; 965 u16 phy_data;
1396
1397 DEBUGFUNC("e1000_copper_link_preconfig");
1398
1399 ctrl = er32(CTRL);
1400 /* With 82543, we need to force speed and duplex on the MAC equal to what
1401 * the PHY speed and duplex configuration is. In addition, we need to
1402 * perform a hardware reset on the PHY to take it out of reset.
1403 */
1404 if (hw->mac_type > e1000_82543) {
1405 ctrl |= E1000_CTRL_SLU;
1406 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1407 ew32(CTRL, ctrl);
1408 } else {
1409 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
1410 ew32(CTRL, ctrl);
1411 ret_val = e1000_phy_hw_reset(hw);
1412 if (ret_val)
1413 return ret_val;
1414 }
1415
1416 /* Make sure we have a valid PHY */
1417 ret_val = e1000_detect_gig_phy(hw);
1418 if (ret_val) {
1419 DEBUGOUT("Error, did not detect valid phy.\n");
1420 return ret_val;
1421 }
1422 DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
1423
1424 /* Set PHY to class A mode (if necessary) */
1425 ret_val = e1000_set_phy_mode(hw);
1426 if (ret_val)
1427 return ret_val;
1428
1429 if ((hw->mac_type == e1000_82545_rev_3) ||
1430 (hw->mac_type == e1000_82546_rev_3)) {
1431 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1432 phy_data |= 0x00000008;
1433 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1434 }
1435
1436 if (hw->mac_type <= e1000_82543 ||
1437 hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
1438 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
1439 hw->phy_reset_disable = false;
1440
1441 return E1000_SUCCESS;
1442}
1443 966
967 DEBUGFUNC("e1000_copper_link_preconfig");
1444 968
1445/******************************************************************** 969 ctrl = er32(CTRL);
1446* Copper link setup for e1000_phy_igp series. 970 /* With 82543, we need to force speed and duplex on the MAC equal to what
1447* 971 * the PHY speed and duplex configuration is. In addition, we need to
1448* hw - Struct containing variables accessed by shared code 972 * perform a hardware reset on the PHY to take it out of reset.
1449*********************************************************************/ 973 */
1450static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) 974 if (hw->mac_type > e1000_82543) {
1451{ 975 ctrl |= E1000_CTRL_SLU;
1452 u32 led_ctrl; 976 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1453 s32 ret_val; 977 ew32(CTRL, ctrl);
1454 u16 phy_data; 978 } else {
1455 979 ctrl |=
1456 DEBUGFUNC("e1000_copper_link_igp_setup"); 980 (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
1457 981 ew32(CTRL, ctrl);
1458 if (hw->phy_reset_disable) 982 ret_val = e1000_phy_hw_reset(hw);
1459 return E1000_SUCCESS; 983 if (ret_val)
1460 984 return ret_val;
1461 ret_val = e1000_phy_reset(hw); 985 }
1462 if (ret_val) { 986
1463 DEBUGOUT("Error Resetting the PHY\n"); 987 /* Make sure we have a valid PHY */
1464 return ret_val; 988 ret_val = e1000_detect_gig_phy(hw);
1465 } 989 if (ret_val) {
1466 990 DEBUGOUT("Error, did not detect valid phy.\n");
1467 /* Wait 15ms for MAC to configure PHY from eeprom settings */ 991 return ret_val;
1468 msleep(15); 992 }
1469 if (hw->mac_type != e1000_ich8lan) { 993 DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
1470 /* Configure activity LED after PHY reset */ 994
1471 led_ctrl = er32(LEDCTL); 995 /* Set PHY to class A mode (if necessary) */
1472 led_ctrl &= IGP_ACTIVITY_LED_MASK; 996 ret_val = e1000_set_phy_mode(hw);
1473 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 997 if (ret_val)
1474 ew32(LEDCTL, led_ctrl); 998 return ret_val;
1475 } 999
1476 1000 if ((hw->mac_type == e1000_82545_rev_3) ||
1477 /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ 1001 (hw->mac_type == e1000_82546_rev_3)) {
1478 if (hw->phy_type == e1000_phy_igp) { 1002 ret_val =
1479 /* disable lplu d3 during driver init */ 1003 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1480 ret_val = e1000_set_d3_lplu_state(hw, false); 1004 phy_data |= 0x00000008;
1481 if (ret_val) { 1005 ret_val =
1482 DEBUGOUT("Error Disabling LPLU D3\n"); 1006 e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1483 return ret_val; 1007 }
1484 } 1008
1485 } 1009 if (hw->mac_type <= e1000_82543 ||
1486 1010 hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
1487 /* disable lplu d0 during driver init */ 1011 hw->mac_type == e1000_82541_rev_2
1488 ret_val = e1000_set_d0_lplu_state(hw, false); 1012 || hw->mac_type == e1000_82547_rev_2)
1489 if (ret_val) { 1013 hw->phy_reset_disable = false;
1490 DEBUGOUT("Error Disabling LPLU D0\n"); 1014
1491 return ret_val; 1015 return E1000_SUCCESS;
1492 }
1493 /* Configure mdi-mdix settings */
1494 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
1495 if (ret_val)
1496 return ret_val;
1497
1498 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
1499 hw->dsp_config_state = e1000_dsp_config_disabled;
1500 /* Force MDI for earlier revs of the IGP PHY */
1501 phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
1502 hw->mdix = 1;
1503
1504 } else {
1505 hw->dsp_config_state = e1000_dsp_config_enabled;
1506 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
1507
1508 switch (hw->mdix) {
1509 case 1:
1510 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
1511 break;
1512 case 2:
1513 phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
1514 break;
1515 case 0:
1516 default:
1517 phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
1518 break;
1519 }
1520 }
1521 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
1522 if (ret_val)
1523 return ret_val;
1524
1525 /* set auto-master slave resolution settings */
1526 if (hw->autoneg) {
1527 e1000_ms_type phy_ms_setting = hw->master_slave;
1528
1529 if (hw->ffe_config_state == e1000_ffe_config_active)
1530 hw->ffe_config_state = e1000_ffe_config_enabled;
1531
1532 if (hw->dsp_config_state == e1000_dsp_config_activated)
1533 hw->dsp_config_state = e1000_dsp_config_enabled;
1534
1535 /* when autonegotiation advertisment is only 1000Mbps then we
1536 * should disable SmartSpeed and enable Auto MasterSlave
1537 * resolution as hardware default. */
1538 if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
1539 /* Disable SmartSpeed */
1540 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1541 &phy_data);
1542 if (ret_val)
1543 return ret_val;
1544 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1545 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1546 phy_data);
1547 if (ret_val)
1548 return ret_val;
1549 /* Set auto Master/Slave resolution process */
1550 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1551 if (ret_val)
1552 return ret_val;
1553 phy_data &= ~CR_1000T_MS_ENABLE;
1554 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1555 if (ret_val)
1556 return ret_val;
1557 }
1558
1559 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1560 if (ret_val)
1561 return ret_val;
1562
1563 /* load defaults for future use */
1564 hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
1565 ((phy_data & CR_1000T_MS_VALUE) ?
1566 e1000_ms_force_master :
1567 e1000_ms_force_slave) :
1568 e1000_ms_auto;
1569
1570 switch (phy_ms_setting) {
1571 case e1000_ms_force_master:
1572 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
1573 break;
1574 case e1000_ms_force_slave:
1575 phy_data |= CR_1000T_MS_ENABLE;
1576 phy_data &= ~(CR_1000T_MS_VALUE);
1577 break;
1578 case e1000_ms_auto:
1579 phy_data &= ~CR_1000T_MS_ENABLE;
1580 default:
1581 break;
1582 }
1583 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1584 if (ret_val)
1585 return ret_val;
1586 }
1587
1588 return E1000_SUCCESS;
1589} 1016}
1590 1017
1591/******************************************************************** 1018/**
1592* Copper link setup for e1000_phy_gg82563 series. 1019 * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series.
1593* 1020 * @hw: Struct containing variables accessed by shared code
1594* hw - Struct containing variables accessed by shared code 1021 */
1595*********************************************************************/ 1022static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
1596static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1597{ 1023{
1598 s32 ret_val; 1024 u32 led_ctrl;
1599 u16 phy_data; 1025 s32 ret_val;
1600 u32 reg_data; 1026 u16 phy_data;
1601 1027
1602 DEBUGFUNC("e1000_copper_link_ggp_setup"); 1028 DEBUGFUNC("e1000_copper_link_igp_setup");
1603 1029
1604 if (!hw->phy_reset_disable) { 1030 if (hw->phy_reset_disable)
1605 1031 return E1000_SUCCESS;
1606 /* Enable CRS on TX for half-duplex operation. */ 1032
1607 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 1033 ret_val = e1000_phy_reset(hw);
1608 &phy_data); 1034 if (ret_val) {
1609 if (ret_val) 1035 DEBUGOUT("Error Resetting the PHY\n");
1610 return ret_val; 1036 return ret_val;
1611 1037 }
1612 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; 1038
1613 /* Use 25MHz for both link down and 1000BASE-T for Tx clock */ 1039 /* Wait 15ms for MAC to configure PHY from eeprom settings */
1614 phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ; 1040 msleep(15);
1615 1041 /* Configure activity LED after PHY reset */
1616 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 1042 led_ctrl = er32(LEDCTL);
1617 phy_data); 1043 led_ctrl &= IGP_ACTIVITY_LED_MASK;
1618 if (ret_val) 1044 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
1619 return ret_val; 1045 ew32(LEDCTL, led_ctrl);
1620 1046
1621 /* Options: 1047 /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
1622 * MDI/MDI-X = 0 (default) 1048 if (hw->phy_type == e1000_phy_igp) {
1623 * 0 - Auto for all speeds 1049 /* disable lplu d3 during driver init */
1624 * 1 - MDI mode 1050 ret_val = e1000_set_d3_lplu_state(hw, false);
1625 * 2 - MDI-X mode 1051 if (ret_val) {
1626 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) 1052 DEBUGOUT("Error Disabling LPLU D3\n");
1627 */ 1053 return ret_val;
1628 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data); 1054 }
1629 if (ret_val) 1055 }
1630 return ret_val; 1056
1631 1057 /* Configure mdi-mdix settings */
1632 phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; 1058 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
1633 1059 if (ret_val)
1634 switch (hw->mdix) { 1060 return ret_val;
1635 case 1: 1061
1636 phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI; 1062 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
1637 break; 1063 hw->dsp_config_state = e1000_dsp_config_disabled;
1638 case 2: 1064 /* Force MDI for earlier revs of the IGP PHY */
1639 phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; 1065 phy_data &=
1640 break; 1066 ~(IGP01E1000_PSCR_AUTO_MDIX |
1641 case 0: 1067 IGP01E1000_PSCR_FORCE_MDI_MDIX);
1642 default: 1068 hw->mdix = 1;
1643 phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; 1069
1644 break; 1070 } else {
1645 } 1071 hw->dsp_config_state = e1000_dsp_config_enabled;
1646 1072 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
1647 /* Options: 1073
1648 * disable_polarity_correction = 0 (default) 1074 switch (hw->mdix) {
1649 * Automatic Correction for Reversed Cable Polarity 1075 case 1:
1650 * 0 - Disabled 1076 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
1651 * 1 - Enabled 1077 break;
1652 */ 1078 case 2:
1653 phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; 1079 phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
1654 if (hw->disable_polarity_correction == 1) 1080 break;
1655 phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; 1081 case 0:
1656 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); 1082 default:
1657 1083 phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
1658 if (ret_val) 1084 break;
1659 return ret_val; 1085 }
1660 1086 }
1661 /* SW Reset the PHY so all changes take effect */ 1087 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
1662 ret_val = e1000_phy_reset(hw); 1088 if (ret_val)
1663 if (ret_val) { 1089 return ret_val;
1664 DEBUGOUT("Error Resetting the PHY\n"); 1090
1665 return ret_val; 1091 /* set auto-master slave resolution settings */
1666 } 1092 if (hw->autoneg) {
1667 } /* phy_reset_disable */ 1093 e1000_ms_type phy_ms_setting = hw->master_slave;
1668 1094
1669 if (hw->mac_type == e1000_80003es2lan) { 1095 if (hw->ffe_config_state == e1000_ffe_config_active)
1670 /* Bypass RX and TX FIFO's */ 1096 hw->ffe_config_state = e1000_ffe_config_enabled;
1671 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL, 1097
1672 E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | 1098 if (hw->dsp_config_state == e1000_dsp_config_activated)
1673 E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 1099 hw->dsp_config_state = e1000_dsp_config_enabled;
1674 if (ret_val) 1100
1675 return ret_val; 1101 /* when autonegotiation advertisement is only 1000Mbps then we
1676 1102 * should disable SmartSpeed and enable Auto MasterSlave
1677 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data); 1103 * resolution as hardware default. */
1678 if (ret_val) 1104 if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
1679 return ret_val; 1105 /* Disable SmartSpeed */
1680 1106 ret_val =
1681 phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; 1107 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1682 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data); 1108 &phy_data);
1683 1109 if (ret_val)
1684 if (ret_val) 1110 return ret_val;
1685 return ret_val; 1111 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1686 1112 ret_val =
1687 reg_data = er32(CTRL_EXT); 1113 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1688 reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); 1114 phy_data);
1689 ew32(CTRL_EXT, reg_data); 1115 if (ret_val)
1690 1116 return ret_val;
1691 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, 1117 /* Set auto Master/Slave resolution process */
1692 &phy_data); 1118 ret_val =
1693 if (ret_val) 1119 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1694 return ret_val; 1120 if (ret_val)
1695 1121 return ret_val;
1696 /* Do not init these registers when the HW is in IAMT mode, since the 1122 phy_data &= ~CR_1000T_MS_ENABLE;
1697 * firmware will have already initialized them. We only initialize 1123 ret_val =
1698 * them if the HW is not in IAMT mode. 1124 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1699 */ 1125 if (ret_val)
1700 if (!e1000_check_mng_mode(hw)) { 1126 return ret_val;
1701 /* Enable Electrical Idle on the PHY */ 1127 }
1702 phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; 1128
1703 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, 1129 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1704 phy_data); 1130 if (ret_val)
1705 if (ret_val) 1131 return ret_val;
1706 return ret_val; 1132
1707 1133 /* load defaults for future use */
1708 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 1134 hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
1709 &phy_data); 1135 ((phy_data & CR_1000T_MS_VALUE) ?
1710 if (ret_val) 1136 e1000_ms_force_master :
1711 return ret_val; 1137 e1000_ms_force_slave) : e1000_ms_auto;
1712 1138
1713 phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; 1139 switch (phy_ms_setting) {
1714 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 1140 case e1000_ms_force_master:
1715 phy_data); 1141 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
1716 1142 break;
1717 if (ret_val) 1143 case e1000_ms_force_slave:
1718 return ret_val; 1144 phy_data |= CR_1000T_MS_ENABLE;
1719 } 1145 phy_data &= ~(CR_1000T_MS_VALUE);
1720 1146 break;
1721 /* Workaround: Disable padding in Kumeran interface in the MAC 1147 case e1000_ms_auto:
1722 * and in the PHY to avoid CRC errors. 1148 phy_data &= ~CR_1000T_MS_ENABLE;
1723 */ 1149 default:
1724 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL, 1150 break;
1725 &phy_data); 1151 }
1726 if (ret_val) 1152 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1727 return ret_val; 1153 if (ret_val)
1728 phy_data |= GG82563_ICR_DIS_PADDING; 1154 return ret_val;
1729 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL, 1155 }
1730 phy_data); 1156
1731 if (ret_val) 1157 return E1000_SUCCESS;
1732 return ret_val;
1733 }
1734
1735 return E1000_SUCCESS;
1736} 1158}
1737 1159
1738/******************************************************************** 1160/**
1739* Copper link setup for e1000_phy_m88 series. 1161 * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series.
1740* 1162 * @hw: Struct containing variables accessed by shared code
1741* hw - Struct containing variables accessed by shared code 1163 */
1742*********************************************************************/
1743static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) 1164static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1744{ 1165{
1745 s32 ret_val; 1166 s32 ret_val;
1746 u16 phy_data; 1167 u16 phy_data;
1747 1168
1748 DEBUGFUNC("e1000_copper_link_mgp_setup"); 1169 DEBUGFUNC("e1000_copper_link_mgp_setup");
1749 1170
1750 if (hw->phy_reset_disable) 1171 if (hw->phy_reset_disable)
1751 return E1000_SUCCESS; 1172 return E1000_SUCCESS;
1752 1173
1753 /* Enable CRS on TX. This must be set for half-duplex operation. */ 1174 /* Enable CRS on TX. This must be set for half-duplex operation. */
1754 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1175 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1755 if (ret_val) 1176 if (ret_val)
1756 return ret_val; 1177 return ret_val;
1757 1178
1758 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 1179 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
1759 1180
1760 /* Options: 1181 /* Options:
1761 * MDI/MDI-X = 0 (default) 1182 * MDI/MDI-X = 0 (default)
1762 * 0 - Auto for all speeds 1183 * 0 - Auto for all speeds
1763 * 1 - MDI mode 1184 * 1 - MDI mode
1764 * 2 - MDI-X mode 1185 * 2 - MDI-X mode
1765 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) 1186 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
1766 */ 1187 */
1767 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1188 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
1768 1189
1769 switch (hw->mdix) { 1190 switch (hw->mdix) {
1770 case 1: 1191 case 1:
1771 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; 1192 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
1772 break; 1193 break;
1773 case 2: 1194 case 2:
1774 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; 1195 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
1775 break; 1196 break;
1776 case 3: 1197 case 3:
1777 phy_data |= M88E1000_PSCR_AUTO_X_1000T; 1198 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
1778 break; 1199 break;
1779 case 0: 1200 case 0:
1780 default: 1201 default:
1781 phy_data |= M88E1000_PSCR_AUTO_X_MODE; 1202 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
1782 break; 1203 break;
1783 } 1204 }
1784 1205
1785 /* Options: 1206 /* Options:
1786 * disable_polarity_correction = 0 (default) 1207 * disable_polarity_correction = 0 (default)
1787 * Automatic Correction for Reversed Cable Polarity 1208 * Automatic Correction for Reversed Cable Polarity
1788 * 0 - Disabled 1209 * 0 - Disabled
1789 * 1 - Enabled 1210 * 1 - Enabled
1790 */ 1211 */
1791 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 1212 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
1792 if (hw->disable_polarity_correction == 1) 1213 if (hw->disable_polarity_correction == 1)
1793 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 1214 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
1794 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1215 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1795 if (ret_val) 1216 if (ret_val)
1796 return ret_val; 1217 return ret_val;
1797 1218
1798 if (hw->phy_revision < M88E1011_I_REV_4) { 1219 if (hw->phy_revision < M88E1011_I_REV_4) {
1799 /* Force TX_CLK in the Extended PHY Specific Control Register 1220 /* Force TX_CLK in the Extended PHY Specific Control Register
1800 * to 25MHz clock. 1221 * to 25MHz clock.
1801 */ 1222 */
1802 ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1223 ret_val =
1803 if (ret_val) 1224 e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
1804 return ret_val; 1225 &phy_data);
1805 1226 if (ret_val)
1806 phy_data |= M88E1000_EPSCR_TX_CLK_25; 1227 return ret_val;
1807 1228
1808 if ((hw->phy_revision == E1000_REVISION_2) && 1229 phy_data |= M88E1000_EPSCR_TX_CLK_25;
1809 (hw->phy_id == M88E1111_I_PHY_ID)) { 1230
1810 /* Vidalia Phy, set the downshift counter to 5x */ 1231 if ((hw->phy_revision == E1000_REVISION_2) &&
1811 phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); 1232 (hw->phy_id == M88E1111_I_PHY_ID)) {
1812 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; 1233 /* Vidalia Phy, set the downshift counter to 5x */
1813 ret_val = e1000_write_phy_reg(hw, 1234 phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
1814 M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1235 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
1815 if (ret_val) 1236 ret_val = e1000_write_phy_reg(hw,
1816 return ret_val; 1237 M88E1000_EXT_PHY_SPEC_CTRL,
1817 } else { 1238 phy_data);
1818 /* Configure Master and Slave downshift values */ 1239 if (ret_val)
1819 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | 1240 return ret_val;
1820 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); 1241 } else {
1821 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 1242 /* Configure Master and Slave downshift values */
1822 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 1243 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
1823 ret_val = e1000_write_phy_reg(hw, 1244 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
1824 M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1245 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
1825 if (ret_val) 1246 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
1826 return ret_val; 1247 ret_val = e1000_write_phy_reg(hw,
1827 } 1248 M88E1000_EXT_PHY_SPEC_CTRL,
1828 } 1249 phy_data);
1829 1250 if (ret_val)
1830 /* SW Reset the PHY so all changes take effect */ 1251 return ret_val;
1831 ret_val = e1000_phy_reset(hw); 1252 }
1832 if (ret_val) { 1253 }
1833 DEBUGOUT("Error Resetting the PHY\n"); 1254
1834 return ret_val; 1255 /* SW Reset the PHY so all changes take effect */
1835 } 1256 ret_val = e1000_phy_reset(hw);
1836 1257 if (ret_val) {
1837 return E1000_SUCCESS; 1258 DEBUGOUT("Error Resetting the PHY\n");
1259 return ret_val;
1260 }
1261
1262 return E1000_SUCCESS;
1838} 1263}
1839 1264
1840/******************************************************************** 1265/**
1841* Setup auto-negotiation and flow control advertisements, 1266 * e1000_copper_link_autoneg - setup auto-neg
1842* and then perform auto-negotiation. 1267 * @hw: Struct containing variables accessed by shared code
1843* 1268 *
1844* hw - Struct containing variables accessed by shared code 1269 * Setup auto-negotiation and flow control advertisements,
1845*********************************************************************/ 1270 * and then perform auto-negotiation.
1271 */
1846static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) 1272static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1847{ 1273{
1848 s32 ret_val; 1274 s32 ret_val;
1849 u16 phy_data; 1275 u16 phy_data;
1850 1276
1851 DEBUGFUNC("e1000_copper_link_autoneg"); 1277 DEBUGFUNC("e1000_copper_link_autoneg");
1852 1278
1853 /* Perform some bounds checking on the hw->autoneg_advertised 1279 /* Perform some bounds checking on the hw->autoneg_advertised
1854 * parameter. If this variable is zero, then set it to the default. 1280 * parameter. If this variable is zero, then set it to the default.
1855 */ 1281 */
1856 hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; 1282 hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
1857 1283
1858 /* If autoneg_advertised is zero, we assume it was not defaulted 1284 /* If autoneg_advertised is zero, we assume it was not defaulted
1859 * by the calling code so we set to advertise full capability. 1285 * by the calling code so we set to advertise full capability.
1860 */ 1286 */
1861 if (hw->autoneg_advertised == 0) 1287 if (hw->autoneg_advertised == 0)
1862 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1288 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1863 1289
1864 /* IFE phy only supports 10/100 */ 1290 DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
1865 if (hw->phy_type == e1000_phy_ife) 1291 ret_val = e1000_phy_setup_autoneg(hw);
1866 hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; 1292 if (ret_val) {
1867 1293 DEBUGOUT("Error Setting up Auto-Negotiation\n");
1868 DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1294 return ret_val;
1869 ret_val = e1000_phy_setup_autoneg(hw); 1295 }
1870 if (ret_val) { 1296 DEBUGOUT("Restarting Auto-Neg\n");
1871 DEBUGOUT("Error Setting up Auto-Negotiation\n"); 1297
1872 return ret_val; 1298 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
1873 } 1299 * the Auto Neg Restart bit in the PHY control register.
1874 DEBUGOUT("Restarting Auto-Neg\n"); 1300 */
1875 1301 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
1876 /* Restart auto-negotiation by setting the Auto Neg Enable bit and 1302 if (ret_val)
1877 * the Auto Neg Restart bit in the PHY control register. 1303 return ret_val;
1878 */ 1304
1879 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 1305 phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
1880 if (ret_val) 1306 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
1881 return ret_val; 1307 if (ret_val)
1882 1308 return ret_val;
1883 phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 1309
1884 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); 1310 /* Does the user want to wait for Auto-Neg to complete here, or
1885 if (ret_val) 1311 * check at a later time (for example, callback routine).
1886 return ret_val; 1312 */
1887 1313 if (hw->wait_autoneg_complete) {
1888 /* Does the user want to wait for Auto-Neg to complete here, or 1314 ret_val = e1000_wait_autoneg(hw);
1889 * check at a later time (for example, callback routine). 1315 if (ret_val) {
1890 */ 1316 DEBUGOUT
1891 if (hw->wait_autoneg_complete) { 1317 ("Error while waiting for autoneg to complete\n");
1892 ret_val = e1000_wait_autoneg(hw); 1318 return ret_val;
1893 if (ret_val) { 1319 }
1894 DEBUGOUT("Error while waiting for autoneg to complete\n"); 1320 }
1895 return ret_val; 1321
1896 } 1322 hw->get_link_status = true;
1897 } 1323
1898 1324 return E1000_SUCCESS;
1899 hw->get_link_status = true;
1900
1901 return E1000_SUCCESS;
1902} 1325}
1903 1326
1904/****************************************************************************** 1327/**
1905* Config the MAC and the PHY after link is up. 1328 * e1000_copper_link_postconfig - post link setup
1906* 1) Set up the MAC to the current PHY speed/duplex 1329 * @hw: Struct containing variables accessed by shared code
1907* if we are on 82543. If we 1330 *
1908* are on newer silicon, we only need to configure 1331 * Config the MAC and the PHY after link is up.
1909* collision distance in the Transmit Control Register. 1332 * 1) Set up the MAC to the current PHY speed/duplex
1910* 2) Set up flow control on the MAC to that established with 1333 * if we are on 82543. If we
1911* the link partner. 1334 * are on newer silicon, we only need to configure
1912* 3) Config DSP to improve Gigabit link quality for some PHY revisions. 1335 * collision distance in the Transmit Control Register.
1913* 1336 * 2) Set up flow control on the MAC to that established with
1914* hw - Struct containing variables accessed by shared code 1337 * the link partner.
1915******************************************************************************/ 1338 * 3) Config DSP to improve Gigabit link quality for some PHY revisions.
1339 */
1916static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1340static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
1917{ 1341{
1918 s32 ret_val; 1342 s32 ret_val;
1919 DEBUGFUNC("e1000_copper_link_postconfig"); 1343 DEBUGFUNC("e1000_copper_link_postconfig");
1920 1344
1921 if (hw->mac_type >= e1000_82544) { 1345 if (hw->mac_type >= e1000_82544) {
1922 e1000_config_collision_dist(hw); 1346 e1000_config_collision_dist(hw);
1923 } else { 1347 } else {
1924 ret_val = e1000_config_mac_to_phy(hw); 1348 ret_val = e1000_config_mac_to_phy(hw);
1925 if (ret_val) { 1349 if (ret_val) {
1926 DEBUGOUT("Error configuring MAC to PHY settings\n"); 1350 DEBUGOUT("Error configuring MAC to PHY settings\n");
1927 return ret_val; 1351 return ret_val;
1928 } 1352 }
1929 } 1353 }
1930 ret_val = e1000_config_fc_after_link_up(hw); 1354 ret_val = e1000_config_fc_after_link_up(hw);
1931 if (ret_val) { 1355 if (ret_val) {
1932 DEBUGOUT("Error Configuring Flow Control\n"); 1356 DEBUGOUT("Error Configuring Flow Control\n");
1933 return ret_val; 1357 return ret_val;
1934 } 1358 }
1935 1359
1936 /* Config DSP to improve Giga link quality */ 1360 /* Config DSP to improve Giga link quality */
1937 if (hw->phy_type == e1000_phy_igp) { 1361 if (hw->phy_type == e1000_phy_igp) {
1938 ret_val = e1000_config_dsp_after_link_change(hw, true); 1362 ret_val = e1000_config_dsp_after_link_change(hw, true);
1939 if (ret_val) { 1363 if (ret_val) {
1940 DEBUGOUT("Error Configuring DSP after link up\n"); 1364 DEBUGOUT("Error Configuring DSP after link up\n");
1941 return ret_val; 1365 return ret_val;
1942 } 1366 }
1943 } 1367 }
1944 1368
1945 return E1000_SUCCESS; 1369 return E1000_SUCCESS;
1946} 1370}
1947 1371
1948/****************************************************************************** 1372/**
1949* Detects which PHY is present and setup the speed and duplex 1373 * e1000_setup_copper_link - phy/speed/duplex setting
1950* 1374 * @hw: Struct containing variables accessed by shared code
1951* hw - Struct containing variables accessed by shared code 1375 *
1952******************************************************************************/ 1376 * Detects which PHY is present and sets up the speed and duplex
1377 */
1953static s32 e1000_setup_copper_link(struct e1000_hw *hw) 1378static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1954{ 1379{
1955 s32 ret_val; 1380 s32 ret_val;
1956 u16 i; 1381 u16 i;
1957 u16 phy_data; 1382 u16 phy_data;
1958 u16 reg_data = 0; 1383
1959 1384 DEBUGFUNC("e1000_setup_copper_link");
1960 DEBUGFUNC("e1000_setup_copper_link"); 1385
1961 1386 /* Check if it is a valid PHY and set PHY mode if necessary. */
1962 switch (hw->mac_type) { 1387 ret_val = e1000_copper_link_preconfig(hw);
1963 case e1000_80003es2lan: 1388 if (ret_val)
1964 case e1000_ich8lan: 1389 return ret_val;
1965 /* Set the mac to wait the maximum time between each 1390
1966 * iteration and increase the max iterations when 1391 if (hw->phy_type == e1000_phy_igp) {
1967 * polling the phy; this fixes erroneous timeouts at 10Mbps. */ 1392 ret_val = e1000_copper_link_igp_setup(hw);
1968 ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 1393 if (ret_val)
1969 if (ret_val) 1394 return ret_val;
1970 return ret_val; 1395 } else if (hw->phy_type == e1000_phy_m88) {
1971 ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data); 1396 ret_val = e1000_copper_link_mgp_setup(hw);
1972 if (ret_val) 1397 if (ret_val)
1973 return ret_val; 1398 return ret_val;
1974 reg_data |= 0x3F; 1399 }
1975 ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); 1400
1976 if (ret_val) 1401 if (hw->autoneg) {
1977 return ret_val; 1402 /* Setup autoneg and flow control advertisement
1978 default: 1403 * and perform autonegotiation */
1979 break; 1404 ret_val = e1000_copper_link_autoneg(hw);
1980 } 1405 if (ret_val)
1981 1406 return ret_val;
1982 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1407 } else {
1983 ret_val = e1000_copper_link_preconfig(hw); 1408 /* PHY will be set to 10H, 10F, 100H,or 100F
1984 if (ret_val) 1409 * depending on value from forced_speed_duplex. */
1985 return ret_val; 1410 DEBUGOUT("Forcing speed and duplex\n");
1986 1411 ret_val = e1000_phy_force_speed_duplex(hw);
1987 switch (hw->mac_type) { 1412 if (ret_val) {
1988 case e1000_80003es2lan: 1413 DEBUGOUT("Error Forcing Speed and Duplex\n");
1989 /* Kumeran registers are written-only */ 1414 return ret_val;
1990 reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT; 1415 }
1991 reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; 1416 }
1992 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, 1417
1993 reg_data); 1418 /* Check link status. Wait up to 100 microseconds for link to become
1994 if (ret_val) 1419 * valid.
1995 return ret_val; 1420 */
1996 break; 1421 for (i = 0; i < 10; i++) {
1997 default: 1422 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
1998 break; 1423 if (ret_val)
1999 } 1424 return ret_val;
2000 1425 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2001 if (hw->phy_type == e1000_phy_igp || 1426 if (ret_val)
2002 hw->phy_type == e1000_phy_igp_3 || 1427 return ret_val;
2003 hw->phy_type == e1000_phy_igp_2) { 1428
2004 ret_val = e1000_copper_link_igp_setup(hw); 1429 if (phy_data & MII_SR_LINK_STATUS) {
2005 if (ret_val) 1430 /* Config the MAC and PHY after link is up */
2006 return ret_val; 1431 ret_val = e1000_copper_link_postconfig(hw);
2007 } else if (hw->phy_type == e1000_phy_m88) { 1432 if (ret_val)
2008 ret_val = e1000_copper_link_mgp_setup(hw); 1433 return ret_val;
2009 if (ret_val) 1434
2010 return ret_val; 1435 DEBUGOUT("Valid link established!!!\n");
2011 } else if (hw->phy_type == e1000_phy_gg82563) { 1436 return E1000_SUCCESS;
2012 ret_val = e1000_copper_link_ggp_setup(hw); 1437 }
2013 if (ret_val) 1438 udelay(10);
2014 return ret_val; 1439 }
2015 } 1440
2016 1441 DEBUGOUT("Unable to establish link!!!\n");
2017 if (hw->autoneg) { 1442 return E1000_SUCCESS;
2018 /* Setup autoneg and flow control advertisement
2019 * and perform autonegotiation */
2020 ret_val = e1000_copper_link_autoneg(hw);
2021 if (ret_val)
2022 return ret_val;
2023 } else {
2024 /* PHY will be set to 10H, 10F, 100H,or 100F
2025 * depending on value from forced_speed_duplex. */
2026 DEBUGOUT("Forcing speed and duplex\n");
2027 ret_val = e1000_phy_force_speed_duplex(hw);
2028 if (ret_val) {
2029 DEBUGOUT("Error Forcing Speed and Duplex\n");
2030 return ret_val;
2031 }
2032 }
2033
2034 /* Check link status. Wait up to 100 microseconds for link to become
2035 * valid.
2036 */
2037 for (i = 0; i < 10; i++) {
2038 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2039 if (ret_val)
2040 return ret_val;
2041 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2042 if (ret_val)
2043 return ret_val;
2044
2045 if (phy_data & MII_SR_LINK_STATUS) {
2046 /* Config the MAC and PHY after link is up */
2047 ret_val = e1000_copper_link_postconfig(hw);
2048 if (ret_val)
2049 return ret_val;
2050
2051 DEBUGOUT("Valid link established!!!\n");
2052 return E1000_SUCCESS;
2053 }
2054 udelay(10);
2055 }
2056
2057 DEBUGOUT("Unable to establish link!!!\n");
2058 return E1000_SUCCESS;
2059} 1443}
2060 1444
2061/****************************************************************************** 1445/**
2062* Configure the MAC-to-PHY interface for 10/100Mbps 1446 * e1000_phy_setup_autoneg - phy settings
2063* 1447 * @hw: Struct containing variables accessed by shared code
2064* hw - Struct containing variables accessed by shared code 1448 *
2065******************************************************************************/ 1449 * Configures PHY autoneg and flow control advertisement settings
2066static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex) 1450 */
1451s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
2067{ 1452{
2068 s32 ret_val = E1000_SUCCESS; 1453 s32 ret_val;
2069 u32 tipg; 1454 u16 mii_autoneg_adv_reg;
2070 u16 reg_data; 1455 u16 mii_1000t_ctrl_reg;
2071 1456
2072 DEBUGFUNC("e1000_configure_kmrn_for_10_100"); 1457 DEBUGFUNC("e1000_phy_setup_autoneg");
2073 1458
2074 reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT; 1459 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
2075 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, 1460 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
2076 reg_data); 1461 if (ret_val)
2077 if (ret_val) 1462 return ret_val;
2078 return ret_val;
2079 1463
2080 /* Configure Transmit Inter-Packet Gap */ 1464 /* Read the MII 1000Base-T Control Register (Address 9). */
2081 tipg = er32(TIPG); 1465 ret_val =
2082 tipg &= ~E1000_TIPG_IPGT_MASK; 1466 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
2083 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; 1467 if (ret_val)
2084 ew32(TIPG, tipg); 1468 return ret_val;
2085 1469
2086 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); 1470 /* Need to parse both autoneg_advertised and fc and set up
1471 * the appropriate PHY registers. First we will parse for
1472 * autoneg_advertised software override. Since we can advertise
1473 * a plethora of combinations, we need to check each bit
1474 * individually.
1475 */
2087 1476
2088 if (ret_val) 1477 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
2089 return ret_val; 1478 * Advertisement Register (Address 4) and the 1000 mb speed bits in
1479 * the 1000Base-T Control Register (Address 9).
1480 */
1481 mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
1482 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
2090 1483
2091 if (duplex == HALF_DUPLEX) 1484 DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
2092 reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
2093 else
2094 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
2095 1485
2096 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); 1486 /* Do we want to advertise 10 Mb Half Duplex? */
1487 if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
1488 DEBUGOUT("Advertise 10mb Half duplex\n");
1489 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
1490 }
2097 1491
2098 return ret_val; 1492 /* Do we want to advertise 10 Mb Full Duplex? */
2099} 1493 if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
1494 DEBUGOUT("Advertise 10mb Full duplex\n");
1495 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
1496 }
2100 1497
2101static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw) 1498 /* Do we want to advertise 100 Mb Half Duplex? */
2102{ 1499 if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
2103 s32 ret_val = E1000_SUCCESS; 1500 DEBUGOUT("Advertise 100mb Half duplex\n");
2104 u16 reg_data; 1501 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
2105 u32 tipg; 1502 }
2106 1503
2107 DEBUGFUNC("e1000_configure_kmrn_for_1000"); 1504 /* Do we want to advertise 100 Mb Full Duplex? */
1505 if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
1506 DEBUGOUT("Advertise 100mb Full duplex\n");
1507 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
1508 }
2108 1509
2109 reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT; 1510 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
2110 ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, 1511 if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
2111 reg_data); 1512 DEBUGOUT
2112 if (ret_val) 1513 ("Advertise 1000mb Half duplex requested, request denied!\n");
2113 return ret_val; 1514 }
2114 1515
2115 /* Configure Transmit Inter-Packet Gap */ 1516 /* Do we want to advertise 1000 Mb Full Duplex? */
2116 tipg = er32(TIPG); 1517 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
2117 tipg &= ~E1000_TIPG_IPGT_MASK; 1518 DEBUGOUT("Advertise 1000mb Full duplex\n");
2118 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; 1519 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
2119 ew32(TIPG, tipg); 1520 }
2120 1521
2121 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); 1522 /* Check for a software override of the flow control settings, and
1523 * setup the PHY advertisement registers accordingly. If
1524 * auto-negotiation is enabled, then software will have to set the
1525 * "PAUSE" bits to the correct value in the Auto-Negotiation
1526 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
1527 *
1528 * The possible values of the "fc" parameter are:
1529 * 0: Flow control is completely disabled
1530 * 1: Rx flow control is enabled (we can receive pause frames
1531 * but not send pause frames).
1532 * 2: Tx flow control is enabled (we can send pause frames
1533 * but we do not support receiving pause frames).
1534 * 3: Both Rx and TX flow control (symmetric) are enabled.
1535 * other: No software override. The flow control configuration
1536 * in the EEPROM is used.
1537 */
1538 switch (hw->fc) {
1539 case E1000_FC_NONE: /* 0 */
1540 /* Flow control (RX & TX) is completely disabled by a
1541 * software over-ride.
1542 */
1543 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1544 break;
1545 case E1000_FC_RX_PAUSE: /* 1 */
1546 /* RX Flow control is enabled, and TX Flow control is
1547 * disabled, by a software over-ride.
1548 */
1549 /* Since there really isn't a way to advertise that we are
1550 * capable of RX Pause ONLY, we will advertise that we
1551 * support both symmetric and asymmetric RX PAUSE. Later
1552 * (in e1000_config_fc_after_link_up) we will disable the
1553 *hw's ability to send PAUSE frames.
1554 */
1555 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1556 break;
1557 case E1000_FC_TX_PAUSE: /* 2 */
1558 /* TX Flow control is enabled, and RX Flow control is
1559 * disabled, by a software over-ride.
1560 */
1561 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
1562 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
1563 break;
1564 case E1000_FC_FULL: /* 3 */
1565 /* Flow control (both RX and TX) is enabled by a software
1566 * over-ride.
1567 */
1568 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1569 break;
1570 default:
1571 DEBUGOUT("Flow control param set incorrectly\n");
1572 return -E1000_ERR_CONFIG;
1573 }
2122 1574
2123 if (ret_val) 1575 ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
2124 return ret_val; 1576 if (ret_val)
1577 return ret_val;
2125 1578
2126 reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; 1579 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
2127 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
2128 1580
2129 return ret_val; 1581 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
2130} 1582 if (ret_val)
1583 return ret_val;
2131 1584
2132/****************************************************************************** 1585 return E1000_SUCCESS;
2133* Configures PHY autoneg and flow control advertisement settings
2134*
2135* hw - Struct containing variables accessed by shared code
2136******************************************************************************/
2137s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
2138{
2139 s32 ret_val;
2140 u16 mii_autoneg_adv_reg;
2141 u16 mii_1000t_ctrl_reg;
2142
2143 DEBUGFUNC("e1000_phy_setup_autoneg");
2144
2145 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
2146 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
2147 if (ret_val)
2148 return ret_val;
2149
2150 if (hw->phy_type != e1000_phy_ife) {
2151 /* Read the MII 1000Base-T Control Register (Address 9). */
2152 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
2153 if (ret_val)
2154 return ret_val;
2155 } else
2156 mii_1000t_ctrl_reg=0;
2157
2158 /* Need to parse both autoneg_advertised and fc and set up
2159 * the appropriate PHY registers. First we will parse for
2160 * autoneg_advertised software override. Since we can advertise
2161 * a plethora of combinations, we need to check each bit
2162 * individually.
2163 */
2164
2165 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
2166 * Advertisement Register (Address 4) and the 1000 mb speed bits in
2167 * the 1000Base-T Control Register (Address 9).
2168 */
2169 mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
2170 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
2171
2172 DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
2173
2174 /* Do we want to advertise 10 Mb Half Duplex? */
2175 if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
2176 DEBUGOUT("Advertise 10mb Half duplex\n");
2177 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
2178 }
2179
2180 /* Do we want to advertise 10 Mb Full Duplex? */
2181 if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
2182 DEBUGOUT("Advertise 10mb Full duplex\n");
2183 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
2184 }
2185
2186 /* Do we want to advertise 100 Mb Half Duplex? */
2187 if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
2188 DEBUGOUT("Advertise 100mb Half duplex\n");
2189 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
2190 }
2191
2192 /* Do we want to advertise 100 Mb Full Duplex? */
2193 if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
2194 DEBUGOUT("Advertise 100mb Full duplex\n");
2195 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
2196 }
2197
2198 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
2199 if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
2200 DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
2201 }
2202
2203 /* Do we want to advertise 1000 Mb Full Duplex? */
2204 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
2205 DEBUGOUT("Advertise 1000mb Full duplex\n");
2206 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
2207 if (hw->phy_type == e1000_phy_ife) {
2208 DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
2209 }
2210 }
2211
2212 /* Check for a software override of the flow control settings, and
2213 * setup the PHY advertisement registers accordingly. If
2214 * auto-negotiation is enabled, then software will have to set the
2215 * "PAUSE" bits to the correct value in the Auto-Negotiation
2216 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
2217 *
2218 * The possible values of the "fc" parameter are:
2219 * 0: Flow control is completely disabled
2220 * 1: Rx flow control is enabled (we can receive pause frames
2221 * but not send pause frames).
2222 * 2: Tx flow control is enabled (we can send pause frames
2223 * but we do not support receiving pause frames).
2224 * 3: Both Rx and TX flow control (symmetric) are enabled.
2225 * other: No software override. The flow control configuration
2226 * in the EEPROM is used.
2227 */
2228 switch (hw->fc) {
2229 case E1000_FC_NONE: /* 0 */
2230 /* Flow control (RX & TX) is completely disabled by a
2231 * software over-ride.
2232 */
2233 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
2234 break;
2235 case E1000_FC_RX_PAUSE: /* 1 */
2236 /* RX Flow control is enabled, and TX Flow control is
2237 * disabled, by a software over-ride.
2238 */
2239 /* Since there really isn't a way to advertise that we are
2240 * capable of RX Pause ONLY, we will advertise that we
2241 * support both symmetric and asymmetric RX PAUSE. Later
2242 * (in e1000_config_fc_after_link_up) we will disable the
2243 *hw's ability to send PAUSE frames.
2244 */
2245 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
2246 break;
2247 case E1000_FC_TX_PAUSE: /* 2 */
2248 /* TX Flow control is enabled, and RX Flow control is
2249 * disabled, by a software over-ride.
2250 */
2251 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
2252 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
2253 break;
2254 case E1000_FC_FULL: /* 3 */
2255 /* Flow control (both RX and TX) is enabled by a software
2256 * over-ride.
2257 */
2258 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
2259 break;
2260 default:
2261 DEBUGOUT("Flow control param set incorrectly\n");
2262 return -E1000_ERR_CONFIG;
2263 }
2264
2265 ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
2266 if (ret_val)
2267 return ret_val;
2268
2269 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
2270
2271 if (hw->phy_type != e1000_phy_ife) {
2272 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
2273 if (ret_val)
2274 return ret_val;
2275 }
2276
2277 return E1000_SUCCESS;
2278} 1586}
2279 1587
2280/****************************************************************************** 1588/**
2281* Force PHY speed and duplex settings to hw->forced_speed_duplex 1589 * e1000_phy_force_speed_duplex - force link settings
2282* 1590 * @hw: Struct containing variables accessed by shared code
2283* hw - Struct containing variables accessed by shared code 1591 *
2284******************************************************************************/ 1592 * Force PHY speed and duplex settings to hw->forced_speed_duplex
1593 */
2285static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) 1594static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
2286{ 1595{
2287 u32 ctrl; 1596 u32 ctrl;
2288 s32 ret_val; 1597 s32 ret_val;
2289 u16 mii_ctrl_reg; 1598 u16 mii_ctrl_reg;
2290 u16 mii_status_reg; 1599 u16 mii_status_reg;
2291 u16 phy_data; 1600 u16 phy_data;
2292 u16 i; 1601 u16 i;
2293 1602
2294 DEBUGFUNC("e1000_phy_force_speed_duplex"); 1603 DEBUGFUNC("e1000_phy_force_speed_duplex");
2295 1604
2296 /* Turn off Flow control if we are forcing speed and duplex. */ 1605 /* Turn off Flow control if we are forcing speed and duplex. */
2297 hw->fc = E1000_FC_NONE; 1606 hw->fc = E1000_FC_NONE;
2298 1607
2299 DEBUGOUT1("hw->fc = %d\n", hw->fc); 1608 DEBUGOUT1("hw->fc = %d\n", hw->fc);
2300 1609
2301 /* Read the Device Control Register. */ 1610 /* Read the Device Control Register. */
2302 ctrl = er32(CTRL); 1611 ctrl = er32(CTRL);
2303 1612
2304 /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ 1613 /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
2305 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1614 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
2306 ctrl &= ~(DEVICE_SPEED_MASK); 1615 ctrl &= ~(DEVICE_SPEED_MASK);
2307 1616
2308 /* Clear the Auto Speed Detect Enable bit. */ 1617 /* Clear the Auto Speed Detect Enable bit. */
2309 ctrl &= ~E1000_CTRL_ASDE; 1618 ctrl &= ~E1000_CTRL_ASDE;
2310 1619
2311 /* Read the MII Control Register. */ 1620 /* Read the MII Control Register. */
2312 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); 1621 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
2313 if (ret_val) 1622 if (ret_val)
2314 return ret_val; 1623 return ret_val;
2315 1624
2316 /* We need to disable autoneg in order to force link and duplex. */ 1625 /* We need to disable autoneg in order to force link and duplex. */
2317 1626
2318 mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; 1627 mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
2319 1628
2320 /* Are we forcing Full or Half Duplex? */ 1629 /* Are we forcing Full or Half Duplex? */
2321 if (hw->forced_speed_duplex == e1000_100_full || 1630 if (hw->forced_speed_duplex == e1000_100_full ||
2322 hw->forced_speed_duplex == e1000_10_full) { 1631 hw->forced_speed_duplex == e1000_10_full) {
2323 /* We want to force full duplex so we SET the full duplex bits in the 1632 /* We want to force full duplex so we SET the full duplex bits in the
2324 * Device and MII Control Registers. 1633 * Device and MII Control Registers.
2325 */ 1634 */
2326 ctrl |= E1000_CTRL_FD; 1635 ctrl |= E1000_CTRL_FD;
2327 mii_ctrl_reg |= MII_CR_FULL_DUPLEX; 1636 mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
2328 DEBUGOUT("Full Duplex\n"); 1637 DEBUGOUT("Full Duplex\n");
2329 } else { 1638 } else {
2330 /* We want to force half duplex so we CLEAR the full duplex bits in 1639 /* We want to force half duplex so we CLEAR the full duplex bits in
2331 * the Device and MII Control Registers. 1640 * the Device and MII Control Registers.
2332 */ 1641 */
2333 ctrl &= ~E1000_CTRL_FD; 1642 ctrl &= ~E1000_CTRL_FD;
2334 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; 1643 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
2335 DEBUGOUT("Half Duplex\n"); 1644 DEBUGOUT("Half Duplex\n");
2336 } 1645 }
2337 1646
2338 /* Are we forcing 100Mbps??? */ 1647 /* Are we forcing 100Mbps??? */
2339 if (hw->forced_speed_duplex == e1000_100_full || 1648 if (hw->forced_speed_duplex == e1000_100_full ||
2340 hw->forced_speed_duplex == e1000_100_half) { 1649 hw->forced_speed_duplex == e1000_100_half) {
2341 /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ 1650 /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
2342 ctrl |= E1000_CTRL_SPD_100; 1651 ctrl |= E1000_CTRL_SPD_100;
2343 mii_ctrl_reg |= MII_CR_SPEED_100; 1652 mii_ctrl_reg |= MII_CR_SPEED_100;
2344 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); 1653 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
2345 DEBUGOUT("Forcing 100mb "); 1654 DEBUGOUT("Forcing 100mb ");
2346 } else { 1655 } else {
2347 /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ 1656 /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
2348 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); 1657 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2349 mii_ctrl_reg |= MII_CR_SPEED_10; 1658 mii_ctrl_reg |= MII_CR_SPEED_10;
2350 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); 1659 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
2351 DEBUGOUT("Forcing 10mb "); 1660 DEBUGOUT("Forcing 10mb ");
2352 } 1661 }
2353 1662
2354 e1000_config_collision_dist(hw); 1663 e1000_config_collision_dist(hw);
2355 1664
2356 /* Write the configured values back to the Device Control Reg. */ 1665 /* Write the configured values back to the Device Control Reg. */
2357 ew32(CTRL, ctrl); 1666 ew32(CTRL, ctrl);
2358 1667
2359 if ((hw->phy_type == e1000_phy_m88) || 1668 if (hw->phy_type == e1000_phy_m88) {
2360 (hw->phy_type == e1000_phy_gg82563)) { 1669 ret_val =
2361 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1670 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
2362 if (ret_val) 1671 if (ret_val)
2363 return ret_val; 1672 return ret_val;
2364 1673
2365 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 1674 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
2366 * forced whenever speed are duplex are forced. 1675 * forced whenever speed are duplex are forced.
2367 */ 1676 */
2368 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1677 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
2369 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1678 ret_val =
2370 if (ret_val) 1679 e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
2371 return ret_val; 1680 if (ret_val)
2372 1681 return ret_val;
2373 DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data); 1682
2374 1683 DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
2375 /* Need to reset the PHY or these changes will be ignored */ 1684
2376 mii_ctrl_reg |= MII_CR_RESET; 1685 /* Need to reset the PHY or these changes will be ignored */
2377 1686 mii_ctrl_reg |= MII_CR_RESET;
2378 /* Disable MDI-X support for 10/100 */ 1687
2379 } else if (hw->phy_type == e1000_phy_ife) { 1688 /* Disable MDI-X support for 10/100 */
2380 ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data); 1689 } else {
2381 if (ret_val) 1690 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
2382 return ret_val; 1691 * forced whenever speed or duplex are forced.
2383 1692 */
2384 phy_data &= ~IFE_PMC_AUTO_MDIX; 1693 ret_val =
2385 phy_data &= ~IFE_PMC_FORCE_MDIX; 1694 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
2386 1695 if (ret_val)
2387 ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data); 1696 return ret_val;
2388 if (ret_val) 1697
2389 return ret_val; 1698 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
2390 1699 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
2391 } else { 1700
2392 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI 1701 ret_val =
2393 * forced whenever speed or duplex are forced. 1702 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
2394 */ 1703 if (ret_val)
2395 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 1704 return ret_val;
2396 if (ret_val) 1705 }
2397 return ret_val; 1706
2398 1707 /* Write back the modified PHY MII control register. */
2399 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; 1708 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
2400 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; 1709 if (ret_val)
2401 1710 return ret_val;
2402 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); 1711
2403 if (ret_val) 1712 udelay(1);
2404 return ret_val; 1713
2405 } 1714 /* The wait_autoneg_complete flag may be a little misleading here.
2406 1715 * Since we are forcing speed and duplex, Auto-Neg is not enabled.
2407 /* Write back the modified PHY MII control register. */ 1716 * But we do want to delay for a period while forcing only so we
2408 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); 1717 * don't generate false No Link messages. So we will wait here
2409 if (ret_val) 1718 * only if the user has set wait_autoneg_complete to 1, which is
2410 return ret_val; 1719 * the default.
2411 1720 */
2412 udelay(1); 1721 if (hw->wait_autoneg_complete) {
2413 1722 /* We will wait for autoneg to complete. */
2414 /* The wait_autoneg_complete flag may be a little misleading here. 1723 DEBUGOUT("Waiting for forced speed/duplex link.\n");
2415 * Since we are forcing speed and duplex, Auto-Neg is not enabled. 1724 mii_status_reg = 0;
2416 * But we do want to delay for a period while forcing only so we 1725
2417 * don't generate false No Link messages. So we will wait here 1726 /* We will wait for autoneg to complete or 4.5 seconds to expire. */
2418 * only if the user has set wait_autoneg_complete to 1, which is 1727 for (i = PHY_FORCE_TIME; i > 0; i--) {
2419 * the default. 1728 /* Read the MII Status Register and wait for Auto-Neg Complete bit
2420 */ 1729 * to be set.
2421 if (hw->wait_autoneg_complete) { 1730 */
2422 /* We will wait for autoneg to complete. */ 1731 ret_val =
2423 DEBUGOUT("Waiting for forced speed/duplex link.\n"); 1732 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2424 mii_status_reg = 0; 1733 if (ret_val)
2425 1734 return ret_val;
2426 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 1735
2427 for (i = PHY_FORCE_TIME; i > 0; i--) { 1736 ret_val =
2428 /* Read the MII Status Register and wait for Auto-Neg Complete bit 1737 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2429 * to be set. 1738 if (ret_val)
2430 */ 1739 return ret_val;
2431 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1740
2432 if (ret_val) 1741 if (mii_status_reg & MII_SR_LINK_STATUS)
2433 return ret_val; 1742 break;
2434 1743 msleep(100);
2435 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1744 }
2436 if (ret_val) 1745 if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
2437 return ret_val; 1746 /* We didn't get link. Reset the DSP and wait again for link. */
2438 1747 ret_val = e1000_phy_reset_dsp(hw);
2439 if (mii_status_reg & MII_SR_LINK_STATUS) break; 1748 if (ret_val) {
2440 msleep(100); 1749 DEBUGOUT("Error Resetting PHY DSP\n");
2441 } 1750 return ret_val;
2442 if ((i == 0) && 1751 }
2443 ((hw->phy_type == e1000_phy_m88) || 1752 }
2444 (hw->phy_type == e1000_phy_gg82563))) { 1753 /* This loop will early-out if the link condition has been met. */
2445 /* We didn't get link. Reset the DSP and wait again for link. */ 1754 for (i = PHY_FORCE_TIME; i > 0; i--) {
2446 ret_val = e1000_phy_reset_dsp(hw); 1755 if (mii_status_reg & MII_SR_LINK_STATUS)
2447 if (ret_val) { 1756 break;
2448 DEBUGOUT("Error Resetting PHY DSP\n"); 1757 msleep(100);
2449 return ret_val; 1758 /* Read the MII Status Register and wait for Auto-Neg Complete bit
2450 } 1759 * to be set.
2451 } 1760 */
2452 /* This loop will early-out if the link condition has been met. */ 1761 ret_val =
2453 for (i = PHY_FORCE_TIME; i > 0; i--) { 1762 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2454 if (mii_status_reg & MII_SR_LINK_STATUS) break; 1763 if (ret_val)
2455 msleep(100); 1764 return ret_val;
2456 /* Read the MII Status Register and wait for Auto-Neg Complete bit 1765
2457 * to be set. 1766 ret_val =
2458 */ 1767 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2459 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1768 if (ret_val)
2460 if (ret_val) 1769 return ret_val;
2461 return ret_val; 1770 }
2462 1771 }
2463 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1772
2464 if (ret_val) 1773 if (hw->phy_type == e1000_phy_m88) {
2465 return ret_val; 1774 /* Because we reset the PHY above, we need to re-force TX_CLK in the
2466 } 1775 * Extended PHY Specific Control Register to 25MHz clock. This value
2467 } 1776 * defaults back to a 2.5MHz clock when the PHY is reset.
2468 1777 */
2469 if (hw->phy_type == e1000_phy_m88) { 1778 ret_val =
2470 /* Because we reset the PHY above, we need to re-force TX_CLK in the 1779 e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
2471 * Extended PHY Specific Control Register to 25MHz clock. This value 1780 &phy_data);
2472 * defaults back to a 2.5MHz clock when the PHY is reset. 1781 if (ret_val)
2473 */ 1782 return ret_val;
2474 ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1783
2475 if (ret_val) 1784 phy_data |= M88E1000_EPSCR_TX_CLK_25;
2476 return ret_val; 1785 ret_val =
2477 1786 e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
2478 phy_data |= M88E1000_EPSCR_TX_CLK_25; 1787 phy_data);
2479 ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1788 if (ret_val)
2480 if (ret_val) 1789 return ret_val;
2481 return ret_val; 1790
2482 1791 /* In addition, because of the s/w reset above, we need to enable CRS on
2483 /* In addition, because of the s/w reset above, we need to enable CRS on 1792 * TX. This must be set for both full and half duplex operation.
2484 * TX. This must be set for both full and half duplex operation. 1793 */
2485 */ 1794 ret_val =
2486 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1795 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
2487 if (ret_val) 1796 if (ret_val)
2488 return ret_val; 1797 return ret_val;
2489 1798
2490 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 1799 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
2491 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1800 ret_val =
2492 if (ret_val) 1801 e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
2493 return ret_val; 1802 if (ret_val)
2494 1803 return ret_val;
2495 if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && 1804
2496 (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full || 1805 if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543)
2497 hw->forced_speed_duplex == e1000_10_half)) { 1806 && (!hw->autoneg)
2498 ret_val = e1000_polarity_reversal_workaround(hw); 1807 && (hw->forced_speed_duplex == e1000_10_full
2499 if (ret_val) 1808 || hw->forced_speed_duplex == e1000_10_half)) {
2500 return ret_val; 1809 ret_val = e1000_polarity_reversal_workaround(hw);
2501 } 1810 if (ret_val)
2502 } else if (hw->phy_type == e1000_phy_gg82563) { 1811 return ret_val;
2503 /* The TX_CLK of the Extended PHY Specific Control Register defaults 1812 }
2504 * to 2.5MHz on a reset. We need to re-force it back to 25MHz, if 1813 }
2505 * we're not in a forced 10/duplex configuration. */ 1814 return E1000_SUCCESS;
2506 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
2507 if (ret_val)
2508 return ret_val;
2509
2510 phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
2511 if ((hw->forced_speed_duplex == e1000_10_full) ||
2512 (hw->forced_speed_duplex == e1000_10_half))
2513 phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
2514 else
2515 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
2516
2517 /* Also due to the reset, we need to enable CRS on Tx. */
2518 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
2519
2520 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
2521 if (ret_val)
2522 return ret_val;
2523 }
2524 return E1000_SUCCESS;
2525} 1815}
2526 1816
2527/****************************************************************************** 1817/**
2528* Sets the collision distance in the Transmit Control register 1818 * e1000_config_collision_dist - set collision distance register
2529* 1819 * @hw: Struct containing variables accessed by shared code
2530* hw - Struct containing variables accessed by shared code 1820 *
2531* 1821 * Sets the collision distance in the Transmit Control register.
2532* Link should have been established previously. Reads the speed and duplex 1822 * Link should have been established previously. Reads the speed and duplex
2533* information from the Device Status register. 1823 * information from the Device Status register.
2534******************************************************************************/ 1824 */
2535void e1000_config_collision_dist(struct e1000_hw *hw) 1825void e1000_config_collision_dist(struct e1000_hw *hw)
2536{ 1826{
2537 u32 tctl, coll_dist; 1827 u32 tctl, coll_dist;
2538 1828
2539 DEBUGFUNC("e1000_config_collision_dist"); 1829 DEBUGFUNC("e1000_config_collision_dist");
2540 1830
2541 if (hw->mac_type < e1000_82543) 1831 if (hw->mac_type < e1000_82543)
2542 coll_dist = E1000_COLLISION_DISTANCE_82542; 1832 coll_dist = E1000_COLLISION_DISTANCE_82542;
2543 else 1833 else
2544 coll_dist = E1000_COLLISION_DISTANCE; 1834 coll_dist = E1000_COLLISION_DISTANCE;
2545 1835
2546 tctl = er32(TCTL); 1836 tctl = er32(TCTL);
2547 1837
2548 tctl &= ~E1000_TCTL_COLD; 1838 tctl &= ~E1000_TCTL_COLD;
2549 tctl |= coll_dist << E1000_COLD_SHIFT; 1839 tctl |= coll_dist << E1000_COLD_SHIFT;
2550 1840
2551 ew32(TCTL, tctl); 1841 ew32(TCTL, tctl);
2552 E1000_WRITE_FLUSH(); 1842 E1000_WRITE_FLUSH();
2553} 1843}
2554 1844
2555/****************************************************************************** 1845/**
2556* Sets MAC speed and duplex settings to reflect the those in the PHY 1846 * e1000_config_mac_to_phy - sync phy and mac settings
2557* 1847 * @hw: Struct containing variables accessed by shared code
2558* hw - Struct containing variables accessed by shared code 1848 * @mii_reg: data to write to the MII control register
2559* mii_reg - data to write to the MII control register 1849 *
2560* 1850 * Sets MAC speed and duplex settings to reflect the those in the PHY
2561* The contents of the PHY register containing the needed information need to 1851 * The contents of the PHY register containing the needed information need to
2562* be passed in. 1852 * be passed in.
2563******************************************************************************/ 1853 */
2564static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) 1854static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
2565{ 1855{
2566 u32 ctrl; 1856 u32 ctrl;
2567 s32 ret_val; 1857 s32 ret_val;
2568 u16 phy_data; 1858 u16 phy_data;
2569 1859
2570 DEBUGFUNC("e1000_config_mac_to_phy"); 1860 DEBUGFUNC("e1000_config_mac_to_phy");
2571 1861
2572 /* 82544 or newer MAC, Auto Speed Detection takes care of 1862 /* 82544 or newer MAC, Auto Speed Detection takes care of
2573 * MAC speed/duplex configuration.*/ 1863 * MAC speed/duplex configuration.*/
2574 if (hw->mac_type >= e1000_82544) 1864 if (hw->mac_type >= e1000_82544)
2575 return E1000_SUCCESS; 1865 return E1000_SUCCESS;
2576 1866
2577 /* Read the Device Control Register and set the bits to Force Speed 1867 /* Read the Device Control Register and set the bits to Force Speed
2578 * and Duplex. 1868 * and Duplex.
2579 */ 1869 */
2580 ctrl = er32(CTRL); 1870 ctrl = er32(CTRL);
2581 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1871 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
2582 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); 1872 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
2583 1873
2584 /* Set up duplex in the Device Control and Transmit Control 1874 /* Set up duplex in the Device Control and Transmit Control
2585 * registers depending on negotiated values. 1875 * registers depending on negotiated values.
2586 */ 1876 */
2587 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 1877 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
2588 if (ret_val) 1878 if (ret_val)
2589 return ret_val; 1879 return ret_val;
2590 1880
2591 if (phy_data & M88E1000_PSSR_DPLX) 1881 if (phy_data & M88E1000_PSSR_DPLX)
2592 ctrl |= E1000_CTRL_FD; 1882 ctrl |= E1000_CTRL_FD;
2593 else 1883 else
2594 ctrl &= ~E1000_CTRL_FD; 1884 ctrl &= ~E1000_CTRL_FD;
2595 1885
2596 e1000_config_collision_dist(hw); 1886 e1000_config_collision_dist(hw);
2597 1887
2598 /* Set up speed in the Device Control register depending on 1888 /* Set up speed in the Device Control register depending on
2599 * negotiated values. 1889 * negotiated values.
2600 */ 1890 */
2601 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1891 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
2602 ctrl |= E1000_CTRL_SPD_1000; 1892 ctrl |= E1000_CTRL_SPD_1000;
2603 else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) 1893 else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
2604 ctrl |= E1000_CTRL_SPD_100; 1894 ctrl |= E1000_CTRL_SPD_100;
2605 1895
2606 /* Write the configured values back to the Device Control Reg. */ 1896 /* Write the configured values back to the Device Control Reg. */
2607 ew32(CTRL, ctrl); 1897 ew32(CTRL, ctrl);
2608 return E1000_SUCCESS; 1898 return E1000_SUCCESS;
2609} 1899}
2610 1900
2611/****************************************************************************** 1901/**
2612 * Forces the MAC's flow control settings. 1902 * e1000_force_mac_fc - force flow control settings
2613 * 1903 * @hw: Struct containing variables accessed by shared code
2614 * hw - Struct containing variables accessed by shared code
2615 * 1904 *
1905 * Forces the MAC's flow control settings.
2616 * Sets the TFCE and RFCE bits in the device control register to reflect 1906 * Sets the TFCE and RFCE bits in the device control register to reflect
2617 * the adapter settings. TFCE and RFCE need to be explicitly set by 1907 * the adapter settings. TFCE and RFCE need to be explicitly set by
2618 * software when a Copper PHY is used because autonegotiation is managed 1908 * software when a Copper PHY is used because autonegotiation is managed
2619 * by the PHY rather than the MAC. Software must also configure these 1909 * by the PHY rather than the MAC. Software must also configure these
2620 * bits when link is forced on a fiber connection. 1910 * bits when link is forced on a fiber connection.
2621 *****************************************************************************/ 1911 */
2622s32 e1000_force_mac_fc(struct e1000_hw *hw) 1912s32 e1000_force_mac_fc(struct e1000_hw *hw)
2623{ 1913{
2624 u32 ctrl; 1914 u32 ctrl;
2625 1915
2626 DEBUGFUNC("e1000_force_mac_fc"); 1916 DEBUGFUNC("e1000_force_mac_fc");
2627 1917
2628 /* Get the current configuration of the Device Control Register */ 1918 /* Get the current configuration of the Device Control Register */
2629 ctrl = er32(CTRL); 1919 ctrl = er32(CTRL);
2630 1920
2631 /* Because we didn't get link via the internal auto-negotiation 1921 /* Because we didn't get link via the internal auto-negotiation
2632 * mechanism (we either forced link or we got link via PHY 1922 * mechanism (we either forced link or we got link via PHY
2633 * auto-neg), we have to manually enable/disable transmit an 1923 * auto-neg), we have to manually enable/disable transmit an
2634 * receive flow control. 1924 * receive flow control.
2635 * 1925 *
2636 * The "Case" statement below enables/disable flow control 1926 * The "Case" statement below enables/disable flow control
2637 * according to the "hw->fc" parameter. 1927 * according to the "hw->fc" parameter.
2638 * 1928 *
2639 * The possible values of the "fc" parameter are: 1929 * The possible values of the "fc" parameter are:
2640 * 0: Flow control is completely disabled 1930 * 0: Flow control is completely disabled
2641 * 1: Rx flow control is enabled (we can receive pause 1931 * 1: Rx flow control is enabled (we can receive pause
2642 * frames but not send pause frames). 1932 * frames but not send pause frames).
2643 * 2: Tx flow control is enabled (we can send pause frames 1933 * 2: Tx flow control is enabled (we can send pause frames
2644 * frames but we do not receive pause frames). 1934 * frames but we do not receive pause frames).
2645 * 3: Both Rx and TX flow control (symmetric) is enabled. 1935 * 3: Both Rx and TX flow control (symmetric) is enabled.
2646 * other: No other values should be possible at this point. 1936 * other: No other values should be possible at this point.
2647 */ 1937 */
2648 1938
2649 switch (hw->fc) { 1939 switch (hw->fc) {
2650 case E1000_FC_NONE: 1940 case E1000_FC_NONE:
2651 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); 1941 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
2652 break; 1942 break;
2653 case E1000_FC_RX_PAUSE: 1943 case E1000_FC_RX_PAUSE:
2654 ctrl &= (~E1000_CTRL_TFCE); 1944 ctrl &= (~E1000_CTRL_TFCE);
2655 ctrl |= E1000_CTRL_RFCE; 1945 ctrl |= E1000_CTRL_RFCE;
2656 break; 1946 break;
2657 case E1000_FC_TX_PAUSE: 1947 case E1000_FC_TX_PAUSE:
2658 ctrl &= (~E1000_CTRL_RFCE); 1948 ctrl &= (~E1000_CTRL_RFCE);
2659 ctrl |= E1000_CTRL_TFCE; 1949 ctrl |= E1000_CTRL_TFCE;
2660 break; 1950 break;
2661 case E1000_FC_FULL: 1951 case E1000_FC_FULL:
2662 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); 1952 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
2663 break; 1953 break;
2664 default: 1954 default:
2665 DEBUGOUT("Flow control param set incorrectly\n"); 1955 DEBUGOUT("Flow control param set incorrectly\n");
2666 return -E1000_ERR_CONFIG; 1956 return -E1000_ERR_CONFIG;
2667 } 1957 }
2668 1958
2669 /* Disable TX Flow Control for 82542 (rev 2.0) */ 1959 /* Disable TX Flow Control for 82542 (rev 2.0) */
2670 if (hw->mac_type == e1000_82542_rev2_0) 1960 if (hw->mac_type == e1000_82542_rev2_0)
2671 ctrl &= (~E1000_CTRL_TFCE); 1961 ctrl &= (~E1000_CTRL_TFCE);
2672 1962
2673 ew32(CTRL, ctrl); 1963 ew32(CTRL, ctrl);
2674 return E1000_SUCCESS; 1964 return E1000_SUCCESS;
2675} 1965}
2676 1966
2677/****************************************************************************** 1967/**
2678 * Configures flow control settings after link is established 1968 * e1000_config_fc_after_link_up - configure flow control after autoneg
2679 * 1969 * @hw: Struct containing variables accessed by shared code
2680 * hw - Struct containing variables accessed by shared code
2681 * 1970 *
1971 * Configures flow control settings after link is established
2682 * Should be called immediately after a valid link has been established. 1972 * Should be called immediately after a valid link has been established.
2683 * Forces MAC flow control settings if link was forced. When in MII/GMII mode 1973 * Forces MAC flow control settings if link was forced. When in MII/GMII mode
2684 * and autonegotiation is enabled, the MAC flow control settings will be set 1974 * and autonegotiation is enabled, the MAC flow control settings will be set
2685 * based on the flow control negotiated by the PHY. In TBI mode, the TFCE 1975 * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
2686 * and RFCE bits will be automaticaly set to the negotiated flow control mode. 1976 * and RFCE bits will be automatically set to the negotiated flow control mode.
2687 *****************************************************************************/ 1977 */
2688static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) 1978static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2689{ 1979{
2690 s32 ret_val; 1980 s32 ret_val;
2691 u16 mii_status_reg; 1981 u16 mii_status_reg;
2692 u16 mii_nway_adv_reg; 1982 u16 mii_nway_adv_reg;
2693 u16 mii_nway_lp_ability_reg; 1983 u16 mii_nway_lp_ability_reg;
2694 u16 speed; 1984 u16 speed;
2695 u16 duplex; 1985 u16 duplex;
2696 1986
2697 DEBUGFUNC("e1000_config_fc_after_link_up"); 1987 DEBUGFUNC("e1000_config_fc_after_link_up");
2698 1988
2699 /* Check for the case where we have fiber media and auto-neg failed 1989 /* Check for the case where we have fiber media and auto-neg failed
2700 * so we had to force link. In this case, we need to force the 1990 * so we had to force link. In this case, we need to force the
2701 * configuration of the MAC to match the "fc" parameter. 1991 * configuration of the MAC to match the "fc" parameter.
2702 */ 1992 */
2703 if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) || 1993 if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed))
2704 ((hw->media_type == e1000_media_type_internal_serdes) && 1994 || ((hw->media_type == e1000_media_type_internal_serdes)
2705 (hw->autoneg_failed)) || 1995 && (hw->autoneg_failed))
2706 ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) { 1996 || ((hw->media_type == e1000_media_type_copper)
2707 ret_val = e1000_force_mac_fc(hw); 1997 && (!hw->autoneg))) {
2708 if (ret_val) { 1998 ret_val = e1000_force_mac_fc(hw);
2709 DEBUGOUT("Error forcing flow control settings\n"); 1999 if (ret_val) {
2710 return ret_val; 2000 DEBUGOUT("Error forcing flow control settings\n");
2711 } 2001 return ret_val;
2712 } 2002 }
2713 2003 }
2714 /* Check for the case where we have copper media and auto-neg is 2004
2715 * enabled. In this case, we need to check and see if Auto-Neg 2005 /* Check for the case where we have copper media and auto-neg is
2716 * has completed, and if so, how the PHY and link partner has 2006 * enabled. In this case, we need to check and see if Auto-Neg
2717 * flow control configured. 2007 * has completed, and if so, how the PHY and link partner has
2718 */ 2008 * flow control configured.
2719 if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { 2009 */
2720 /* Read the MII Status Register and check to see if AutoNeg 2010 if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
2721 * has completed. We read this twice because this reg has 2011 /* Read the MII Status Register and check to see if AutoNeg
2722 * some "sticky" (latched) bits. 2012 * has completed. We read this twice because this reg has
2723 */ 2013 * some "sticky" (latched) bits.
2724 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 2014 */
2725 if (ret_val) 2015 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2726 return ret_val; 2016 if (ret_val)
2727 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 2017 return ret_val;
2728 if (ret_val) 2018 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
2729 return ret_val; 2019 if (ret_val)
2730 2020 return ret_val;
2731 if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { 2021
2732 /* The AutoNeg process has completed, so we now need to 2022 if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
2733 * read both the Auto Negotiation Advertisement Register 2023 /* The AutoNeg process has completed, so we now need to
2734 * (Address 4) and the Auto_Negotiation Base Page Ability 2024 * read both the Auto Negotiation Advertisement Register
2735 * Register (Address 5) to determine how flow control was 2025 * (Address 4) and the Auto_Negotiation Base Page Ability
2736 * negotiated. 2026 * Register (Address 5) to determine how flow control was
2737 */ 2027 * negotiated.
2738 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, 2028 */
2739 &mii_nway_adv_reg); 2029 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
2740 if (ret_val) 2030 &mii_nway_adv_reg);
2741 return ret_val; 2031 if (ret_val)
2742 ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, 2032 return ret_val;
2743 &mii_nway_lp_ability_reg); 2033 ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
2744 if (ret_val) 2034 &mii_nway_lp_ability_reg);
2745 return ret_val; 2035 if (ret_val)
2746 2036 return ret_val;
2747 /* Two bits in the Auto Negotiation Advertisement Register 2037
2748 * (Address 4) and two bits in the Auto Negotiation Base 2038 /* Two bits in the Auto Negotiation Advertisement Register
2749 * Page Ability Register (Address 5) determine flow control 2039 * (Address 4) and two bits in the Auto Negotiation Base
2750 * for both the PHY and the link partner. The following 2040 * Page Ability Register (Address 5) determine flow control
2751 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, 2041 * for both the PHY and the link partner. The following
2752 * 1999, describes these PAUSE resolution bits and how flow 2042 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
2753 * control is determined based upon these settings. 2043 * 1999, describes these PAUSE resolution bits and how flow
2754 * NOTE: DC = Don't Care 2044 * control is determined based upon these settings.
2755 * 2045 * NOTE: DC = Don't Care
2756 * LOCAL DEVICE | LINK PARTNER 2046 *
2757 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution 2047 * LOCAL DEVICE | LINK PARTNER
2758 *-------|---------|-------|---------|-------------------- 2048 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
2759 * 0 | 0 | DC | DC | E1000_FC_NONE 2049 *-------|---------|-------|---------|--------------------
2760 * 0 | 1 | 0 | DC | E1000_FC_NONE 2050 * 0 | 0 | DC | DC | E1000_FC_NONE
2761 * 0 | 1 | 1 | 0 | E1000_FC_NONE 2051 * 0 | 1 | 0 | DC | E1000_FC_NONE
2762 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE 2052 * 0 | 1 | 1 | 0 | E1000_FC_NONE
2763 * 1 | 0 | 0 | DC | E1000_FC_NONE 2053 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
2764 * 1 | DC | 1 | DC | E1000_FC_FULL 2054 * 1 | 0 | 0 | DC | E1000_FC_NONE
2765 * 1 | 1 | 0 | 0 | E1000_FC_NONE 2055 * 1 | DC | 1 | DC | E1000_FC_FULL
2766 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE 2056 * 1 | 1 | 0 | 0 | E1000_FC_NONE
2767 * 2057 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
2768 */ 2058 *
2769 /* Are both PAUSE bits set to 1? If so, this implies 2059 */
2770 * Symmetric Flow Control is enabled at both ends. The 2060 /* Are both PAUSE bits set to 1? If so, this implies
2771 * ASM_DIR bits are irrelevant per the spec. 2061 * Symmetric Flow Control is enabled at both ends. The
2772 * 2062 * ASM_DIR bits are irrelevant per the spec.
2773 * For Symmetric Flow Control: 2063 *
2774 * 2064 * For Symmetric Flow Control:
2775 * LOCAL DEVICE | LINK PARTNER 2065 *
2776 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2066 * LOCAL DEVICE | LINK PARTNER
2777 *-------|---------|-------|---------|-------------------- 2067 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2778 * 1 | DC | 1 | DC | E1000_FC_FULL 2068 *-------|---------|-------|---------|--------------------
2779 * 2069 * 1 | DC | 1 | DC | E1000_FC_FULL
2780 */ 2070 *
2781 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 2071 */
2782 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 2072 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
2783 /* Now we need to check if the user selected RX ONLY 2073 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
2784 * of pause frames. In this case, we had to advertise 2074 /* Now we need to check if the user selected RX ONLY
2785 * FULL flow control because we could not advertise RX 2075 * of pause frames. In this case, we had to advertise
2786 * ONLY. Hence, we must now check to see if we need to 2076 * FULL flow control because we could not advertise RX
2787 * turn OFF the TRANSMISSION of PAUSE frames. 2077 * ONLY. Hence, we must now check to see if we need to
2788 */ 2078 * turn OFF the TRANSMISSION of PAUSE frames.
2789 if (hw->original_fc == E1000_FC_FULL) { 2079 */
2790 hw->fc = E1000_FC_FULL; 2080 if (hw->original_fc == E1000_FC_FULL) {
2791 DEBUGOUT("Flow Control = FULL.\n"); 2081 hw->fc = E1000_FC_FULL;
2792 } else { 2082 DEBUGOUT("Flow Control = FULL.\n");
2793 hw->fc = E1000_FC_RX_PAUSE; 2083 } else {
2794 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2084 hw->fc = E1000_FC_RX_PAUSE;
2795 } 2085 DEBUGOUT
2796 } 2086 ("Flow Control = RX PAUSE frames only.\n");
2797 /* For receiving PAUSE frames ONLY. 2087 }
2798 * 2088 }
2799 * LOCAL DEVICE | LINK PARTNER 2089 /* For receiving PAUSE frames ONLY.
2800 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2090 *
2801 *-------|---------|-------|---------|-------------------- 2091 * LOCAL DEVICE | LINK PARTNER
2802 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE 2092 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2803 * 2093 *-------|---------|-------|---------|--------------------
2804 */ 2094 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
2805 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && 2095 *
2806 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 2096 */
2807 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 2097 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
2808 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 2098 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
2809 hw->fc = E1000_FC_TX_PAUSE; 2099 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
2810 DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); 2100 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
2811 } 2101 {
2812 /* For transmitting PAUSE frames ONLY. 2102 hw->fc = E1000_FC_TX_PAUSE;
2813 * 2103 DEBUGOUT
2814 * LOCAL DEVICE | LINK PARTNER 2104 ("Flow Control = TX PAUSE frames only.\n");
2815 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2105 }
2816 *-------|---------|-------|---------|-------------------- 2106 /* For transmitting PAUSE frames ONLY.
2817 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE 2107 *
2818 * 2108 * LOCAL DEVICE | LINK PARTNER
2819 */ 2109 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2820 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 2110 *-------|---------|-------|---------|--------------------
2821 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 2111 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
2822 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 2112 *
2823 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 2113 */
2824 hw->fc = E1000_FC_RX_PAUSE; 2114 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
2825 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2115 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
2826 } 2116 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
2827 /* Per the IEEE spec, at this point flow control should be 2117 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
2828 * disabled. However, we want to consider that we could 2118 {
2829 * be connected to a legacy switch that doesn't advertise 2119 hw->fc = E1000_FC_RX_PAUSE;
2830 * desired flow control, but can be forced on the link 2120 DEBUGOUT
2831 * partner. So if we advertised no flow control, that is 2121 ("Flow Control = RX PAUSE frames only.\n");
2832 * what we will resolve to. If we advertised some kind of 2122 }
2833 * receive capability (Rx Pause Only or Full Flow Control) 2123 /* Per the IEEE spec, at this point flow control should be
2834 * and the link partner advertised none, we will configure 2124 * disabled. However, we want to consider that we could
2835 * ourselves to enable Rx Flow Control only. We can do 2125 * be connected to a legacy switch that doesn't advertise
2836 * this safely for two reasons: If the link partner really 2126 * desired flow control, but can be forced on the link
2837 * didn't want flow control enabled, and we enable Rx, no 2127 * partner. So if we advertised no flow control, that is
2838 * harm done since we won't be receiving any PAUSE frames 2128 * what we will resolve to. If we advertised some kind of
2839 * anyway. If the intent on the link partner was to have 2129 * receive capability (Rx Pause Only or Full Flow Control)
2840 * flow control enabled, then by us enabling RX only, we 2130 * and the link partner advertised none, we will configure
2841 * can at least receive pause frames and process them. 2131 * ourselves to enable Rx Flow Control only. We can do
2842 * This is a good idea because in most cases, since we are 2132 * this safely for two reasons: If the link partner really
2843 * predominantly a server NIC, more times than not we will 2133 * didn't want flow control enabled, and we enable Rx, no
2844 * be asked to delay transmission of packets than asking 2134 * harm done since we won't be receiving any PAUSE frames
2845 * our link partner to pause transmission of frames. 2135 * anyway. If the intent on the link partner was to have
2846 */ 2136 * flow control enabled, then by us enabling RX only, we
2847 else if ((hw->original_fc == E1000_FC_NONE || 2137 * can at least receive pause frames and process them.
2848 hw->original_fc == E1000_FC_TX_PAUSE) || 2138 * This is a good idea because in most cases, since we are
2849 hw->fc_strict_ieee) { 2139 * predominantly a server NIC, more times than not we will
2850 hw->fc = E1000_FC_NONE; 2140 * be asked to delay transmission of packets than asking
2851 DEBUGOUT("Flow Control = NONE.\n"); 2141 * our link partner to pause transmission of frames.
2852 } else { 2142 */
2853 hw->fc = E1000_FC_RX_PAUSE; 2143 else if ((hw->original_fc == E1000_FC_NONE ||
2854 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2144 hw->original_fc == E1000_FC_TX_PAUSE) ||
2855 } 2145 hw->fc_strict_ieee) {
2856 2146 hw->fc = E1000_FC_NONE;
2857 /* Now we need to do one last check... If we auto- 2147 DEBUGOUT("Flow Control = NONE.\n");
2858 * negotiated to HALF DUPLEX, flow control should not be 2148 } else {
2859 * enabled per IEEE 802.3 spec. 2149 hw->fc = E1000_FC_RX_PAUSE;
2860 */ 2150 DEBUGOUT
2861 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); 2151 ("Flow Control = RX PAUSE frames only.\n");
2862 if (ret_val) { 2152 }
2863 DEBUGOUT("Error getting link speed and duplex\n"); 2153
2864 return ret_val; 2154 /* Now we need to do one last check... If we auto-
2865 } 2155 * negotiated to HALF DUPLEX, flow control should not be
2866 2156 * enabled per IEEE 802.3 spec.
2867 if (duplex == HALF_DUPLEX) 2157 */
2868 hw->fc = E1000_FC_NONE; 2158 ret_val =
2869 2159 e1000_get_speed_and_duplex(hw, &speed, &duplex);
2870 /* Now we call a subroutine to actually force the MAC 2160 if (ret_val) {
2871 * controller to use the correct flow control settings. 2161 DEBUGOUT
2872 */ 2162 ("Error getting link speed and duplex\n");
2873 ret_val = e1000_force_mac_fc(hw); 2163 return ret_val;
2874 if (ret_val) { 2164 }
2875 DEBUGOUT("Error forcing flow control settings\n"); 2165
2876 return ret_val; 2166 if (duplex == HALF_DUPLEX)
2877 } 2167 hw->fc = E1000_FC_NONE;
2878 } else { 2168
2879 DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); 2169 /* Now we call a subroutine to actually force the MAC
2880 } 2170 * controller to use the correct flow control settings.
2881 } 2171 */
2882 return E1000_SUCCESS; 2172 ret_val = e1000_force_mac_fc(hw);
2173 if (ret_val) {
2174 DEBUGOUT
2175 ("Error forcing flow control settings\n");
2176 return ret_val;
2177 }
2178 } else {
2179 DEBUGOUT
2180 ("Copper PHY and Auto Neg has not completed.\n");
2181 }
2182 }
2183 return E1000_SUCCESS;
2883} 2184}
2884 2185
2885/****************************************************************************** 2186/**
2886 * Checks to see if the link status of the hardware has changed. 2187 * e1000_check_for_serdes_link_generic - Check for link (Serdes)
2188 * @hw: pointer to the HW structure
2887 * 2189 *
2888 * hw - Struct containing variables accessed by shared code 2190 * Checks for link up on the hardware. If link is not up and we have
2191 * a signal, then we need to force link up.
2192 */
2193static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2194{
2195 u32 rxcw;
2196 u32 ctrl;
2197 u32 status;
2198 s32 ret_val = E1000_SUCCESS;
2199
2200 DEBUGFUNC("e1000_check_for_serdes_link_generic");
2201
2202 ctrl = er32(CTRL);
2203 status = er32(STATUS);
2204 rxcw = er32(RXCW);
2205
2206 /*
2207 * If we don't have link (auto-negotiation failed or link partner
2208 * cannot auto-negotiate), and our link partner is not trying to
2209 * auto-negotiate with us (we are receiving idles or data),
2210 * we need to force link up. We also need to give auto-negotiation
2211 * time to complete.
2212 */
2213 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
2214 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
2215 if (hw->autoneg_failed == 0) {
2216 hw->autoneg_failed = 1;
2217 goto out;
2218 }
2219 DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
2220
2221 /* Disable auto-negotiation in the TXCW register */
2222 ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
2223
2224 /* Force link-up and also force full-duplex. */
2225 ctrl = er32(CTRL);
2226 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
2227 ew32(CTRL, ctrl);
2228
2229 /* Configure Flow Control after forcing link up. */
2230 ret_val = e1000_config_fc_after_link_up(hw);
2231 if (ret_val) {
2232 DEBUGOUT("Error configuring flow control\n");
2233 goto out;
2234 }
2235 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
2236 /*
2237 * If we are forcing link and we are receiving /C/ ordered
2238 * sets, re-enable auto-negotiation in the TXCW register
2239 * and disable forced link in the Device Control register
2240 * in an attempt to auto-negotiate with our link partner.
2241 */
2242 DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
2243 ew32(TXCW, hw->txcw);
2244 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
2245
2246 hw->serdes_has_link = true;
2247 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
2248 /*
2249 * If we force link for non-auto-negotiation switch, check
2250 * link status based on MAC synchronization for internal
2251 * serdes media type.
2252 */
2253 /* SYNCH bit and IV bit are sticky. */
2254 udelay(10);
2255 rxcw = er32(RXCW);
2256 if (rxcw & E1000_RXCW_SYNCH) {
2257 if (!(rxcw & E1000_RXCW_IV)) {
2258 hw->serdes_has_link = true;
2259 DEBUGOUT("SERDES: Link up - forced.\n");
2260 }
2261 } else {
2262 hw->serdes_has_link = false;
2263 DEBUGOUT("SERDES: Link down - force failed.\n");
2264 }
2265 }
2266
2267 if (E1000_TXCW_ANE & er32(TXCW)) {
2268 status = er32(STATUS);
2269 if (status & E1000_STATUS_LU) {
2270 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
2271 udelay(10);
2272 rxcw = er32(RXCW);
2273 if (rxcw & E1000_RXCW_SYNCH) {
2274 if (!(rxcw & E1000_RXCW_IV)) {
2275 hw->serdes_has_link = true;
2276 DEBUGOUT("SERDES: Link up - autoneg "
2277 "completed successfully.\n");
2278 } else {
2279 hw->serdes_has_link = false;
2280 DEBUGOUT("SERDES: Link down - invalid"
2281 "codewords detected in autoneg.\n");
2282 }
2283 } else {
2284 hw->serdes_has_link = false;
2285 DEBUGOUT("SERDES: Link down - no sync.\n");
2286 }
2287 } else {
2288 hw->serdes_has_link = false;
2289 DEBUGOUT("SERDES: Link down - autoneg failed\n");
2290 }
2291 }
2292
2293 out:
2294 return ret_val;
2295}
2296
2297/**
2298 * e1000_check_for_link
2299 * @hw: Struct containing variables accessed by shared code
2889 * 2300 *
2301 * Checks to see if the link status of the hardware has changed.
2890 * Called by any function that needs to check the link status of the adapter. 2302 * Called by any function that needs to check the link status of the adapter.
2891 *****************************************************************************/ 2303 */
2892s32 e1000_check_for_link(struct e1000_hw *hw) 2304s32 e1000_check_for_link(struct e1000_hw *hw)
2893{ 2305{
2894 u32 rxcw = 0; 2306 u32 rxcw = 0;
2895 u32 ctrl; 2307 u32 ctrl;
2896 u32 status; 2308 u32 status;
2897 u32 rctl; 2309 u32 rctl;
2898 u32 icr; 2310 u32 icr;
2899 u32 signal = 0; 2311 u32 signal = 0;
2900 s32 ret_val; 2312 s32 ret_val;
2901 u16 phy_data; 2313 u16 phy_data;
2902 2314
2903 DEBUGFUNC("e1000_check_for_link"); 2315 DEBUGFUNC("e1000_check_for_link");
2904 2316
2905 ctrl = er32(CTRL); 2317 ctrl = er32(CTRL);
2906 status = er32(STATUS); 2318 status = er32(STATUS);
2907 2319
2908 /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be 2320 /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
2909 * set when the optics detect a signal. On older adapters, it will be 2321 * set when the optics detect a signal. On older adapters, it will be
2910 * cleared when there is a signal. This applies to fiber media only. 2322 * cleared when there is a signal. This applies to fiber media only.
2911 */ 2323 */
2912 if ((hw->media_type == e1000_media_type_fiber) || 2324 if ((hw->media_type == e1000_media_type_fiber) ||
2913 (hw->media_type == e1000_media_type_internal_serdes)) { 2325 (hw->media_type == e1000_media_type_internal_serdes)) {
2914 rxcw = er32(RXCW); 2326 rxcw = er32(RXCW);
2915 2327
2916 if (hw->media_type == e1000_media_type_fiber) { 2328 if (hw->media_type == e1000_media_type_fiber) {
2917 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; 2329 signal =
2918 if (status & E1000_STATUS_LU) 2330 (hw->mac_type >
2919 hw->get_link_status = false; 2331 e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
2920 } 2332 if (status & E1000_STATUS_LU)
2921 } 2333 hw->get_link_status = false;
2922 2334 }
2923 /* If we have a copper PHY then we only want to go out to the PHY 2335 }
2924 * registers to see if Auto-Neg has completed and/or if our link 2336
2925 * status has changed. The get_link_status flag will be set if we 2337 /* If we have a copper PHY then we only want to go out to the PHY
2926 * receive a Link Status Change interrupt or we have Rx Sequence 2338 * registers to see if Auto-Neg has completed and/or if our link
2927 * Errors. 2339 * status has changed. The get_link_status flag will be set if we
2928 */ 2340 * receive a Link Status Change interrupt or we have Rx Sequence
2929 if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { 2341 * Errors.
2930 /* First we want to see if the MII Status Register reports 2342 */
2931 * link. If so, then we want to get the current speed/duplex 2343 if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
2932 * of the PHY. 2344 /* First we want to see if the MII Status Register reports
2933 * Read the register twice since the link bit is sticky. 2345 * link. If so, then we want to get the current speed/duplex
2934 */ 2346 * of the PHY.
2935 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 2347 * Read the register twice since the link bit is sticky.
2936 if (ret_val) 2348 */
2937 return ret_val; 2349 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2938 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 2350 if (ret_val)
2939 if (ret_val) 2351 return ret_val;
2940 return ret_val; 2352 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
2941 2353 if (ret_val)
2942 if (phy_data & MII_SR_LINK_STATUS) { 2354 return ret_val;
2943 hw->get_link_status = false; 2355
2944 /* Check if there was DownShift, must be checked immediately after 2356 if (phy_data & MII_SR_LINK_STATUS) {
2945 * link-up */ 2357 hw->get_link_status = false;
2946 e1000_check_downshift(hw); 2358 /* Check if there was DownShift, must be checked immediately after
2947 2359 * link-up */
2948 /* If we are on 82544 or 82543 silicon and speed/duplex 2360 e1000_check_downshift(hw);
2949 * are forced to 10H or 10F, then we will implement the polarity 2361
2950 * reversal workaround. We disable interrupts first, and upon 2362 /* If we are on 82544 or 82543 silicon and speed/duplex
2951 * returning, place the devices interrupt state to its previous 2363 * are forced to 10H or 10F, then we will implement the polarity
2952 * value except for the link status change interrupt which will 2364 * reversal workaround. We disable interrupts first, and upon
2953 * happen due to the execution of this workaround. 2365 * returning, place the devices interrupt state to its previous
2954 */ 2366 * value except for the link status change interrupt which will
2955 2367 * happen due to the execution of this workaround.
2956 if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && 2368 */
2957 (!hw->autoneg) && 2369
2958 (hw->forced_speed_duplex == e1000_10_full || 2370 if ((hw->mac_type == e1000_82544
2959 hw->forced_speed_duplex == e1000_10_half)) { 2371 || hw->mac_type == e1000_82543) && (!hw->autoneg)
2960 ew32(IMC, 0xffffffff); 2372 && (hw->forced_speed_duplex == e1000_10_full
2961 ret_val = e1000_polarity_reversal_workaround(hw); 2373 || hw->forced_speed_duplex == e1000_10_half)) {
2962 icr = er32(ICR); 2374 ew32(IMC, 0xffffffff);
2963 ew32(ICS, (icr & ~E1000_ICS_LSC)); 2375 ret_val =
2964 ew32(IMS, IMS_ENABLE_MASK); 2376 e1000_polarity_reversal_workaround(hw);
2965 } 2377 icr = er32(ICR);
2966 2378 ew32(ICS, (icr & ~E1000_ICS_LSC));
2967 } else { 2379 ew32(IMS, IMS_ENABLE_MASK);
2968 /* No link detected */ 2380 }
2969 e1000_config_dsp_after_link_change(hw, false); 2381
2970 return 0; 2382 } else {
2971 } 2383 /* No link detected */
2972 2384 e1000_config_dsp_after_link_change(hw, false);
2973 /* If we are forcing speed/duplex, then we simply return since 2385 return 0;
2974 * we have already determined whether we have link or not. 2386 }
2975 */ 2387
2976 if (!hw->autoneg) return -E1000_ERR_CONFIG; 2388 /* If we are forcing speed/duplex, then we simply return since
2977 2389 * we have already determined whether we have link or not.
2978 /* optimize the dsp settings for the igp phy */ 2390 */
2979 e1000_config_dsp_after_link_change(hw, true); 2391 if (!hw->autoneg)
2980 2392 return -E1000_ERR_CONFIG;
2981 /* We have a M88E1000 PHY and Auto-Neg is enabled. If we 2393
2982 * have Si on board that is 82544 or newer, Auto 2394 /* optimize the dsp settings for the igp phy */
2983 * Speed Detection takes care of MAC speed/duplex 2395 e1000_config_dsp_after_link_change(hw, true);
2984 * configuration. So we only need to configure Collision 2396
2985 * Distance in the MAC. Otherwise, we need to force 2397 /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
2986 * speed/duplex on the MAC to the current PHY speed/duplex 2398 * have Si on board that is 82544 or newer, Auto
2987 * settings. 2399 * Speed Detection takes care of MAC speed/duplex
2988 */ 2400 * configuration. So we only need to configure Collision
2989 if (hw->mac_type >= e1000_82544) 2401 * Distance in the MAC. Otherwise, we need to force
2990 e1000_config_collision_dist(hw); 2402 * speed/duplex on the MAC to the current PHY speed/duplex
2991 else { 2403 * settings.
2992 ret_val = e1000_config_mac_to_phy(hw); 2404 */
2993 if (ret_val) { 2405 if (hw->mac_type >= e1000_82544)
2994 DEBUGOUT("Error configuring MAC to PHY settings\n"); 2406 e1000_config_collision_dist(hw);
2995 return ret_val; 2407 else {
2996 } 2408 ret_val = e1000_config_mac_to_phy(hw);
2997 } 2409 if (ret_val) {
2998 2410 DEBUGOUT
2999 /* Configure Flow Control now that Auto-Neg has completed. First, we 2411 ("Error configuring MAC to PHY settings\n");
3000 * need to restore the desired flow control settings because we may 2412 return ret_val;
3001 * have had to re-autoneg with a different link partner. 2413 }
3002 */ 2414 }
3003 ret_val = e1000_config_fc_after_link_up(hw); 2415
3004 if (ret_val) { 2416 /* Configure Flow Control now that Auto-Neg has completed. First, we
3005 DEBUGOUT("Error configuring flow control\n"); 2417 * need to restore the desired flow control settings because we may
3006 return ret_val; 2418 * have had to re-autoneg with a different link partner.
3007 } 2419 */
3008 2420 ret_val = e1000_config_fc_after_link_up(hw);
3009 /* At this point we know that we are on copper and we have 2421 if (ret_val) {
3010 * auto-negotiated link. These are conditions for checking the link 2422 DEBUGOUT("Error configuring flow control\n");
3011 * partner capability register. We use the link speed to determine if 2423 return ret_val;
3012 * TBI compatibility needs to be turned on or off. If the link is not 2424 }
3013 * at gigabit speed, then TBI compatibility is not needed. If we are 2425
3014 * at gigabit speed, we turn on TBI compatibility. 2426 /* At this point we know that we are on copper and we have
3015 */ 2427 * auto-negotiated link. These are conditions for checking the link
3016 if (hw->tbi_compatibility_en) { 2428 * partner capability register. We use the link speed to determine if
3017 u16 speed, duplex; 2429 * TBI compatibility needs to be turned on or off. If the link is not
3018 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); 2430 * at gigabit speed, then TBI compatibility is not needed. If we are
3019 if (ret_val) { 2431 * at gigabit speed, we turn on TBI compatibility.
3020 DEBUGOUT("Error getting link speed and duplex\n"); 2432 */
3021 return ret_val; 2433 if (hw->tbi_compatibility_en) {
3022 } 2434 u16 speed, duplex;
3023 if (speed != SPEED_1000) { 2435 ret_val =
3024 /* If link speed is not set to gigabit speed, we do not need 2436 e1000_get_speed_and_duplex(hw, &speed, &duplex);
3025 * to enable TBI compatibility. 2437 if (ret_val) {
3026 */ 2438 DEBUGOUT
3027 if (hw->tbi_compatibility_on) { 2439 ("Error getting link speed and duplex\n");
3028 /* If we previously were in the mode, turn it off. */ 2440 return ret_val;
3029 rctl = er32(RCTL); 2441 }
3030 rctl &= ~E1000_RCTL_SBP; 2442 if (speed != SPEED_1000) {
3031 ew32(RCTL, rctl); 2443 /* If link speed is not set to gigabit speed, we do not need
3032 hw->tbi_compatibility_on = false; 2444 * to enable TBI compatibility.
3033 } 2445 */
3034 } else { 2446 if (hw->tbi_compatibility_on) {
3035 /* If TBI compatibility is was previously off, turn it on. For 2447 /* If we previously were in the mode, turn it off. */
3036 * compatibility with a TBI link partner, we will store bad 2448 rctl = er32(RCTL);
3037 * packets. Some frames have an additional byte on the end and 2449 rctl &= ~E1000_RCTL_SBP;
3038 * will look like CRC errors to the hardware. 2450 ew32(RCTL, rctl);
3039 */ 2451 hw->tbi_compatibility_on = false;
3040 if (!hw->tbi_compatibility_on) { 2452 }
3041 hw->tbi_compatibility_on = true; 2453 } else {
3042 rctl = er32(RCTL); 2454 /* If TBI compatibility is was previously off, turn it on. For
3043 rctl |= E1000_RCTL_SBP; 2455 * compatibility with a TBI link partner, we will store bad
3044 ew32(RCTL, rctl); 2456 * packets. Some frames have an additional byte on the end and
3045 } 2457 * will look like CRC errors to to the hardware.
3046 } 2458 */
3047 } 2459 if (!hw->tbi_compatibility_on) {
3048 } 2460 hw->tbi_compatibility_on = true;
3049 /* If we don't have link (auto-negotiation failed or link partner cannot 2461 rctl = er32(RCTL);
3050 * auto-negotiate), the cable is plugged in (we have signal), and our 2462 rctl |= E1000_RCTL_SBP;
3051 * link partner is not trying to auto-negotiate with us (we are receiving 2463 ew32(RCTL, rctl);
3052 * idles or data), we need to force link up. We also need to give 2464 }
3053 * auto-negotiation time to complete, in case the cable was just plugged 2465 }
3054 * in. The autoneg_failed flag does this. 2466 }
3055 */ 2467 }
3056 else if ((((hw->media_type == e1000_media_type_fiber) && 2468
3057 ((ctrl & E1000_CTRL_SWDPIN1) == signal)) || 2469 if ((hw->media_type == e1000_media_type_fiber) ||
3058 (hw->media_type == e1000_media_type_internal_serdes)) && 2470 (hw->media_type == e1000_media_type_internal_serdes))
3059 (!(status & E1000_STATUS_LU)) && 2471 e1000_check_for_serdes_link_generic(hw);
3060 (!(rxcw & E1000_RXCW_C))) { 2472
3061 if (hw->autoneg_failed == 0) { 2473 return E1000_SUCCESS;
3062 hw->autoneg_failed = 1;
3063 return 0;
3064 }
3065 DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
3066
3067 /* Disable auto-negotiation in the TXCW register */
3068 ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
3069
3070 /* Force link-up and also force full-duplex. */
3071 ctrl = er32(CTRL);
3072 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
3073 ew32(CTRL, ctrl);
3074
3075 /* Configure Flow Control after forcing link up. */
3076 ret_val = e1000_config_fc_after_link_up(hw);
3077 if (ret_val) {
3078 DEBUGOUT("Error configuring flow control\n");
3079 return ret_val;
3080 }
3081 }
3082 /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
3083 * auto-negotiation in the TXCW register and disable forced link in the
3084 * Device Control register in an attempt to auto-negotiate with our link
3085 * partner.
3086 */
3087 else if (((hw->media_type == e1000_media_type_fiber) ||
3088 (hw->media_type == e1000_media_type_internal_serdes)) &&
3089 (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
3090 DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
3091 ew32(TXCW, hw->txcw);
3092 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
3093
3094 hw->serdes_link_down = false;
3095 }
3096 /* If we force link for non-auto-negotiation switch, check link status
3097 * based on MAC synchronization for internal serdes media type.
3098 */
3099 else if ((hw->media_type == e1000_media_type_internal_serdes) &&
3100 !(E1000_TXCW_ANE & er32(TXCW))) {
3101 /* SYNCH bit and IV bit are sticky. */
3102 udelay(10);
3103 if (E1000_RXCW_SYNCH & er32(RXCW)) {
3104 if (!(rxcw & E1000_RXCW_IV)) {
3105 hw->serdes_link_down = false;
3106 DEBUGOUT("SERDES: Link is up.\n");
3107 }
3108 } else {
3109 hw->serdes_link_down = true;
3110 DEBUGOUT("SERDES: Link is down.\n");
3111 }
3112 }
3113 if ((hw->media_type == e1000_media_type_internal_serdes) &&
3114 (E1000_TXCW_ANE & er32(TXCW))) {
3115 hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
3116 }
3117 return E1000_SUCCESS;
3118} 2474}
3119 2475
3120/****************************************************************************** 2476/**
2477 * e1000_get_speed_and_duplex
2478 * @hw: Struct containing variables accessed by shared code
2479 * @speed: Speed of the connection
2480 * @duplex: Duplex setting of the connection
2481
3121 * Detects the current speed and duplex settings of the hardware. 2482 * Detects the current speed and duplex settings of the hardware.
3122 * 2483 */
3123 * hw - Struct containing variables accessed by shared code
3124 * speed - Speed of the connection
3125 * duplex - Duplex setting of the connection
3126 *****************************************************************************/
3127s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) 2484s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
3128{ 2485{
3129 u32 status; 2486 u32 status;
3130 s32 ret_val; 2487 s32 ret_val;
3131 u16 phy_data; 2488 u16 phy_data;
3132 2489
3133 DEBUGFUNC("e1000_get_speed_and_duplex"); 2490 DEBUGFUNC("e1000_get_speed_and_duplex");
3134 2491
3135 if (hw->mac_type >= e1000_82543) { 2492 if (hw->mac_type >= e1000_82543) {
3136 status = er32(STATUS); 2493 status = er32(STATUS);
3137 if (status & E1000_STATUS_SPEED_1000) { 2494 if (status & E1000_STATUS_SPEED_1000) {
3138 *speed = SPEED_1000; 2495 *speed = SPEED_1000;
3139 DEBUGOUT("1000 Mbs, "); 2496 DEBUGOUT("1000 Mbs, ");
3140 } else if (status & E1000_STATUS_SPEED_100) { 2497 } else if (status & E1000_STATUS_SPEED_100) {
3141 *speed = SPEED_100; 2498 *speed = SPEED_100;
3142 DEBUGOUT("100 Mbs, "); 2499 DEBUGOUT("100 Mbs, ");
3143 } else { 2500 } else {
3144 *speed = SPEED_10; 2501 *speed = SPEED_10;
3145 DEBUGOUT("10 Mbs, "); 2502 DEBUGOUT("10 Mbs, ");
3146 } 2503 }
3147 2504
3148 if (status & E1000_STATUS_FD) { 2505 if (status & E1000_STATUS_FD) {
3149 *duplex = FULL_DUPLEX; 2506 *duplex = FULL_DUPLEX;
3150 DEBUGOUT("Full Duplex\n"); 2507 DEBUGOUT("Full Duplex\n");
3151 } else { 2508 } else {
3152 *duplex = HALF_DUPLEX; 2509 *duplex = HALF_DUPLEX;
3153 DEBUGOUT(" Half Duplex\n"); 2510 DEBUGOUT(" Half Duplex\n");
3154 } 2511 }
3155 } else { 2512 } else {
3156 DEBUGOUT("1000 Mbs, Full Duplex\n"); 2513 DEBUGOUT("1000 Mbs, Full Duplex\n");
3157 *speed = SPEED_1000; 2514 *speed = SPEED_1000;
3158 *duplex = FULL_DUPLEX; 2515 *duplex = FULL_DUPLEX;
3159 } 2516 }
3160 2517
3161 /* IGP01 PHY may advertise full duplex operation after speed downgrade even 2518 /* IGP01 PHY may advertise full duplex operation after speed downgrade even
3162 * if it is operating at half duplex. Here we set the duplex settings to 2519 * if it is operating at half duplex. Here we set the duplex settings to
3163 * match the duplex in the link partner's capabilities. 2520 * match the duplex in the link partner's capabilities.
3164 */ 2521 */
3165 if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { 2522 if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
3166 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); 2523 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
3167 if (ret_val) 2524 if (ret_val)
3168 return ret_val; 2525 return ret_val;
3169 2526
3170 if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) 2527 if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
3171 *duplex = HALF_DUPLEX; 2528 *duplex = HALF_DUPLEX;
3172 else { 2529 else {
3173 ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); 2530 ret_val =
3174 if (ret_val) 2531 e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
3175 return ret_val; 2532 if (ret_val)
3176 if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || 2533 return ret_val;
3177 (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) 2534 if ((*speed == SPEED_100
3178 *duplex = HALF_DUPLEX; 2535 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS))
3179 } 2536 || (*speed == SPEED_10
3180 } 2537 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
3181 2538 *duplex = HALF_DUPLEX;
3182 if ((hw->mac_type == e1000_80003es2lan) && 2539 }
3183 (hw->media_type == e1000_media_type_copper)) { 2540 }
3184 if (*speed == SPEED_1000) 2541
3185 ret_val = e1000_configure_kmrn_for_1000(hw); 2542 return E1000_SUCCESS;
3186 else
3187 ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
3188 if (ret_val)
3189 return ret_val;
3190 }
3191
3192 if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
3193 ret_val = e1000_kumeran_lock_loss_workaround(hw);
3194 if (ret_val)
3195 return ret_val;
3196 }
3197
3198 return E1000_SUCCESS;
3199} 2543}
3200 2544
3201/****************************************************************************** 2545/**
3202* Blocks until autoneg completes or times out (~4.5 seconds) 2546 * e1000_wait_autoneg
3203* 2547 * @hw: Struct containing variables accessed by shared code
3204* hw - Struct containing variables accessed by shared code 2548 *
3205******************************************************************************/ 2549 * Blocks until autoneg completes or times out (~4.5 seconds)
2550 */
3206static s32 e1000_wait_autoneg(struct e1000_hw *hw) 2551static s32 e1000_wait_autoneg(struct e1000_hw *hw)
3207{ 2552{
3208 s32 ret_val; 2553 s32 ret_val;
3209 u16 i; 2554 u16 i;
3210 u16 phy_data; 2555 u16 phy_data;
3211 2556
3212 DEBUGFUNC("e1000_wait_autoneg"); 2557 DEBUGFUNC("e1000_wait_autoneg");
3213 DEBUGOUT("Waiting for Auto-Neg to complete.\n"); 2558 DEBUGOUT("Waiting for Auto-Neg to complete.\n");
3214 2559
3215 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 2560 /* We will wait for autoneg to complete or 4.5 seconds to expire. */
3216 for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { 2561 for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
3217 /* Read the MII Status Register and wait for Auto-Neg 2562 /* Read the MII Status Register and wait for Auto-Neg
3218 * Complete bit to be set. 2563 * Complete bit to be set.
3219 */ 2564 */
3220 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 2565 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3221 if (ret_val) 2566 if (ret_val)
3222 return ret_val; 2567 return ret_val;
3223 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 2568 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3224 if (ret_val) 2569 if (ret_val)
3225 return ret_val; 2570 return ret_val;
3226 if (phy_data & MII_SR_AUTONEG_COMPLETE) { 2571 if (phy_data & MII_SR_AUTONEG_COMPLETE) {
3227 return E1000_SUCCESS; 2572 return E1000_SUCCESS;
3228 } 2573 }
3229 msleep(100); 2574 msleep(100);
3230 } 2575 }
3231 return E1000_SUCCESS; 2576 return E1000_SUCCESS;
3232} 2577}
3233 2578
3234/****************************************************************************** 2579/**
3235* Raises the Management Data Clock 2580 * e1000_raise_mdi_clk - Raises the Management Data Clock
3236* 2581 * @hw: Struct containing variables accessed by shared code
3237* hw - Struct containing variables accessed by shared code 2582 * @ctrl: Device control register's current value
3238* ctrl - Device control register's current value 2583 */
3239******************************************************************************/
3240static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) 2584static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
3241{ 2585{
3242 /* Raise the clock input to the Management Data Clock (by setting the MDC 2586 /* Raise the clock input to the Management Data Clock (by setting the MDC
3243 * bit), and then delay 10 microseconds. 2587 * bit), and then delay 10 microseconds.
3244 */ 2588 */
3245 ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); 2589 ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
3246 E1000_WRITE_FLUSH(); 2590 E1000_WRITE_FLUSH();
3247 udelay(10); 2591 udelay(10);
3248} 2592}
3249 2593
3250/****************************************************************************** 2594/**
3251* Lowers the Management Data Clock 2595 * e1000_lower_mdi_clk - Lowers the Management Data Clock
3252* 2596 * @hw: Struct containing variables accessed by shared code
3253* hw - Struct containing variables accessed by shared code 2597 * @ctrl: Device control register's current value
3254* ctrl - Device control register's current value 2598 */
3255******************************************************************************/
3256static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) 2599static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
3257{ 2600{
3258 /* Lower the clock input to the Management Data Clock (by clearing the MDC 2601 /* Lower the clock input to the Management Data Clock (by clearing the MDC
3259 * bit), and then delay 10 microseconds. 2602 * bit), and then delay 10 microseconds.
3260 */ 2603 */
3261 ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); 2604 ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
3262 E1000_WRITE_FLUSH(); 2605 E1000_WRITE_FLUSH();
3263 udelay(10); 2606 udelay(10);
3264} 2607}
3265 2608
3266/****************************************************************************** 2609/**
3267* Shifts data bits out to the PHY 2610 * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY
3268* 2611 * @hw: Struct containing variables accessed by shared code
3269* hw - Struct containing variables accessed by shared code 2612 * @data: Data to send out to the PHY
3270* data - Data to send out to the PHY 2613 * @count: Number of bits to shift out
3271* count - Number of bits to shift out 2614 *
3272* 2615 * Bits are shifted out in MSB to LSB order.
3273* Bits are shifted out in MSB to LSB order. 2616 */
3274******************************************************************************/
3275static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) 2617static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
3276{ 2618{
3277 u32 ctrl; 2619 u32 ctrl;
3278 u32 mask; 2620 u32 mask;
3279
3280 /* We need to shift "count" number of bits out to the PHY. So, the value
3281 * in the "data" parameter will be shifted out to the PHY one bit at a
3282 * time. In order to do this, "data" must be broken down into bits.
3283 */
3284 mask = 0x01;
3285 mask <<= (count - 1);
3286
3287 ctrl = er32(CTRL);
3288
3289 /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
3290 ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
3291
3292 while (mask) {
3293 /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
3294 * then raising and lowering the Management Data Clock. A "0" is
3295 * shifted out to the PHY by setting the MDIO bit to "0" and then
3296 * raising and lowering the clock.
3297 */
3298 if (data & mask)
3299 ctrl |= E1000_CTRL_MDIO;
3300 else
3301 ctrl &= ~E1000_CTRL_MDIO;
3302
3303 ew32(CTRL, ctrl);
3304 E1000_WRITE_FLUSH();
3305
3306 udelay(10);
3307
3308 e1000_raise_mdi_clk(hw, &ctrl);
3309 e1000_lower_mdi_clk(hw, &ctrl);
3310
3311 mask = mask >> 1;
3312 }
3313}
3314
3315/******************************************************************************
3316* Shifts data bits in from the PHY
3317*
3318* hw - Struct containing variables accessed by shared code
3319*
3320* Bits are shifted in in MSB to LSB order.
3321******************************************************************************/
3322static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3323{
3324 u32 ctrl;
3325 u16 data = 0;
3326 u8 i;
3327
3328 /* In order to read a register from the PHY, we need to shift in a total
3329 * of 18 bits from the PHY. The first two bit (turnaround) times are used
3330 * to avoid contention on the MDIO pin when a read operation is performed.
3331 * These two bits are ignored by us and thrown away. Bits are "shifted in"
3332 * by raising the input to the Management Data Clock (setting the MDC bit),
3333 * and then reading the value of the MDIO bit.
3334 */
3335 ctrl = er32(CTRL);
3336
3337 /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
3338 ctrl &= ~E1000_CTRL_MDIO_DIR;
3339 ctrl &= ~E1000_CTRL_MDIO;
3340
3341 ew32(CTRL, ctrl);
3342 E1000_WRITE_FLUSH();
3343
3344 /* Raise and Lower the clock before reading in the data. This accounts for
3345 * the turnaround bits. The first clock occurred when we clocked out the
3346 * last bit of the Register Address.
3347 */
3348 e1000_raise_mdi_clk(hw, &ctrl);
3349 e1000_lower_mdi_clk(hw, &ctrl);
3350
3351 for (data = 0, i = 0; i < 16; i++) {
3352 data = data << 1;
3353 e1000_raise_mdi_clk(hw, &ctrl);
3354 ctrl = er32(CTRL);
3355 /* Check to see if we shifted in a "1". */
3356 if (ctrl & E1000_CTRL_MDIO)
3357 data |= 1;
3358 e1000_lower_mdi_clk(hw, &ctrl);
3359 }
3360
3361 e1000_raise_mdi_clk(hw, &ctrl);
3362 e1000_lower_mdi_clk(hw, &ctrl);
3363
3364 return data;
3365}
3366
3367static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
3368{
3369 u32 swfw_sync = 0;
3370 u32 swmask = mask;
3371 u32 fwmask = mask << 16;
3372 s32 timeout = 200;
3373 2621
3374 DEBUGFUNC("e1000_swfw_sync_acquire"); 2622 /* We need to shift "count" number of bits out to the PHY. So, the value
3375 2623 * in the "data" parameter will be shifted out to the PHY one bit at a
3376 if (hw->swfwhw_semaphore_present) 2624 * time. In order to do this, "data" must be broken down into bits.
3377 return e1000_get_software_flag(hw); 2625 */
2626 mask = 0x01;
2627 mask <<= (count - 1);
3378 2628
3379 if (!hw->swfw_sync_present) 2629 ctrl = er32(CTRL);
3380 return e1000_get_hw_eeprom_semaphore(hw);
3381 2630
3382 while (timeout) { 2631 /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
3383 if (e1000_get_hw_eeprom_semaphore(hw)) 2632 ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
3384 return -E1000_ERR_SWFW_SYNC;
3385 2633
3386 swfw_sync = er32(SW_FW_SYNC); 2634 while (mask) {
3387 if (!(swfw_sync & (fwmask | swmask))) { 2635 /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
3388 break; 2636 * then raising and lowering the Management Data Clock. A "0" is
3389 } 2637 * shifted out to the PHY by setting the MDIO bit to "0" and then
2638 * raising and lowering the clock.
2639 */
2640 if (data & mask)
2641 ctrl |= E1000_CTRL_MDIO;
2642 else
2643 ctrl &= ~E1000_CTRL_MDIO;
3390 2644
3391 /* firmware currently using resource (fwmask) */ 2645 ew32(CTRL, ctrl);
3392 /* or other software thread currently using resource (swmask) */ 2646 E1000_WRITE_FLUSH();
3393 e1000_put_hw_eeprom_semaphore(hw);
3394 mdelay(5);
3395 timeout--;
3396 }
3397 2647
3398 if (!timeout) { 2648 udelay(10);
3399 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
3400 return -E1000_ERR_SWFW_SYNC;
3401 }
3402 2649
3403 swfw_sync |= swmask; 2650 e1000_raise_mdi_clk(hw, &ctrl);
3404 ew32(SW_FW_SYNC, swfw_sync); 2651 e1000_lower_mdi_clk(hw, &ctrl);
3405 2652
3406 e1000_put_hw_eeprom_semaphore(hw); 2653 mask = mask >> 1;
3407 return E1000_SUCCESS; 2654 }
3408} 2655}
3409 2656
3410static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask) 2657/**
2658 * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY
2659 * @hw: Struct containing variables accessed by shared code
2660 *
2661 * Bits are shifted in in MSB to LSB order.
2662 */
2663static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3411{ 2664{
3412 u32 swfw_sync; 2665 u32 ctrl;
3413 u32 swmask = mask; 2666 u16 data = 0;
2667 u8 i;
3414 2668
3415 DEBUGFUNC("e1000_swfw_sync_release"); 2669 /* In order to read a register from the PHY, we need to shift in a total
2670 * of 18 bits from the PHY. The first two bit (turnaround) times are used
2671 * to avoid contention on the MDIO pin when a read operation is performed.
2672 * These two bits are ignored by us and thrown away. Bits are "shifted in"
2673 * by raising the input to the Management Data Clock (setting the MDC bit),
2674 * and then reading the value of the MDIO bit.
2675 */
2676 ctrl = er32(CTRL);
3416 2677
3417 if (hw->swfwhw_semaphore_present) { 2678 /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
3418 e1000_release_software_flag(hw); 2679 ctrl &= ~E1000_CTRL_MDIO_DIR;
3419 return; 2680 ctrl &= ~E1000_CTRL_MDIO;
3420 }
3421 2681
3422 if (!hw->swfw_sync_present) { 2682 ew32(CTRL, ctrl);
3423 e1000_put_hw_eeprom_semaphore(hw); 2683 E1000_WRITE_FLUSH();
3424 return;
3425 }
3426 2684
3427 /* if (e1000_get_hw_eeprom_semaphore(hw)) 2685 /* Raise and Lower the clock before reading in the data. This accounts for
3428 * return -E1000_ERR_SWFW_SYNC; */ 2686 * the turnaround bits. The first clock occurred when we clocked out the
3429 while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); 2687 * last bit of the Register Address.
3430 /* empty */ 2688 */
2689 e1000_raise_mdi_clk(hw, &ctrl);
2690 e1000_lower_mdi_clk(hw, &ctrl);
2691
2692 for (data = 0, i = 0; i < 16; i++) {
2693 data = data << 1;
2694 e1000_raise_mdi_clk(hw, &ctrl);
2695 ctrl = er32(CTRL);
2696 /* Check to see if we shifted in a "1". */
2697 if (ctrl & E1000_CTRL_MDIO)
2698 data |= 1;
2699 e1000_lower_mdi_clk(hw, &ctrl);
2700 }
3431 2701
3432 swfw_sync = er32(SW_FW_SYNC); 2702 e1000_raise_mdi_clk(hw, &ctrl);
3433 swfw_sync &= ~swmask; 2703 e1000_lower_mdi_clk(hw, &ctrl);
3434 ew32(SW_FW_SYNC, swfw_sync);
3435 2704
3436 e1000_put_hw_eeprom_semaphore(hw); 2705 return data;
3437} 2706}
3438 2707
3439/***************************************************************************** 2708
3440* Reads the value from a PHY register, if the value is on a specific non zero 2709/**
3441* page, sets the page first. 2710 * e1000_read_phy_reg - read a phy register
3442* hw - Struct containing variables accessed by shared code 2711 * @hw: Struct containing variables accessed by shared code
3443* reg_addr - address of the PHY register to read 2712 * @reg_addr: address of the PHY register to read
3444******************************************************************************/ 2713 *
2714 * Reads the value from a PHY register, if the value is on a specific non zero
2715 * page, sets the page first.
2716 */
3445s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) 2717s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
3446{ 2718{
3447 u32 ret_val; 2719 u32 ret_val;
3448 u16 swfw; 2720
3449 2721 DEBUGFUNC("e1000_read_phy_reg");
3450 DEBUGFUNC("e1000_read_phy_reg"); 2722
3451 2723 if ((hw->phy_type == e1000_phy_igp) &&
3452 if ((hw->mac_type == e1000_80003es2lan) && 2724 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3453 (er32(STATUS) & E1000_STATUS_FUNC_1)) { 2725 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3454 swfw = E1000_SWFW_PHY1_SM; 2726 (u16) reg_addr);
3455 } else { 2727 if (ret_val)
3456 swfw = E1000_SWFW_PHY0_SM; 2728 return ret_val;
3457 } 2729 }
3458 if (e1000_swfw_sync_acquire(hw, swfw)) 2730
3459 return -E1000_ERR_SWFW_SYNC; 2731 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
3460 2732 phy_data);
3461 if ((hw->phy_type == e1000_phy_igp || 2733
3462 hw->phy_type == e1000_phy_igp_3 || 2734 return ret_val;
3463 hw->phy_type == e1000_phy_igp_2) &&
3464 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3465 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3466 (u16)reg_addr);
3467 if (ret_val) {
3468 e1000_swfw_sync_release(hw, swfw);
3469 return ret_val;
3470 }
3471 } else if (hw->phy_type == e1000_phy_gg82563) {
3472 if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
3473 (hw->mac_type == e1000_80003es2lan)) {
3474 /* Select Configuration Page */
3475 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3476 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3477 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3478 } else {
3479 /* Use Alternative Page Select register to access
3480 * registers 30 and 31
3481 */
3482 ret_val = e1000_write_phy_reg_ex(hw,
3483 GG82563_PHY_PAGE_SELECT_ALT,
3484 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3485 }
3486
3487 if (ret_val) {
3488 e1000_swfw_sync_release(hw, swfw);
3489 return ret_val;
3490 }
3491 }
3492 }
3493
3494 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
3495 phy_data);
3496
3497 e1000_swfw_sync_release(hw, swfw);
3498 return ret_val;
3499} 2735}
3500 2736
3501static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, 2737static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3502 u16 *phy_data) 2738 u16 *phy_data)
3503{ 2739{
3504 u32 i; 2740 u32 i;
3505 u32 mdic = 0; 2741 u32 mdic = 0;
3506 const u32 phy_addr = 1; 2742 const u32 phy_addr = 1;
3507 2743
3508 DEBUGFUNC("e1000_read_phy_reg_ex"); 2744 DEBUGFUNC("e1000_read_phy_reg_ex");
3509 2745
3510 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2746 if (reg_addr > MAX_PHY_REG_ADDRESS) {
3511 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); 2747 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
3512 return -E1000_ERR_PARAM; 2748 return -E1000_ERR_PARAM;
3513 } 2749 }
3514 2750
3515 if (hw->mac_type > e1000_82543) { 2751 if (hw->mac_type > e1000_82543) {
3516 /* Set up Op-code, Phy Address, and register address in the MDI 2752 /* Set up Op-code, Phy Address, and register address in the MDI
3517 * Control register. The MAC will take care of interfacing with the 2753 * Control register. The MAC will take care of interfacing with the
3518 * PHY to retrieve the desired data. 2754 * PHY to retrieve the desired data.
3519 */ 2755 */
3520 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | 2756 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
3521 (phy_addr << E1000_MDIC_PHY_SHIFT) | 2757 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3522 (E1000_MDIC_OP_READ)); 2758 (E1000_MDIC_OP_READ));
3523 2759
3524 ew32(MDIC, mdic); 2760 ew32(MDIC, mdic);
3525 2761
3526 /* Poll the ready bit to see if the MDI read completed */ 2762 /* Poll the ready bit to see if the MDI read completed */
3527 for (i = 0; i < 64; i++) { 2763 for (i = 0; i < 64; i++) {
3528 udelay(50); 2764 udelay(50);
3529 mdic = er32(MDIC); 2765 mdic = er32(MDIC);
3530 if (mdic & E1000_MDIC_READY) break; 2766 if (mdic & E1000_MDIC_READY)
3531 } 2767 break;
3532 if (!(mdic & E1000_MDIC_READY)) { 2768 }
3533 DEBUGOUT("MDI Read did not complete\n"); 2769 if (!(mdic & E1000_MDIC_READY)) {
3534 return -E1000_ERR_PHY; 2770 DEBUGOUT("MDI Read did not complete\n");
3535 } 2771 return -E1000_ERR_PHY;
3536 if (mdic & E1000_MDIC_ERROR) { 2772 }
3537 DEBUGOUT("MDI Error\n"); 2773 if (mdic & E1000_MDIC_ERROR) {
3538 return -E1000_ERR_PHY; 2774 DEBUGOUT("MDI Error\n");
3539 } 2775 return -E1000_ERR_PHY;
3540 *phy_data = (u16)mdic; 2776 }
3541 } else { 2777 *phy_data = (u16) mdic;
3542 /* We must first send a preamble through the MDIO pin to signal the 2778 } else {
3543 * beginning of an MII instruction. This is done by sending 32 2779 /* We must first send a preamble through the MDIO pin to signal the
3544 * consecutive "1" bits. 2780 * beginning of an MII instruction. This is done by sending 32
3545 */ 2781 * consecutive "1" bits.
3546 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); 2782 */
3547 2783 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
3548 /* Now combine the next few fields that are required for a read 2784
3549 * operation. We use this method instead of calling the 2785 /* Now combine the next few fields that are required for a read
3550 * e1000_shift_out_mdi_bits routine five different times. The format of 2786 * operation. We use this method instead of calling the
3551 * a MII read instruction consists of a shift out of 14 bits and is 2787 * e1000_shift_out_mdi_bits routine five different times. The format of
3552 * defined as follows: 2788 * a MII read instruction consists of a shift out of 14 bits and is
3553 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> 2789 * defined as follows:
3554 * followed by a shift in of 18 bits. This first two bits shifted in 2790 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
3555 * are TurnAround bits used to avoid contention on the MDIO pin when a 2791 * followed by a shift in of 18 bits. This first two bits shifted in
3556 * READ operation is performed. These two bits are thrown away 2792 * are TurnAround bits used to avoid contention on the MDIO pin when a
3557 * followed by a shift in of 16 bits which contains the desired data. 2793 * READ operation is performed. These two bits are thrown away
3558 */ 2794 * followed by a shift in of 16 bits which contains the desired data.
3559 mdic = ((reg_addr) | (phy_addr << 5) | 2795 */
3560 (PHY_OP_READ << 10) | (PHY_SOF << 12)); 2796 mdic = ((reg_addr) | (phy_addr << 5) |
3561 2797 (PHY_OP_READ << 10) | (PHY_SOF << 12));
3562 e1000_shift_out_mdi_bits(hw, mdic, 14); 2798
3563 2799 e1000_shift_out_mdi_bits(hw, mdic, 14);
3564 /* Now that we've shifted out the read command to the MII, we need to 2800
3565 * "shift in" the 16-bit value (18 total bits) of the requested PHY 2801 /* Now that we've shifted out the read command to the MII, we need to
3566 * register address. 2802 * "shift in" the 16-bit value (18 total bits) of the requested PHY
3567 */ 2803 * register address.
3568 *phy_data = e1000_shift_in_mdi_bits(hw); 2804 */
3569 } 2805 *phy_data = e1000_shift_in_mdi_bits(hw);
3570 return E1000_SUCCESS; 2806 }
2807 return E1000_SUCCESS;
3571} 2808}
3572 2809
3573/****************************************************************************** 2810/**
3574* Writes a value to a PHY register 2811 * e1000_write_phy_reg - write a phy register
3575* 2812 *
3576* hw - Struct containing variables accessed by shared code 2813 * @hw: Struct containing variables accessed by shared code
3577* reg_addr - address of the PHY register to write 2814 * @reg_addr: address of the PHY register to write
3578* data - data to write to the PHY 2815 * @data: data to write to the PHY
3579******************************************************************************/ 2816
2817 * Writes a value to a PHY register
2818 */
3580s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) 2819s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
3581{ 2820{
3582 u32 ret_val; 2821 u32 ret_val;
3583 u16 swfw; 2822
3584 2823 DEBUGFUNC("e1000_write_phy_reg");
3585 DEBUGFUNC("e1000_write_phy_reg"); 2824
3586 2825 if ((hw->phy_type == e1000_phy_igp) &&
3587 if ((hw->mac_type == e1000_80003es2lan) && 2826 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3588 (er32(STATUS) & E1000_STATUS_FUNC_1)) { 2827 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3589 swfw = E1000_SWFW_PHY1_SM; 2828 (u16) reg_addr);
3590 } else { 2829 if (ret_val)
3591 swfw = E1000_SWFW_PHY0_SM; 2830 return ret_val;
3592 } 2831 }
3593 if (e1000_swfw_sync_acquire(hw, swfw)) 2832
3594 return -E1000_ERR_SWFW_SYNC; 2833 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
3595 2834 phy_data);
3596 if ((hw->phy_type == e1000_phy_igp || 2835
3597 hw->phy_type == e1000_phy_igp_3 || 2836 return ret_val;
3598 hw->phy_type == e1000_phy_igp_2) &&
3599 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3600 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3601 (u16)reg_addr);
3602 if (ret_val) {
3603 e1000_swfw_sync_release(hw, swfw);
3604 return ret_val;
3605 }
3606 } else if (hw->phy_type == e1000_phy_gg82563) {
3607 if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
3608 (hw->mac_type == e1000_80003es2lan)) {
3609 /* Select Configuration Page */
3610 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3611 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3612 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3613 } else {
3614 /* Use Alternative Page Select register to access
3615 * registers 30 and 31
3616 */
3617 ret_val = e1000_write_phy_reg_ex(hw,
3618 GG82563_PHY_PAGE_SELECT_ALT,
3619 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3620 }
3621
3622 if (ret_val) {
3623 e1000_swfw_sync_release(hw, swfw);
3624 return ret_val;
3625 }
3626 }
3627 }
3628
3629 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
3630 phy_data);
3631
3632 e1000_swfw_sync_release(hw, swfw);
3633 return ret_val;
3634} 2837}
3635 2838
3636static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, 2839static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3637 u16 phy_data) 2840 u16 phy_data)
3638{ 2841{
3639 u32 i; 2842 u32 i;
3640 u32 mdic = 0; 2843 u32 mdic = 0;
3641 const u32 phy_addr = 1; 2844 const u32 phy_addr = 1;
3642
3643 DEBUGFUNC("e1000_write_phy_reg_ex");
3644
3645 if (reg_addr > MAX_PHY_REG_ADDRESS) {
3646 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
3647 return -E1000_ERR_PARAM;
3648 }
3649
3650 if (hw->mac_type > e1000_82543) {
3651 /* Set up Op-code, Phy Address, register address, and data intended
3652 * for the PHY register in the MDI Control register. The MAC will take
3653 * care of interfacing with the PHY to send the desired data.
3654 */
3655 mdic = (((u32)phy_data) |
3656 (reg_addr << E1000_MDIC_REG_SHIFT) |
3657 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3658 (E1000_MDIC_OP_WRITE));
3659
3660 ew32(MDIC, mdic);
3661
3662 /* Poll the ready bit to see if the MDI read completed */
3663 for (i = 0; i < 641; i++) {
3664 udelay(5);
3665 mdic = er32(MDIC);
3666 if (mdic & E1000_MDIC_READY) break;
3667 }
3668 if (!(mdic & E1000_MDIC_READY)) {
3669 DEBUGOUT("MDI Write did not complete\n");
3670 return -E1000_ERR_PHY;
3671 }
3672 } else {
3673 /* We'll need to use the SW defined pins to shift the write command
3674 * out to the PHY. We first send a preamble to the PHY to signal the
3675 * beginning of the MII instruction. This is done by sending 32
3676 * consecutive "1" bits.
3677 */
3678 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
3679
3680 /* Now combine the remaining required fields that will indicate a
3681 * write operation. We use this method instead of calling the
3682 * e1000_shift_out_mdi_bits routine for each field in the command. The
3683 * format of a MII write instruction is as follows:
3684 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
3685 */
3686 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
3687 (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
3688 mdic <<= 16;
3689 mdic |= (u32)phy_data;
3690
3691 e1000_shift_out_mdi_bits(hw, mdic, 32);
3692 }
3693
3694 return E1000_SUCCESS;
3695}
3696 2845
3697static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data) 2846 DEBUGFUNC("e1000_write_phy_reg_ex");
3698{
3699 u32 reg_val;
3700 u16 swfw;
3701 DEBUGFUNC("e1000_read_kmrn_reg");
3702
3703 if ((hw->mac_type == e1000_80003es2lan) &&
3704 (er32(STATUS) & E1000_STATUS_FUNC_1)) {
3705 swfw = E1000_SWFW_PHY1_SM;
3706 } else {
3707 swfw = E1000_SWFW_PHY0_SM;
3708 }
3709 if (e1000_swfw_sync_acquire(hw, swfw))
3710 return -E1000_ERR_SWFW_SYNC;
3711
3712 /* Write register address */
3713 reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
3714 E1000_KUMCTRLSTA_OFFSET) |
3715 E1000_KUMCTRLSTA_REN;
3716 ew32(KUMCTRLSTA, reg_val);
3717 udelay(2);
3718
3719 /* Read the data returned */
3720 reg_val = er32(KUMCTRLSTA);
3721 *data = (u16)reg_val;
3722
3723 e1000_swfw_sync_release(hw, swfw);
3724 return E1000_SUCCESS;
3725}
3726 2847
3727static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data) 2848 if (reg_addr > MAX_PHY_REG_ADDRESS) {
3728{ 2849 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
3729 u32 reg_val; 2850 return -E1000_ERR_PARAM;
3730 u16 swfw; 2851 }
3731 DEBUGFUNC("e1000_write_kmrn_reg"); 2852
3732 2853 if (hw->mac_type > e1000_82543) {
3733 if ((hw->mac_type == e1000_80003es2lan) && 2854 /* Set up Op-code, Phy Address, register address, and data intended
3734 (er32(STATUS) & E1000_STATUS_FUNC_1)) { 2855 * for the PHY register in the MDI Control register. The MAC will take
3735 swfw = E1000_SWFW_PHY1_SM; 2856 * care of interfacing with the PHY to send the desired data.
3736 } else { 2857 */
3737 swfw = E1000_SWFW_PHY0_SM; 2858 mdic = (((u32) phy_data) |
3738 } 2859 (reg_addr << E1000_MDIC_REG_SHIFT) |
3739 if (e1000_swfw_sync_acquire(hw, swfw)) 2860 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3740 return -E1000_ERR_SWFW_SYNC; 2861 (E1000_MDIC_OP_WRITE));
3741 2862
3742 reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & 2863 ew32(MDIC, mdic);
3743 E1000_KUMCTRLSTA_OFFSET) | data; 2864
3744 ew32(KUMCTRLSTA, reg_val); 2865 /* Poll the ready bit to see if the MDI read completed */
3745 udelay(2); 2866 for (i = 0; i < 641; i++) {
3746 2867 udelay(5);
3747 e1000_swfw_sync_release(hw, swfw); 2868 mdic = er32(MDIC);
3748 return E1000_SUCCESS; 2869 if (mdic & E1000_MDIC_READY)
2870 break;
2871 }
2872 if (!(mdic & E1000_MDIC_READY)) {
2873 DEBUGOUT("MDI Write did not complete\n");
2874 return -E1000_ERR_PHY;
2875 }
2876 } else {
2877 /* We'll need to use the SW defined pins to shift the write command
2878 * out to the PHY. We first send a preamble to the PHY to signal the
2879 * beginning of the MII instruction. This is done by sending 32
2880 * consecutive "1" bits.
2881 */
2882 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
2883
2884 /* Now combine the remaining required fields that will indicate a
2885 * write operation. We use this method instead of calling the
2886 * e1000_shift_out_mdi_bits routine for each field in the command. The
2887 * format of a MII write instruction is as follows:
2888 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
2889 */
2890 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
2891 (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
2892 mdic <<= 16;
2893 mdic |= (u32) phy_data;
2894
2895 e1000_shift_out_mdi_bits(hw, mdic, 32);
2896 }
2897
2898 return E1000_SUCCESS;
3749} 2899}
3750 2900
3751/****************************************************************************** 2901/**
3752* Returns the PHY to the power-on reset state 2902 * e1000_phy_hw_reset - reset the phy, hardware style
3753* 2903 * @hw: Struct containing variables accessed by shared code
3754* hw - Struct containing variables accessed by shared code 2904 *
3755******************************************************************************/ 2905 * Returns the PHY to the power-on reset state
2906 */
3756s32 e1000_phy_hw_reset(struct e1000_hw *hw) 2907s32 e1000_phy_hw_reset(struct e1000_hw *hw)
3757{ 2908{
3758 u32 ctrl, ctrl_ext; 2909 u32 ctrl, ctrl_ext;
3759 u32 led_ctrl; 2910 u32 led_ctrl;
3760 s32 ret_val; 2911 s32 ret_val;
3761 u16 swfw; 2912
3762 2913 DEBUGFUNC("e1000_phy_hw_reset");
3763 DEBUGFUNC("e1000_phy_hw_reset"); 2914
3764 2915 DEBUGOUT("Resetting Phy...\n");
3765 /* In the case of the phy reset being blocked, it's not an error, we 2916
3766 * simply return success without performing the reset. */ 2917 if (hw->mac_type > e1000_82543) {
3767 ret_val = e1000_check_phy_reset_block(hw); 2918 /* Read the device control register and assert the E1000_CTRL_PHY_RST
3768 if (ret_val) 2919 * bit. Then, take it out of reset.
3769 return E1000_SUCCESS; 2920 * For e1000 hardware, we delay for 10ms between the assert
3770 2921 * and deassert.
3771 DEBUGOUT("Resetting Phy...\n"); 2922 */
3772 2923 ctrl = er32(CTRL);
3773 if (hw->mac_type > e1000_82543) { 2924 ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
3774 if ((hw->mac_type == e1000_80003es2lan) && 2925 E1000_WRITE_FLUSH();
3775 (er32(STATUS) & E1000_STATUS_FUNC_1)) { 2926
3776 swfw = E1000_SWFW_PHY1_SM; 2927 msleep(10);
3777 } else { 2928
3778 swfw = E1000_SWFW_PHY0_SM; 2929 ew32(CTRL, ctrl);
3779 } 2930 E1000_WRITE_FLUSH();
3780 if (e1000_swfw_sync_acquire(hw, swfw)) { 2931
3781 DEBUGOUT("Unable to acquire swfw sync\n"); 2932 } else {
3782 return -E1000_ERR_SWFW_SYNC; 2933 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
3783 } 2934 * bit to put the PHY into reset. Then, take it out of reset.
3784 /* Read the device control register and assert the E1000_CTRL_PHY_RST 2935 */
3785 * bit. Then, take it out of reset. 2936 ctrl_ext = er32(CTRL_EXT);
3786 * For pre-e1000_82571 hardware, we delay for 10ms between the assert 2937 ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
3787 * and deassert. For e1000_82571 hardware and later, we instead delay 2938 ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
3788 * for 50us between and 10ms after the deassertion. 2939 ew32(CTRL_EXT, ctrl_ext);
3789 */ 2940 E1000_WRITE_FLUSH();
3790 ctrl = er32(CTRL); 2941 msleep(10);
3791 ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); 2942 ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
3792 E1000_WRITE_FLUSH(); 2943 ew32(CTRL_EXT, ctrl_ext);
3793 2944 E1000_WRITE_FLUSH();
3794 if (hw->mac_type < e1000_82571) 2945 }
3795 msleep(10); 2946 udelay(150);
3796 else 2947
3797 udelay(100); 2948 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
3798 2949 /* Configure activity LED after PHY reset */
3799 ew32(CTRL, ctrl); 2950 led_ctrl = er32(LEDCTL);
3800 E1000_WRITE_FLUSH(); 2951 led_ctrl &= IGP_ACTIVITY_LED_MASK;
3801 2952 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
3802 if (hw->mac_type >= e1000_82571) 2953 ew32(LEDCTL, led_ctrl);
3803 mdelay(10); 2954 }
3804 2955
3805 e1000_swfw_sync_release(hw, swfw); 2956 /* Wait for FW to finish PHY configuration. */
3806 } else { 2957 ret_val = e1000_get_phy_cfg_done(hw);
3807 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR 2958 if (ret_val != E1000_SUCCESS)
3808 * bit to put the PHY into reset. Then, take it out of reset. 2959 return ret_val;
3809 */ 2960
3810 ctrl_ext = er32(CTRL_EXT); 2961 return ret_val;
3811 ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
3812 ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
3813 ew32(CTRL_EXT, ctrl_ext);
3814 E1000_WRITE_FLUSH();
3815 msleep(10);
3816 ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
3817 ew32(CTRL_EXT, ctrl_ext);
3818 E1000_WRITE_FLUSH();
3819 }
3820 udelay(150);
3821
3822 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
3823 /* Configure activity LED after PHY reset */
3824 led_ctrl = er32(LEDCTL);
3825 led_ctrl &= IGP_ACTIVITY_LED_MASK;
3826 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
3827 ew32(LEDCTL, led_ctrl);
3828 }
3829
3830 /* Wait for FW to finish PHY configuration. */
3831 ret_val = e1000_get_phy_cfg_done(hw);
3832 if (ret_val != E1000_SUCCESS)
3833 return ret_val;
3834 e1000_release_software_semaphore(hw);
3835
3836 if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
3837 ret_val = e1000_init_lcd_from_nvm(hw);
3838
3839 return ret_val;
3840} 2962}
3841 2963
3842/****************************************************************************** 2964/**
3843* Resets the PHY 2965 * e1000_phy_reset - reset the phy to commit settings
3844* 2966 * @hw: Struct containing variables accessed by shared code
3845* hw - Struct containing variables accessed by shared code 2967 *
3846* 2968 * Resets the PHY
3847* Sets bit 15 of the MII Control register 2969 * Sets bit 15 of the MII Control register
3848******************************************************************************/ 2970 */
3849s32 e1000_phy_reset(struct e1000_hw *hw) 2971s32 e1000_phy_reset(struct e1000_hw *hw)
3850{ 2972{
3851 s32 ret_val; 2973 s32 ret_val;
3852 u16 phy_data; 2974 u16 phy_data;
3853
3854 DEBUGFUNC("e1000_phy_reset");
3855
3856 /* In the case of the phy reset being blocked, it's not an error, we
3857 * simply return success without performing the reset. */
3858 ret_val = e1000_check_phy_reset_block(hw);
3859 if (ret_val)
3860 return E1000_SUCCESS;
3861
3862 switch (hw->phy_type) {
3863 case e1000_phy_igp:
3864 case e1000_phy_igp_2:
3865 case e1000_phy_igp_3:
3866 case e1000_phy_ife:
3867 ret_val = e1000_phy_hw_reset(hw);
3868 if (ret_val)
3869 return ret_val;
3870 break;
3871 default:
3872 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
3873 if (ret_val)
3874 return ret_val;
3875
3876 phy_data |= MII_CR_RESET;
3877 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
3878 if (ret_val)
3879 return ret_val;
3880
3881 udelay(1);
3882 break;
3883 }
3884
3885 if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
3886 e1000_phy_init_script(hw);
3887
3888 return E1000_SUCCESS;
3889}
3890 2975
3891/****************************************************************************** 2976 DEBUGFUNC("e1000_phy_reset");
3892* Work-around for 82566 power-down: on D3 entry-
3893* 1) disable gigabit link
3894* 2) write VR power-down enable
3895* 3) read it back
3896* if successful continue, else issue LCD reset and repeat
3897*
3898* hw - struct containing variables accessed by shared code
3899******************************************************************************/
3900void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3901{
3902 s32 reg;
3903 u16 phy_data;
3904 s32 retry = 0;
3905 2977
3906 DEBUGFUNC("e1000_phy_powerdown_workaround"); 2978 switch (hw->phy_type) {
2979 case e1000_phy_igp:
2980 ret_val = e1000_phy_hw_reset(hw);
2981 if (ret_val)
2982 return ret_val;
2983 break;
2984 default:
2985 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
2986 if (ret_val)
2987 return ret_val;
3907 2988
3908 if (hw->phy_type != e1000_phy_igp_3) 2989 phy_data |= MII_CR_RESET;
3909 return; 2990 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
2991 if (ret_val)
2992 return ret_val;
3910 2993
3911 do { 2994 udelay(1);
3912 /* Disable link */ 2995 break;
3913 reg = er32(PHY_CTRL); 2996 }
3914 ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
3915 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3916 2997
3917 /* Write VR power-down enable - bits 9:8 should be 10b */ 2998 if (hw->phy_type == e1000_phy_igp)
3918 e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); 2999 e1000_phy_init_script(hw);
3919 phy_data |= (1 << 9);
3920 phy_data &= ~(1 << 8);
3921 e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
3922 3000
3923 /* Read it back and test */ 3001 return E1000_SUCCESS;
3924 e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); 3002}
3925 if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
3926 break;
3927 3003
3928 /* Issue PHY reset and repeat at most one more time */ 3004/**
3929 reg = er32(CTRL); 3005 * e1000_detect_gig_phy - check the phy type
3930 ew32(CTRL, reg | E1000_CTRL_PHY_RST); 3006 * @hw: Struct containing variables accessed by shared code
3931 retry++; 3007 *
3932 } while (retry); 3008 * Probes the expected PHY address for known PHY IDs
3009 */
3010static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3011{
3012 s32 phy_init_status, ret_val;
3013 u16 phy_id_high, phy_id_low;
3014 bool match = false;
3933 3015
3934 return; 3016 DEBUGFUNC("e1000_detect_gig_phy");
3935 3017
3936} 3018 if (hw->phy_id != 0)
3019 return E1000_SUCCESS;
3937 3020
3938/****************************************************************************** 3021 /* Read the PHY ID Registers to identify which PHY is onboard. */
3939* Work-around for 82566 Kumeran PCS lock loss: 3022 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
3940* On link status change (i.e. PCI reset, speed change) and link is up and 3023 if (ret_val)
3941* speed is gigabit- 3024 return ret_val;
3942* 0) if workaround is optionally disabled do nothing
3943* 1) wait 1ms for Kumeran link to come up
3944* 2) check Kumeran Diagnostic register PCS lock loss bit
3945* 3) if not set the link is locked (all is good), otherwise...
3946* 4) reset the PHY
3947* 5) repeat up to 10 times
3948* Note: this is only called for IGP3 copper when speed is 1gb.
3949*
3950* hw - struct containing variables accessed by shared code
3951******************************************************************************/
3952static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
3953{
3954 s32 ret_val;
3955 s32 reg;
3956 s32 cnt;
3957 u16 phy_data;
3958
3959 if (hw->kmrn_lock_loss_workaround_disabled)
3960 return E1000_SUCCESS;
3961
3962 /* Make sure link is up before proceeding. If not just return.
3963 * Attempting this while link is negotiating fouled up link
3964 * stability */
3965 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3966 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
3967
3968 if (phy_data & MII_SR_LINK_STATUS) {
3969 for (cnt = 0; cnt < 10; cnt++) {
3970 /* read once to clear */
3971 ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
3972 if (ret_val)
3973 return ret_val;
3974 /* and again to get new status */
3975 ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
3976 if (ret_val)
3977 return ret_val;
3978
3979 /* check for PCS lock */
3980 if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3981 return E1000_SUCCESS;
3982
3983 /* Issue PHY reset */
3984 e1000_phy_hw_reset(hw);
3985 mdelay(5);
3986 }
3987 /* Disable GigE link negotiation */
3988 reg = er32(PHY_CTRL);
3989 ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
3990 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3991
3992 /* unable to acquire PCS lock */
3993 return E1000_ERR_PHY;
3994 }
3995
3996 return E1000_SUCCESS;
3997}
3998 3025
3999/****************************************************************************** 3026 hw->phy_id = (u32) (phy_id_high << 16);
4000* Probes the expected PHY address for known PHY IDs 3027 udelay(20);
4001* 3028 ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
4002* hw - Struct containing variables accessed by shared code 3029 if (ret_val)
4003******************************************************************************/ 3030 return ret_val;
4004static s32 e1000_detect_gig_phy(struct e1000_hw *hw) 3031
4005{ 3032 hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
4006 s32 phy_init_status, ret_val; 3033 hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
4007 u16 phy_id_high, phy_id_low; 3034
4008 bool match = false; 3035 switch (hw->mac_type) {
4009 3036 case e1000_82543:
4010 DEBUGFUNC("e1000_detect_gig_phy"); 3037 if (hw->phy_id == M88E1000_E_PHY_ID)
4011 3038 match = true;
4012 if (hw->phy_id != 0) 3039 break;
4013 return E1000_SUCCESS; 3040 case e1000_82544:
4014 3041 if (hw->phy_id == M88E1000_I_PHY_ID)
4015 /* The 82571 firmware may still be configuring the PHY. In this 3042 match = true;
4016 * case, we cannot access the PHY until the configuration is done. So 3043 break;
4017 * we explicitly set the PHY values. */ 3044 case e1000_82540:
4018 if (hw->mac_type == e1000_82571 || 3045 case e1000_82545:
4019 hw->mac_type == e1000_82572) { 3046 case e1000_82545_rev_3:
4020 hw->phy_id = IGP01E1000_I_PHY_ID; 3047 case e1000_82546:
4021 hw->phy_type = e1000_phy_igp_2; 3048 case e1000_82546_rev_3:
4022 return E1000_SUCCESS; 3049 if (hw->phy_id == M88E1011_I_PHY_ID)
4023 } 3050 match = true;
4024 3051 break;
4025 /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work- 3052 case e1000_82541:
4026 * around that forces PHY page 0 to be set or the reads fail. The rest of 3053 case e1000_82541_rev_2:
4027 * the code in this routine uses e1000_read_phy_reg to read the PHY ID. 3054 case e1000_82547:
4028 * So for ESB-2 we need to have this set so our reads won't fail. If the 3055 case e1000_82547_rev_2:
4029 * attached PHY is not a e1000_phy_gg82563, the routines below will figure 3056 if (hw->phy_id == IGP01E1000_I_PHY_ID)
4030 * this out as well. */ 3057 match = true;
4031 if (hw->mac_type == e1000_80003es2lan) 3058 break;
4032 hw->phy_type = e1000_phy_gg82563; 3059 default:
4033 3060 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
4034 /* Read the PHY ID Registers to identify which PHY is onboard. */ 3061 return -E1000_ERR_CONFIG;
4035 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); 3062 }
4036 if (ret_val) 3063 phy_init_status = e1000_set_phy_type(hw);
4037 return ret_val; 3064
4038 3065 if ((match) && (phy_init_status == E1000_SUCCESS)) {
4039 hw->phy_id = (u32)(phy_id_high << 16); 3066 DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
4040 udelay(20); 3067 return E1000_SUCCESS;
4041 ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); 3068 }
4042 if (ret_val) 3069 DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
4043 return ret_val; 3070 return -E1000_ERR_PHY;
4044
4045 hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
4046 hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
4047
4048 switch (hw->mac_type) {
4049 case e1000_82543:
4050 if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
4051 break;
4052 case e1000_82544:
4053 if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
4054 break;
4055 case e1000_82540:
4056 case e1000_82545:
4057 case e1000_82545_rev_3:
4058 case e1000_82546:
4059 case e1000_82546_rev_3:
4060 if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
4061 break;
4062 case e1000_82541:
4063 case e1000_82541_rev_2:
4064 case e1000_82547:
4065 case e1000_82547_rev_2:
4066 if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
4067 break;
4068 case e1000_82573:
4069 if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
4070 break;
4071 case e1000_80003es2lan:
4072 if (hw->phy_id == GG82563_E_PHY_ID) match = true;
4073 break;
4074 case e1000_ich8lan:
4075 if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
4076 if (hw->phy_id == IFE_E_PHY_ID) match = true;
4077 if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
4078 if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
4079 break;
4080 default:
4081 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
4082 return -E1000_ERR_CONFIG;
4083 }
4084 phy_init_status = e1000_set_phy_type(hw);
4085
4086 if ((match) && (phy_init_status == E1000_SUCCESS)) {
4087 DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
4088 return E1000_SUCCESS;
4089 }
4090 DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
4091 return -E1000_ERR_PHY;
4092} 3071}
4093 3072
4094/****************************************************************************** 3073/**
4095* Resets the PHY's DSP 3074 * e1000_phy_reset_dsp - reset DSP
4096* 3075 * @hw: Struct containing variables accessed by shared code
4097* hw - Struct containing variables accessed by shared code 3076 *
4098******************************************************************************/ 3077 * Resets the PHY's DSP
3078 */
4099static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3079static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
4100{ 3080{
4101 s32 ret_val; 3081 s32 ret_val;
4102 DEBUGFUNC("e1000_phy_reset_dsp"); 3082 DEBUGFUNC("e1000_phy_reset_dsp");
4103 3083
4104 do { 3084 do {
4105 if (hw->phy_type != e1000_phy_gg82563) { 3085 ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
4106 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); 3086 if (ret_val)
4107 if (ret_val) break; 3087 break;
4108 } 3088 ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
4109 ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); 3089 if (ret_val)
4110 if (ret_val) break; 3090 break;
4111 ret_val = e1000_write_phy_reg(hw, 30, 0x0000); 3091 ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
4112 if (ret_val) break; 3092 if (ret_val)
4113 ret_val = E1000_SUCCESS; 3093 break;
4114 } while (0); 3094 ret_val = E1000_SUCCESS;
4115 3095 } while (0);
4116 return ret_val; 3096
3097 return ret_val;
4117} 3098}
4118 3099
4119/****************************************************************************** 3100/**
4120* Get PHY information from various PHY registers for igp PHY only. 3101 * e1000_phy_igp_get_info - get igp specific registers
4121* 3102 * @hw: Struct containing variables accessed by shared code
4122* hw - Struct containing variables accessed by shared code 3103 * @phy_info: PHY information structure
4123* phy_info - PHY information structure 3104 *
4124******************************************************************************/ 3105 * Get PHY information from various PHY registers for igp PHY only.
3106 */
4125static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, 3107static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
4126 struct e1000_phy_info *phy_info) 3108 struct e1000_phy_info *phy_info)
4127{ 3109{
4128 s32 ret_val; 3110 s32 ret_val;
4129 u16 phy_data, min_length, max_length, average; 3111 u16 phy_data, min_length, max_length, average;
4130 e1000_rev_polarity polarity; 3112 e1000_rev_polarity polarity;
4131 3113
4132 DEBUGFUNC("e1000_phy_igp_get_info"); 3114 DEBUGFUNC("e1000_phy_igp_get_info");
4133 3115
4134 /* The downshift status is checked only once, after link is established, 3116 /* The downshift status is checked only once, after link is established,
4135 * and it stored in the hw->speed_downgraded parameter. */ 3117 * and it stored in the hw->speed_downgraded parameter. */
4136 phy_info->downshift = (e1000_downshift)hw->speed_downgraded; 3118 phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
4137 3119
4138 /* IGP01E1000 does not need to support it. */ 3120 /* IGP01E1000 does not need to support it. */
4139 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; 3121 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
4140 3122
4141 /* IGP01E1000 always correct polarity reversal */ 3123 /* IGP01E1000 always correct polarity reversal */
4142 phy_info->polarity_correction = e1000_polarity_reversal_enabled; 3124 phy_info->polarity_correction = e1000_polarity_reversal_enabled;
4143 3125
4144 /* Check polarity status */ 3126 /* Check polarity status */
4145 ret_val = e1000_check_polarity(hw, &polarity); 3127 ret_val = e1000_check_polarity(hw, &polarity);
4146 if (ret_val) 3128 if (ret_val)
4147 return ret_val; 3129 return ret_val;
4148 3130
4149 phy_info->cable_polarity = polarity; 3131 phy_info->cable_polarity = polarity;
4150 3132
4151 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); 3133 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
4152 if (ret_val) 3134 if (ret_val)
4153 return ret_val; 3135 return ret_val;
4154 3136
4155 phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & IGP01E1000_PSSR_MDIX) >> 3137 phy_info->mdix_mode =
4156 IGP01E1000_PSSR_MDIX_SHIFT); 3138 (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
4157 3139 IGP01E1000_PSSR_MDIX_SHIFT);
4158 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 3140
4159 IGP01E1000_PSSR_SPEED_1000MBPS) { 3141 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
4160 /* Local/Remote Receiver Information are only valid at 1000 Mbps */ 3142 IGP01E1000_PSSR_SPEED_1000MBPS) {
4161 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 3143 /* Local/Remote Receiver Information are only valid at 1000 Mbps */
4162 if (ret_val) 3144 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
4163 return ret_val; 3145 if (ret_val)
4164 3146 return ret_val;
4165 phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> 3147
4166 SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 3148 phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
4167 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; 3149 SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
4168 phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> 3150 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4169 SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 3151 phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
4170 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; 3152 SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
4171 3153 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4172 /* Get cable length */ 3154
4173 ret_val = e1000_get_cable_length(hw, &min_length, &max_length); 3155 /* Get cable length */
4174 if (ret_val) 3156 ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
4175 return ret_val; 3157 if (ret_val)
4176 3158 return ret_val;
4177 /* Translate to old method */ 3159
4178 average = (max_length + min_length) / 2; 3160 /* Translate to old method */
4179 3161 average = (max_length + min_length) / 2;
4180 if (average <= e1000_igp_cable_length_50) 3162
4181 phy_info->cable_length = e1000_cable_length_50; 3163 if (average <= e1000_igp_cable_length_50)
4182 else if (average <= e1000_igp_cable_length_80) 3164 phy_info->cable_length = e1000_cable_length_50;
4183 phy_info->cable_length = e1000_cable_length_50_80; 3165 else if (average <= e1000_igp_cable_length_80)
4184 else if (average <= e1000_igp_cable_length_110) 3166 phy_info->cable_length = e1000_cable_length_50_80;
4185 phy_info->cable_length = e1000_cable_length_80_110; 3167 else if (average <= e1000_igp_cable_length_110)
4186 else if (average <= e1000_igp_cable_length_140) 3168 phy_info->cable_length = e1000_cable_length_80_110;
4187 phy_info->cable_length = e1000_cable_length_110_140; 3169 else if (average <= e1000_igp_cable_length_140)
4188 else 3170 phy_info->cable_length = e1000_cable_length_110_140;
4189 phy_info->cable_length = e1000_cable_length_140; 3171 else
4190 } 3172 phy_info->cable_length = e1000_cable_length_140;
4191 3173 }
4192 return E1000_SUCCESS;
4193}
4194 3174
4195/****************************************************************************** 3175 return E1000_SUCCESS;
4196* Get PHY information from various PHY registers for ife PHY only.
4197*
4198* hw - Struct containing variables accessed by shared code
4199* phy_info - PHY information structure
4200******************************************************************************/
4201static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
4202 struct e1000_phy_info *phy_info)
4203{
4204 s32 ret_val;
4205 u16 phy_data;
4206 e1000_rev_polarity polarity;
4207
4208 DEBUGFUNC("e1000_phy_ife_get_info");
4209
4210 phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
4211 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
4212
4213 ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
4214 if (ret_val)
4215 return ret_val;
4216 phy_info->polarity_correction =
4217 ((phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
4218 IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT) ?
4219 e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
4220
4221 if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
4222 ret_val = e1000_check_polarity(hw, &polarity);
4223 if (ret_val)
4224 return ret_val;
4225 } else {
4226 /* Polarity is forced. */
4227 polarity = ((phy_data & IFE_PSC_FORCE_POLARITY) >>
4228 IFE_PSC_FORCE_POLARITY_SHIFT) ?
4229 e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
4230 }
4231 phy_info->cable_polarity = polarity;
4232
4233 ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
4234 if (ret_val)
4235 return ret_val;
4236
4237 phy_info->mdix_mode = (e1000_auto_x_mode)
4238 ((phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
4239 IFE_PMC_MDIX_MODE_SHIFT);
4240
4241 return E1000_SUCCESS;
4242} 3176}
4243 3177
4244/****************************************************************************** 3178/**
4245* Get PHY information from various PHY registers fot m88 PHY only. 3179 * e1000_phy_m88_get_info - get m88 specific registers
4246* 3180 * @hw: Struct containing variables accessed by shared code
4247* hw - Struct containing variables accessed by shared code 3181 * @phy_info: PHY information structure
4248* phy_info - PHY information structure 3182 *
4249******************************************************************************/ 3183 * Get PHY information from various PHY registers for m88 PHY only.
3184 */
4250static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, 3185static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
4251 struct e1000_phy_info *phy_info) 3186 struct e1000_phy_info *phy_info)
4252{ 3187{
4253 s32 ret_val; 3188 s32 ret_val;
4254 u16 phy_data; 3189 u16 phy_data;
4255 e1000_rev_polarity polarity; 3190 e1000_rev_polarity polarity;
4256 3191
4257 DEBUGFUNC("e1000_phy_m88_get_info"); 3192 DEBUGFUNC("e1000_phy_m88_get_info");
4258 3193
4259 /* The downshift status is checked only once, after link is established, 3194 /* The downshift status is checked only once, after link is established,
4260 * and it stored in the hw->speed_downgraded parameter. */ 3195 * and it stored in the hw->speed_downgraded parameter. */
4261 phy_info->downshift = (e1000_downshift)hw->speed_downgraded; 3196 phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
4262 3197
4263 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 3198 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
4264 if (ret_val) 3199 if (ret_val)
4265 return ret_val; 3200 return ret_val;
4266 3201
4267 phy_info->extended_10bt_distance = 3202 phy_info->extended_10bt_distance =
4268 ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> 3203 ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
4269 M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? 3204 M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
4270 e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal; 3205 e1000_10bt_ext_dist_enable_lower :
4271 3206 e1000_10bt_ext_dist_enable_normal;
4272 phy_info->polarity_correction = 3207
4273 ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> 3208 phy_info->polarity_correction =
4274 M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? 3209 ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
4275 e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; 3210 M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
4276 3211 e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
4277 /* Check polarity status */ 3212
4278 ret_val = e1000_check_polarity(hw, &polarity); 3213 /* Check polarity status */
4279 if (ret_val) 3214 ret_val = e1000_check_polarity(hw, &polarity);
4280 return ret_val; 3215 if (ret_val)
4281 phy_info->cable_polarity = polarity; 3216 return ret_val;
4282 3217 phy_info->cable_polarity = polarity;
4283 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 3218
4284 if (ret_val) 3219 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
4285 return ret_val; 3220 if (ret_val)
4286 3221 return ret_val;
4287 phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & M88E1000_PSSR_MDIX) >> 3222
4288 M88E1000_PSSR_MDIX_SHIFT); 3223 phy_info->mdix_mode =
4289 3224 (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
4290 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { 3225 M88E1000_PSSR_MDIX_SHIFT);
4291 /* Cable Length Estimation and Local/Remote Receiver Information 3226
4292 * are only valid at 1000 Mbps. 3227 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
4293 */ 3228 /* Cable Length Estimation and Local/Remote Receiver Information
4294 if (hw->phy_type != e1000_phy_gg82563) { 3229 * are only valid at 1000 Mbps.
4295 phy_info->cable_length = (e1000_cable_length)((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 3230 */
4296 M88E1000_PSSR_CABLE_LENGTH_SHIFT); 3231 phy_info->cable_length =
4297 } else { 3232 (e1000_cable_length) ((phy_data &
4298 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, 3233 M88E1000_PSSR_CABLE_LENGTH) >>
4299 &phy_data); 3234 M88E1000_PSSR_CABLE_LENGTH_SHIFT);
4300 if (ret_val) 3235
4301 return ret_val; 3236 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
4302 3237 if (ret_val)
4303 phy_info->cable_length = (e1000_cable_length)(phy_data & GG82563_DSPD_CABLE_LENGTH); 3238 return ret_val;
4304 } 3239
4305 3240 phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
4306 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 3241 SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
4307 if (ret_val) 3242 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4308 return ret_val; 3243 phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
4309 3244 SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
4310 phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> 3245 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4311 SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 3246
4312 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; 3247 }
4313 phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> 3248
4314 SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 3249 return E1000_SUCCESS;
4315 e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
4316
4317 }
4318
4319 return E1000_SUCCESS;
4320} 3250}
4321 3251
4322/****************************************************************************** 3252/**
4323* Get PHY information from various PHY registers 3253 * e1000_phy_get_info - request phy info
4324* 3254 * @hw: Struct containing variables accessed by shared code
4325* hw - Struct containing variables accessed by shared code 3255 * @phy_info: PHY information structure
4326* phy_info - PHY information structure 3256 *
4327******************************************************************************/ 3257 * Get PHY information from various PHY registers
3258 */
4328s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) 3259s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
4329{ 3260{
4330 s32 ret_val; 3261 s32 ret_val;
4331 u16 phy_data; 3262 u16 phy_data;
4332 3263
4333 DEBUGFUNC("e1000_phy_get_info"); 3264 DEBUGFUNC("e1000_phy_get_info");
4334 3265
4335 phy_info->cable_length = e1000_cable_length_undefined; 3266 phy_info->cable_length = e1000_cable_length_undefined;
4336 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3267 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
4337 phy_info->cable_polarity = e1000_rev_polarity_undefined; 3268 phy_info->cable_polarity = e1000_rev_polarity_undefined;
4338 phy_info->downshift = e1000_downshift_undefined; 3269 phy_info->downshift = e1000_downshift_undefined;
4339 phy_info->polarity_correction = e1000_polarity_reversal_undefined; 3270 phy_info->polarity_correction = e1000_polarity_reversal_undefined;
4340 phy_info->mdix_mode = e1000_auto_x_mode_undefined; 3271 phy_info->mdix_mode = e1000_auto_x_mode_undefined;
4341 phy_info->local_rx = e1000_1000t_rx_status_undefined; 3272 phy_info->local_rx = e1000_1000t_rx_status_undefined;
4342 phy_info->remote_rx = e1000_1000t_rx_status_undefined; 3273 phy_info->remote_rx = e1000_1000t_rx_status_undefined;
4343 3274
4344 if (hw->media_type != e1000_media_type_copper) { 3275 if (hw->media_type != e1000_media_type_copper) {
4345 DEBUGOUT("PHY info is only valid for copper media\n"); 3276 DEBUGOUT("PHY info is only valid for copper media\n");
4346 return -E1000_ERR_CONFIG; 3277 return -E1000_ERR_CONFIG;
4347 } 3278 }
4348 3279
4349 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 3280 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
4350 if (ret_val) 3281 if (ret_val)
4351 return ret_val; 3282 return ret_val;
4352 3283
4353 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); 3284 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
4354 if (ret_val) 3285 if (ret_val)
4355 return ret_val; 3286 return ret_val;
4356 3287
4357 if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { 3288 if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
4358 DEBUGOUT("PHY info is only valid if link is up\n"); 3289 DEBUGOUT("PHY info is only valid if link is up\n");
4359 return -E1000_ERR_CONFIG; 3290 return -E1000_ERR_CONFIG;
4360 } 3291 }
4361 3292
4362 if (hw->phy_type == e1000_phy_igp || 3293 if (hw->phy_type == e1000_phy_igp)
4363 hw->phy_type == e1000_phy_igp_3 || 3294 return e1000_phy_igp_get_info(hw, phy_info);
4364 hw->phy_type == e1000_phy_igp_2) 3295 else
4365 return e1000_phy_igp_get_info(hw, phy_info); 3296 return e1000_phy_m88_get_info(hw, phy_info);
4366 else if (hw->phy_type == e1000_phy_ife)
4367 return e1000_phy_ife_get_info(hw, phy_info);
4368 else
4369 return e1000_phy_m88_get_info(hw, phy_info);
4370} 3297}
4371 3298
4372s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3299s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
4373{ 3300{
4374 DEBUGFUNC("e1000_validate_mdi_settings"); 3301 DEBUGFUNC("e1000_validate_mdi_settings");
4375
4376 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
4377 DEBUGOUT("Invalid MDI setting detected\n");
4378 hw->mdix = 1;
4379 return -E1000_ERR_CONFIG;
4380 }
4381 return E1000_SUCCESS;
4382}
4383 3302
3303 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
3304 DEBUGOUT("Invalid MDI setting detected\n");
3305 hw->mdix = 1;
3306 return -E1000_ERR_CONFIG;
3307 }
3308 return E1000_SUCCESS;
3309}
4384 3310
4385/****************************************************************************** 3311/**
4386 * Sets up eeprom variables in the hw struct. Must be called after mac_type 3312 * e1000_init_eeprom_params - initialize sw eeprom vars
4387 * is configured. Additionally, if this is ICH8, the flash controller GbE 3313 * @hw: Struct containing variables accessed by shared code
4388 * registers must be mapped, or this will crash.
4389 * 3314 *
4390 * hw - Struct containing variables accessed by shared code 3315 * Sets up eeprom variables in the hw struct. Must be called after mac_type
4391 *****************************************************************************/ 3316 * is configured.
3317 */
4392s32 e1000_init_eeprom_params(struct e1000_hw *hw) 3318s32 e1000_init_eeprom_params(struct e1000_hw *hw)
4393{ 3319{
4394 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3320 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4395 u32 eecd = er32(EECD); 3321 u32 eecd = er32(EECD);
4396 s32 ret_val = E1000_SUCCESS; 3322 s32 ret_val = E1000_SUCCESS;
4397 u16 eeprom_size; 3323 u16 eeprom_size;
4398 3324
4399 DEBUGFUNC("e1000_init_eeprom_params"); 3325 DEBUGFUNC("e1000_init_eeprom_params");
4400 3326
4401 switch (hw->mac_type) { 3327 switch (hw->mac_type) {
4402 case e1000_82542_rev2_0: 3328 case e1000_82542_rev2_0:
4403 case e1000_82542_rev2_1: 3329 case e1000_82542_rev2_1:
4404 case e1000_82543: 3330 case e1000_82543:
4405 case e1000_82544: 3331 case e1000_82544:
4406 eeprom->type = e1000_eeprom_microwire; 3332 eeprom->type = e1000_eeprom_microwire;
4407 eeprom->word_size = 64; 3333 eeprom->word_size = 64;
4408 eeprom->opcode_bits = 3; 3334 eeprom->opcode_bits = 3;
4409 eeprom->address_bits = 6; 3335 eeprom->address_bits = 6;
4410 eeprom->delay_usec = 50; 3336 eeprom->delay_usec = 50;
4411 eeprom->use_eerd = false; 3337 break;
4412 eeprom->use_eewr = false; 3338 case e1000_82540:
4413 break; 3339 case e1000_82545:
4414 case e1000_82540: 3340 case e1000_82545_rev_3:
4415 case e1000_82545: 3341 case e1000_82546:
4416 case e1000_82545_rev_3: 3342 case e1000_82546_rev_3:
4417 case e1000_82546: 3343 eeprom->type = e1000_eeprom_microwire;
4418 case e1000_82546_rev_3: 3344 eeprom->opcode_bits = 3;
4419 eeprom->type = e1000_eeprom_microwire; 3345 eeprom->delay_usec = 50;
4420 eeprom->opcode_bits = 3; 3346 if (eecd & E1000_EECD_SIZE) {
4421 eeprom->delay_usec = 50; 3347 eeprom->word_size = 256;
4422 if (eecd & E1000_EECD_SIZE) { 3348 eeprom->address_bits = 8;
4423 eeprom->word_size = 256; 3349 } else {
4424 eeprom->address_bits = 8; 3350 eeprom->word_size = 64;
4425 } else { 3351 eeprom->address_bits = 6;
4426 eeprom->word_size = 64; 3352 }
4427 eeprom->address_bits = 6; 3353 break;
4428 } 3354 case e1000_82541:
4429 eeprom->use_eerd = false; 3355 case e1000_82541_rev_2:
4430 eeprom->use_eewr = false; 3356 case e1000_82547:
4431 break; 3357 case e1000_82547_rev_2:
4432 case e1000_82541: 3358 if (eecd & E1000_EECD_TYPE) {
4433 case e1000_82541_rev_2: 3359 eeprom->type = e1000_eeprom_spi;
4434 case e1000_82547: 3360 eeprom->opcode_bits = 8;
4435 case e1000_82547_rev_2: 3361 eeprom->delay_usec = 1;
4436 if (eecd & E1000_EECD_TYPE) { 3362 if (eecd & E1000_EECD_ADDR_BITS) {
4437 eeprom->type = e1000_eeprom_spi; 3363 eeprom->page_size = 32;
4438 eeprom->opcode_bits = 8; 3364 eeprom->address_bits = 16;
4439 eeprom->delay_usec = 1; 3365 } else {
4440 if (eecd & E1000_EECD_ADDR_BITS) { 3366 eeprom->page_size = 8;
4441 eeprom->page_size = 32; 3367 eeprom->address_bits = 8;
4442 eeprom->address_bits = 16; 3368 }
4443 } else { 3369 } else {
4444 eeprom->page_size = 8; 3370 eeprom->type = e1000_eeprom_microwire;
4445 eeprom->address_bits = 8; 3371 eeprom->opcode_bits = 3;
4446 } 3372 eeprom->delay_usec = 50;
4447 } else { 3373 if (eecd & E1000_EECD_ADDR_BITS) {
4448 eeprom->type = e1000_eeprom_microwire; 3374 eeprom->word_size = 256;
4449 eeprom->opcode_bits = 3; 3375 eeprom->address_bits = 8;
4450 eeprom->delay_usec = 50; 3376 } else {
4451 if (eecd & E1000_EECD_ADDR_BITS) { 3377 eeprom->word_size = 64;
4452 eeprom->word_size = 256; 3378 eeprom->address_bits = 6;
4453 eeprom->address_bits = 8; 3379 }
4454 } else { 3380 }
4455 eeprom->word_size = 64; 3381 break;
4456 eeprom->address_bits = 6; 3382 default:
4457 } 3383 break;
4458 } 3384 }
4459 eeprom->use_eerd = false; 3385
4460 eeprom->use_eewr = false; 3386 if (eeprom->type == e1000_eeprom_spi) {
4461 break; 3387 /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
4462 case e1000_82571: 3388 * 32KB (incremented by powers of 2).
4463 case e1000_82572: 3389 */
4464 eeprom->type = e1000_eeprom_spi; 3390 /* Set to default value for initial eeprom read. */
4465 eeprom->opcode_bits = 8; 3391 eeprom->word_size = 64;
4466 eeprom->delay_usec = 1; 3392 ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
4467 if (eecd & E1000_EECD_ADDR_BITS) { 3393 if (ret_val)
4468 eeprom->page_size = 32; 3394 return ret_val;
4469 eeprom->address_bits = 16; 3395 eeprom_size =
4470 } else { 3396 (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
4471 eeprom->page_size = 8; 3397 /* 256B eeprom size was not supported in earlier hardware, so we
4472 eeprom->address_bits = 8; 3398 * bump eeprom_size up one to ensure that "1" (which maps to 256B)
4473 } 3399 * is never the result used in the shifting logic below. */
4474 eeprom->use_eerd = false; 3400 if (eeprom_size)
4475 eeprom->use_eewr = false; 3401 eeprom_size++;
4476 break; 3402
4477 case e1000_82573: 3403 eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
4478 eeprom->type = e1000_eeprom_spi; 3404 }
4479 eeprom->opcode_bits = 8; 3405 return ret_val;
4480 eeprom->delay_usec = 1;
4481 if (eecd & E1000_EECD_ADDR_BITS) {
4482 eeprom->page_size = 32;
4483 eeprom->address_bits = 16;
4484 } else {
4485 eeprom->page_size = 8;
4486 eeprom->address_bits = 8;
4487 }
4488 eeprom->use_eerd = true;
4489 eeprom->use_eewr = true;
4490 if (!e1000_is_onboard_nvm_eeprom(hw)) {
4491 eeprom->type = e1000_eeprom_flash;
4492 eeprom->word_size = 2048;
4493
4494 /* Ensure that the Autonomous FLASH update bit is cleared due to
4495 * Flash update issue on parts which use a FLASH for NVM. */
4496 eecd &= ~E1000_EECD_AUPDEN;
4497 ew32(EECD, eecd);
4498 }
4499 break;
4500 case e1000_80003es2lan:
4501 eeprom->type = e1000_eeprom_spi;
4502 eeprom->opcode_bits = 8;
4503 eeprom->delay_usec = 1;
4504 if (eecd & E1000_EECD_ADDR_BITS) {
4505 eeprom->page_size = 32;
4506 eeprom->address_bits = 16;
4507 } else {
4508 eeprom->page_size = 8;
4509 eeprom->address_bits = 8;
4510 }
4511 eeprom->use_eerd = true;
4512 eeprom->use_eewr = false;
4513 break;
4514 case e1000_ich8lan:
4515 {
4516 s32 i = 0;
4517 u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
4518
4519 eeprom->type = e1000_eeprom_ich8;
4520 eeprom->use_eerd = false;
4521 eeprom->use_eewr = false;
4522 eeprom->word_size = E1000_SHADOW_RAM_WORDS;
4523
4524 /* Zero the shadow RAM structure. But don't load it from NVM
4525 * so as to save time for driver init */
4526 if (hw->eeprom_shadow_ram != NULL) {
4527 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4528 hw->eeprom_shadow_ram[i].modified = false;
4529 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
4530 }
4531 }
4532
4533 hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
4534 ICH_FLASH_SECTOR_SIZE;
4535
4536 hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
4537 hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
4538
4539 hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
4540
4541 hw->flash_bank_size /= 2 * sizeof(u16);
4542
4543 break;
4544 }
4545 default:
4546 break;
4547 }
4548
4549 if (eeprom->type == e1000_eeprom_spi) {
4550 /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
4551 * 32KB (incremented by powers of 2).
4552 */
4553 if (hw->mac_type <= e1000_82547_rev_2) {
4554 /* Set to default value for initial eeprom read. */
4555 eeprom->word_size = 64;
4556 ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
4557 if (ret_val)
4558 return ret_val;
4559 eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
4560 /* 256B eeprom size was not supported in earlier hardware, so we
4561 * bump eeprom_size up one to ensure that "1" (which maps to 256B)
4562 * is never the result used in the shifting logic below. */
4563 if (eeprom_size)
4564 eeprom_size++;
4565 } else {
4566 eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
4567 E1000_EECD_SIZE_EX_SHIFT);
4568 }
4569
4570 eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
4571 }
4572 return ret_val;
4573} 3406}
4574 3407
4575/****************************************************************************** 3408/**
4576 * Raises the EEPROM's clock input. 3409 * e1000_raise_ee_clk - Raises the EEPROM's clock input.
4577 * 3410 * @hw: Struct containing variables accessed by shared code
4578 * hw - Struct containing variables accessed by shared code 3411 * @eecd: EECD's current value
4579 * eecd - EECD's current value 3412 */
4580 *****************************************************************************/
4581static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) 3413static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
4582{ 3414{
4583 /* Raise the clock input to the EEPROM (by setting the SK bit), and then 3415 /* Raise the clock input to the EEPROM (by setting the SK bit), and then
4584 * wait <delay> microseconds. 3416 * wait <delay> microseconds.
4585 */ 3417 */
4586 *eecd = *eecd | E1000_EECD_SK; 3418 *eecd = *eecd | E1000_EECD_SK;
4587 ew32(EECD, *eecd); 3419 ew32(EECD, *eecd);
4588 E1000_WRITE_FLUSH(); 3420 E1000_WRITE_FLUSH();
4589 udelay(hw->eeprom.delay_usec); 3421 udelay(hw->eeprom.delay_usec);
4590} 3422}
4591 3423
4592/****************************************************************************** 3424/**
4593 * Lowers the EEPROM's clock input. 3425 * e1000_lower_ee_clk - Lowers the EEPROM's clock input.
4594 * 3426 * @hw: Struct containing variables accessed by shared code
4595 * hw - Struct containing variables accessed by shared code 3427 * @eecd: EECD's current value
4596 * eecd - EECD's current value 3428 */
4597 *****************************************************************************/
4598static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) 3429static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
4599{ 3430{
4600 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then 3431 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
4601 * wait 50 microseconds. 3432 * wait 50 microseconds.
4602 */ 3433 */
4603 *eecd = *eecd & ~E1000_EECD_SK; 3434 *eecd = *eecd & ~E1000_EECD_SK;
4604 ew32(EECD, *eecd); 3435 ew32(EECD, *eecd);
4605 E1000_WRITE_FLUSH(); 3436 E1000_WRITE_FLUSH();
4606 udelay(hw->eeprom.delay_usec); 3437 udelay(hw->eeprom.delay_usec);
4607} 3438}
4608 3439
4609/****************************************************************************** 3440/**
4610 * Shift data bits out to the EEPROM. 3441 * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM.
4611 * 3442 * @hw: Struct containing variables accessed by shared code
4612 * hw - Struct containing variables accessed by shared code 3443 * @data: data to send to the EEPROM
4613 * data - data to send to the EEPROM 3444 * @count: number of bits to shift out
4614 * count - number of bits to shift out 3445 */
4615 *****************************************************************************/
4616static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) 3446static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
4617{ 3447{
4618 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3448 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4619 u32 eecd; 3449 u32 eecd;
4620 u32 mask; 3450 u32 mask;
4621 3451
4622 /* We need to shift "count" bits out to the EEPROM. So, value in the 3452 /* We need to shift "count" bits out to the EEPROM. So, value in the
4623 * "data" parameter will be shifted out to the EEPROM one bit at a time. 3453 * "data" parameter will be shifted out to the EEPROM one bit at a time.
4624 * In order to do this, "data" must be broken down into bits. 3454 * In order to do this, "data" must be broken down into bits.
4625 */ 3455 */
4626 mask = 0x01 << (count - 1); 3456 mask = 0x01 << (count - 1);
4627 eecd = er32(EECD); 3457 eecd = er32(EECD);
4628 if (eeprom->type == e1000_eeprom_microwire) { 3458 if (eeprom->type == e1000_eeprom_microwire) {
4629 eecd &= ~E1000_EECD_DO; 3459 eecd &= ~E1000_EECD_DO;
4630 } else if (eeprom->type == e1000_eeprom_spi) { 3460 } else if (eeprom->type == e1000_eeprom_spi) {
4631 eecd |= E1000_EECD_DO; 3461 eecd |= E1000_EECD_DO;
4632 } 3462 }
4633 do { 3463 do {
4634 /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", 3464 /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
4635 * and then raising and then lowering the clock (the SK bit controls 3465 * and then raising and then lowering the clock (the SK bit controls
4636 * the clock input to the EEPROM). A "0" is shifted out to the EEPROM 3466 * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
4637 * by setting "DI" to "0" and then raising and then lowering the clock. 3467 * by setting "DI" to "0" and then raising and then lowering the clock.
4638 */ 3468 */
4639 eecd &= ~E1000_EECD_DI; 3469 eecd &= ~E1000_EECD_DI;
4640 3470
4641 if (data & mask) 3471 if (data & mask)
4642 eecd |= E1000_EECD_DI; 3472 eecd |= E1000_EECD_DI;
4643 3473
4644 ew32(EECD, eecd); 3474 ew32(EECD, eecd);
4645 E1000_WRITE_FLUSH(); 3475 E1000_WRITE_FLUSH();
4646 3476
4647 udelay(eeprom->delay_usec); 3477 udelay(eeprom->delay_usec);
4648 3478
4649 e1000_raise_ee_clk(hw, &eecd); 3479 e1000_raise_ee_clk(hw, &eecd);
4650 e1000_lower_ee_clk(hw, &eecd); 3480 e1000_lower_ee_clk(hw, &eecd);
4651 3481
4652 mask = mask >> 1; 3482 mask = mask >> 1;
4653 3483
4654 } while (mask); 3484 } while (mask);
4655 3485
4656 /* We leave the "DI" bit set to "0" when we leave this routine. */ 3486 /* We leave the "DI" bit set to "0" when we leave this routine. */
4657 eecd &= ~E1000_EECD_DI; 3487 eecd &= ~E1000_EECD_DI;
4658 ew32(EECD, eecd); 3488 ew32(EECD, eecd);
4659} 3489}
4660 3490
4661/****************************************************************************** 3491/**
4662 * Shift data bits in from the EEPROM 3492 * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM
4663 * 3493 * @hw: Struct containing variables accessed by shared code
4664 * hw - Struct containing variables accessed by shared code 3494 * @count: number of bits to shift in
4665 *****************************************************************************/ 3495 */
4666static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) 3496static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
4667{ 3497{
4668 u32 eecd; 3498 u32 eecd;
4669 u32 i; 3499 u32 i;
4670 u16 data; 3500 u16 data;
4671 3501
4672 /* In order to read a register from the EEPROM, we need to shift 'count' 3502 /* In order to read a register from the EEPROM, we need to shift 'count'
4673 * bits in from the EEPROM. Bits are "shifted in" by raising the clock 3503 * bits in from the EEPROM. Bits are "shifted in" by raising the clock
4674 * input to the EEPROM (setting the SK bit), and then reading the value of 3504 * input to the EEPROM (setting the SK bit), and then reading the value of
4675 * the "DO" bit. During this "shifting in" process the "DI" bit should 3505 * the "DO" bit. During this "shifting in" process the "DI" bit should
4676 * always be clear. 3506 * always be clear.
4677 */ 3507 */
4678 3508
4679 eecd = er32(EECD); 3509 eecd = er32(EECD);
4680 3510
4681 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); 3511 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
4682 data = 0; 3512 data = 0;
4683 3513
4684 for (i = 0; i < count; i++) { 3514 for (i = 0; i < count; i++) {
4685 data = data << 1; 3515 data = data << 1;
4686 e1000_raise_ee_clk(hw, &eecd); 3516 e1000_raise_ee_clk(hw, &eecd);
4687 3517
4688 eecd = er32(EECD); 3518 eecd = er32(EECD);
4689 3519
4690 eecd &= ~(E1000_EECD_DI); 3520 eecd &= ~(E1000_EECD_DI);
4691 if (eecd & E1000_EECD_DO) 3521 if (eecd & E1000_EECD_DO)
4692 data |= 1; 3522 data |= 1;
4693 3523
4694 e1000_lower_ee_clk(hw, &eecd); 3524 e1000_lower_ee_clk(hw, &eecd);
4695 } 3525 }
4696 3526
4697 return data; 3527 return data;
4698} 3528}
4699 3529
4700/****************************************************************************** 3530/**
4701 * Prepares EEPROM for access 3531 * e1000_acquire_eeprom - Prepares EEPROM for access
4702 * 3532 * @hw: Struct containing variables accessed by shared code
4703 * hw - Struct containing variables accessed by shared code
4704 * 3533 *
4705 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This 3534 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
4706 * function should be called before issuing a command to the EEPROM. 3535 * function should be called before issuing a command to the EEPROM.
4707 *****************************************************************************/ 3536 */
4708static s32 e1000_acquire_eeprom(struct e1000_hw *hw) 3537static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
4709{ 3538{
4710 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3539 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4711 u32 eecd, i=0; 3540 u32 eecd, i = 0;
4712 3541
4713 DEBUGFUNC("e1000_acquire_eeprom"); 3542 DEBUGFUNC("e1000_acquire_eeprom");
4714 3543
4715 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) 3544 eecd = er32(EECD);
4716 return -E1000_ERR_SWFW_SYNC; 3545
4717 eecd = er32(EECD); 3546 /* Request EEPROM Access */
4718 3547 if (hw->mac_type > e1000_82544) {
4719 if (hw->mac_type != e1000_82573) { 3548 eecd |= E1000_EECD_REQ;
4720 /* Request EEPROM Access */ 3549 ew32(EECD, eecd);
4721 if (hw->mac_type > e1000_82544) { 3550 eecd = er32(EECD);
4722 eecd |= E1000_EECD_REQ; 3551 while ((!(eecd & E1000_EECD_GNT)) &&
4723 ew32(EECD, eecd); 3552 (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
4724 eecd = er32(EECD); 3553 i++;
4725 while ((!(eecd & E1000_EECD_GNT)) && 3554 udelay(5);
4726 (i < E1000_EEPROM_GRANT_ATTEMPTS)) { 3555 eecd = er32(EECD);
4727 i++; 3556 }
4728 udelay(5); 3557 if (!(eecd & E1000_EECD_GNT)) {
4729 eecd = er32(EECD); 3558 eecd &= ~E1000_EECD_REQ;
4730 } 3559 ew32(EECD, eecd);
4731 if (!(eecd & E1000_EECD_GNT)) { 3560 DEBUGOUT("Could not acquire EEPROM grant\n");
4732 eecd &= ~E1000_EECD_REQ; 3561 return -E1000_ERR_EEPROM;
4733 ew32(EECD, eecd); 3562 }
4734 DEBUGOUT("Could not acquire EEPROM grant\n"); 3563 }
4735 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); 3564
4736 return -E1000_ERR_EEPROM; 3565 /* Setup EEPROM for Read/Write */
4737 } 3566
4738 } 3567 if (eeprom->type == e1000_eeprom_microwire) {
4739 } 3568 /* Clear SK and DI */
4740 3569 eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
4741 /* Setup EEPROM for Read/Write */ 3570 ew32(EECD, eecd);
4742 3571
4743 if (eeprom->type == e1000_eeprom_microwire) { 3572 /* Set CS */
4744 /* Clear SK and DI */ 3573 eecd |= E1000_EECD_CS;
4745 eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); 3574 ew32(EECD, eecd);
4746 ew32(EECD, eecd); 3575 } else if (eeprom->type == e1000_eeprom_spi) {
4747 3576 /* Clear SK and CS */
4748 /* Set CS */ 3577 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
4749 eecd |= E1000_EECD_CS; 3578 ew32(EECD, eecd);
4750 ew32(EECD, eecd); 3579 udelay(1);
4751 } else if (eeprom->type == e1000_eeprom_spi) { 3580 }
4752 /* Clear SK and CS */ 3581
4753 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 3582 return E1000_SUCCESS;
4754 ew32(EECD, eecd);
4755 udelay(1);
4756 }
4757
4758 return E1000_SUCCESS;
4759} 3583}
4760 3584
4761/****************************************************************************** 3585/**
4762 * Returns EEPROM to a "standby" state 3586 * e1000_standby_eeprom - Returns EEPROM to a "standby" state
4763 * 3587 * @hw: Struct containing variables accessed by shared code
4764 * hw - Struct containing variables accessed by shared code 3588 */
4765 *****************************************************************************/
4766static void e1000_standby_eeprom(struct e1000_hw *hw) 3589static void e1000_standby_eeprom(struct e1000_hw *hw)
4767{ 3590{
4768 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3591 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4769 u32 eecd; 3592 u32 eecd;
4770 3593
4771 eecd = er32(EECD); 3594 eecd = er32(EECD);
4772 3595
4773 if (eeprom->type == e1000_eeprom_microwire) { 3596 if (eeprom->type == e1000_eeprom_microwire) {
4774 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 3597 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
4775 ew32(EECD, eecd); 3598 ew32(EECD, eecd);
4776 E1000_WRITE_FLUSH(); 3599 E1000_WRITE_FLUSH();
4777 udelay(eeprom->delay_usec); 3600 udelay(eeprom->delay_usec);
4778 3601
4779 /* Clock high */ 3602 /* Clock high */
4780 eecd |= E1000_EECD_SK; 3603 eecd |= E1000_EECD_SK;
4781 ew32(EECD, eecd); 3604 ew32(EECD, eecd);
4782 E1000_WRITE_FLUSH(); 3605 E1000_WRITE_FLUSH();
4783 udelay(eeprom->delay_usec); 3606 udelay(eeprom->delay_usec);
4784 3607
4785 /* Select EEPROM */ 3608 /* Select EEPROM */
4786 eecd |= E1000_EECD_CS; 3609 eecd |= E1000_EECD_CS;
4787 ew32(EECD, eecd); 3610 ew32(EECD, eecd);
4788 E1000_WRITE_FLUSH(); 3611 E1000_WRITE_FLUSH();
4789 udelay(eeprom->delay_usec); 3612 udelay(eeprom->delay_usec);
4790 3613
4791 /* Clock low */ 3614 /* Clock low */
4792 eecd &= ~E1000_EECD_SK; 3615 eecd &= ~E1000_EECD_SK;
4793 ew32(EECD, eecd); 3616 ew32(EECD, eecd);
4794 E1000_WRITE_FLUSH(); 3617 E1000_WRITE_FLUSH();
4795 udelay(eeprom->delay_usec); 3618 udelay(eeprom->delay_usec);
4796 } else if (eeprom->type == e1000_eeprom_spi) { 3619 } else if (eeprom->type == e1000_eeprom_spi) {
4797 /* Toggle CS to flush commands */ 3620 /* Toggle CS to flush commands */
4798 eecd |= E1000_EECD_CS; 3621 eecd |= E1000_EECD_CS;
4799 ew32(EECD, eecd); 3622 ew32(EECD, eecd);
4800 E1000_WRITE_FLUSH(); 3623 E1000_WRITE_FLUSH();
4801 udelay(eeprom->delay_usec); 3624 udelay(eeprom->delay_usec);
4802 eecd &= ~E1000_EECD_CS; 3625 eecd &= ~E1000_EECD_CS;
4803 ew32(EECD, eecd); 3626 ew32(EECD, eecd);
4804 E1000_WRITE_FLUSH(); 3627 E1000_WRITE_FLUSH();
4805 udelay(eeprom->delay_usec); 3628 udelay(eeprom->delay_usec);
4806 } 3629 }
4807} 3630}
4808 3631
4809/****************************************************************************** 3632/**
4810 * Terminates a command by inverting the EEPROM's chip select pin 3633 * e1000_release_eeprom - drop chip select
3634 * @hw: Struct containing variables accessed by shared code
4811 * 3635 *
4812 * hw - Struct containing variables accessed by shared code 3636 * Terminates a command by inverting the EEPROM's chip select pin
4813 *****************************************************************************/ 3637 */
4814static void e1000_release_eeprom(struct e1000_hw *hw) 3638static void e1000_release_eeprom(struct e1000_hw *hw)
4815{ 3639{
4816 u32 eecd; 3640 u32 eecd;
4817
4818 DEBUGFUNC("e1000_release_eeprom");
4819 3641
4820 eecd = er32(EECD); 3642 DEBUGFUNC("e1000_release_eeprom");
4821 3643
4822 if (hw->eeprom.type == e1000_eeprom_spi) { 3644 eecd = er32(EECD);
4823 eecd |= E1000_EECD_CS; /* Pull CS high */
4824 eecd &= ~E1000_EECD_SK; /* Lower SCK */
4825 3645
4826 ew32(EECD, eecd); 3646 if (hw->eeprom.type == e1000_eeprom_spi) {
3647 eecd |= E1000_EECD_CS; /* Pull CS high */
3648 eecd &= ~E1000_EECD_SK; /* Lower SCK */
4827 3649
4828 udelay(hw->eeprom.delay_usec); 3650 ew32(EECD, eecd);
4829 } else if (hw->eeprom.type == e1000_eeprom_microwire) {
4830 /* cleanup eeprom */
4831 3651
4832 /* CS on Microwire is active-high */ 3652 udelay(hw->eeprom.delay_usec);
4833 eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); 3653 } else if (hw->eeprom.type == e1000_eeprom_microwire) {
3654 /* cleanup eeprom */
4834 3655
4835 ew32(EECD, eecd); 3656 /* CS on Microwire is active-high */
3657 eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
4836 3658
4837 /* Rising edge of clock */ 3659 ew32(EECD, eecd);
4838 eecd |= E1000_EECD_SK;
4839 ew32(EECD, eecd);
4840 E1000_WRITE_FLUSH();
4841 udelay(hw->eeprom.delay_usec);
4842 3660
4843 /* Falling edge of clock */ 3661 /* Rising edge of clock */
4844 eecd &= ~E1000_EECD_SK; 3662 eecd |= E1000_EECD_SK;
4845 ew32(EECD, eecd); 3663 ew32(EECD, eecd);
4846 E1000_WRITE_FLUSH(); 3664 E1000_WRITE_FLUSH();
4847 udelay(hw->eeprom.delay_usec); 3665 udelay(hw->eeprom.delay_usec);
4848 }
4849 3666
4850 /* Stop requesting EEPROM access */ 3667 /* Falling edge of clock */
4851 if (hw->mac_type > e1000_82544) { 3668 eecd &= ~E1000_EECD_SK;
4852 eecd &= ~E1000_EECD_REQ; 3669 ew32(EECD, eecd);
4853 ew32(EECD, eecd); 3670 E1000_WRITE_FLUSH();
4854 } 3671 udelay(hw->eeprom.delay_usec);
3672 }
4855 3673
4856 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); 3674 /* Stop requesting EEPROM access */
3675 if (hw->mac_type > e1000_82544) {
3676 eecd &= ~E1000_EECD_REQ;
3677 ew32(EECD, eecd);
3678 }
4857} 3679}
4858 3680
4859/****************************************************************************** 3681/**
4860 * Reads a 16 bit word from the EEPROM. 3682 * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM.
4861 * 3683 * @hw: Struct containing variables accessed by shared code
4862 * hw - Struct containing variables accessed by shared code 3684 */
4863 *****************************************************************************/
4864static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) 3685static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
4865{ 3686{
4866 u16 retry_count = 0; 3687 u16 retry_count = 0;
4867 u8 spi_stat_reg; 3688 u8 spi_stat_reg;
4868
4869 DEBUGFUNC("e1000_spi_eeprom_ready");
4870
4871 /* Read "Status Register" repeatedly until the LSB is cleared. The
4872 * EEPROM will signal that the command has been completed by clearing
4873 * bit 0 of the internal status register. If it's not cleared within
4874 * 5 milliseconds, then error out.
4875 */
4876 retry_count = 0;
4877 do {
4878 e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
4879 hw->eeprom.opcode_bits);
4880 spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
4881 if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
4882 break;
4883
4884 udelay(5);
4885 retry_count += 5;
4886
4887 e1000_standby_eeprom(hw);
4888 } while (retry_count < EEPROM_MAX_RETRY_SPI);
4889
4890 /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
4891 * only 0-5mSec on 5V devices)
4892 */
4893 if (retry_count >= EEPROM_MAX_RETRY_SPI) {
4894 DEBUGOUT("SPI EEPROM Status error\n");
4895 return -E1000_ERR_EEPROM;
4896 }
4897
4898 return E1000_SUCCESS;
4899}
4900
4901/******************************************************************************
4902 * Reads a 16 bit word from the EEPROM.
4903 *
4904 * hw - Struct containing variables accessed by shared code
4905 * offset - offset of word in the EEPROM to read
4906 * data - word read from the EEPROM
4907 * words - number of words to read
4908 *****************************************************************************/
4909s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
4910{
4911 s32 ret;
4912 spin_lock(&e1000_eeprom_lock);
4913 ret = e1000_do_read_eeprom(hw, offset, words, data);
4914 spin_unlock(&e1000_eeprom_lock);
4915 return ret;
4916}
4917
4918static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
4919{
4920 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4921 u32 i = 0;
4922
4923 DEBUGFUNC("e1000_read_eeprom");
4924
4925 /* If eeprom is not yet detected, do so now */
4926 if (eeprom->word_size == 0)
4927 e1000_init_eeprom_params(hw);
4928
4929 /* A check for invalid values: offset too large, too many words, and not
4930 * enough words.
4931 */
4932 if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
4933 (words == 0)) {
4934 DEBUGOUT2("\"words\" parameter out of bounds. Words = %d, size = %d\n", offset, eeprom->word_size);
4935 return -E1000_ERR_EEPROM;
4936 }
4937
4938 /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
4939 * directly. In this case, we need to acquire the EEPROM so that
4940 * FW or other port software does not interrupt.
4941 */
4942 if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
4943 /* Prepare the EEPROM for bit-bang reading */
4944 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
4945 return -E1000_ERR_EEPROM;
4946 }
4947
4948 /* Eerd register EEPROM access requires no eeprom aquire/release */
4949 if (eeprom->use_eerd)
4950 return e1000_read_eeprom_eerd(hw, offset, words, data);
4951
4952 /* ICH EEPROM access is done via the ICH flash controller */
4953 if (eeprom->type == e1000_eeprom_ich8)
4954 return e1000_read_eeprom_ich8(hw, offset, words, data);
4955
4956 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
4957 * acquired the EEPROM at this point, so any returns should relase it */
4958 if (eeprom->type == e1000_eeprom_spi) {
4959 u16 word_in;
4960 u8 read_opcode = EEPROM_READ_OPCODE_SPI;
4961
4962 if (e1000_spi_eeprom_ready(hw)) {
4963 e1000_release_eeprom(hw);
4964 return -E1000_ERR_EEPROM;
4965 }
4966
4967 e1000_standby_eeprom(hw);
4968
4969 /* Some SPI eeproms use the 8th address bit embedded in the opcode */
4970 if ((eeprom->address_bits == 8) && (offset >= 128))
4971 read_opcode |= EEPROM_A8_OPCODE_SPI;
4972
4973 /* Send the READ command (opcode + addr) */
4974 e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
4975 e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
4976
4977 /* Read the data. The address of the eeprom internally increments with
4978 * each byte (spi) being read, saving on the overhead of eeprom setup
4979 * and tear-down. The address counter will roll over if reading beyond
4980 * the size of the eeprom, thus allowing the entire memory to be read
4981 * starting from any offset. */
4982 for (i = 0; i < words; i++) {
4983 word_in = e1000_shift_in_ee_bits(hw, 16);
4984 data[i] = (word_in >> 8) | (word_in << 8);
4985 }
4986 } else if (eeprom->type == e1000_eeprom_microwire) {
4987 for (i = 0; i < words; i++) {
4988 /* Send the READ command (opcode + addr) */
4989 e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
4990 eeprom->opcode_bits);
4991 e1000_shift_out_ee_bits(hw, (u16)(offset + i),
4992 eeprom->address_bits);
4993
4994 /* Read the data. For microwire, each word requires the overhead
4995 * of eeprom setup and tear-down. */
4996 data[i] = e1000_shift_in_ee_bits(hw, 16);
4997 e1000_standby_eeprom(hw);
4998 }
4999 }
5000
5001 /* End this read operation */
5002 e1000_release_eeprom(hw);
5003
5004 return E1000_SUCCESS;
5005}
5006 3689
5007/****************************************************************************** 3690 DEBUGFUNC("e1000_spi_eeprom_ready");
5008 * Reads a 16 bit word from the EEPROM using the EERD register.
5009 *
5010 * hw - Struct containing variables accessed by shared code
5011 * offset - offset of word in the EEPROM to read
5012 * data - word read from the EEPROM
5013 * words - number of words to read
5014 *****************************************************************************/
5015static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
5016 u16 *data)
5017{
5018 u32 i, eerd = 0;
5019 s32 error = 0;
5020 3691
5021 for (i = 0; i < words; i++) { 3692 /* Read "Status Register" repeatedly until the LSB is cleared. The
5022 eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + 3693 * EEPROM will signal that the command has been completed by clearing
5023 E1000_EEPROM_RW_REG_START; 3694 * bit 0 of the internal status register. If it's not cleared within
3695 * 5 milliseconds, then error out.
3696 */
3697 retry_count = 0;
3698 do {
3699 e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
3700 hw->eeprom.opcode_bits);
3701 spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8);
3702 if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
3703 break;
5024 3704
5025 ew32(EERD, eerd); 3705 udelay(5);
5026 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); 3706 retry_count += 5;
5027 3707
5028 if (error) { 3708 e1000_standby_eeprom(hw);
5029 break; 3709 } while (retry_count < EEPROM_MAX_RETRY_SPI);
5030 }
5031 data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
5032 3710
5033 } 3711 /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
3712 * only 0-5mSec on 5V devices)
3713 */
3714 if (retry_count >= EEPROM_MAX_RETRY_SPI) {
3715 DEBUGOUT("SPI EEPROM Status error\n");
3716 return -E1000_ERR_EEPROM;
3717 }
5034 3718
5035 return error; 3719 return E1000_SUCCESS;
5036} 3720}
5037 3721
5038/****************************************************************************** 3722/**
5039 * Writes a 16 bit word from the EEPROM using the EEWR register. 3723 * e1000_read_eeprom - Reads a 16 bit word from the EEPROM.
5040 * 3724 * @hw: Struct containing variables accessed by shared code
5041 * hw - Struct containing variables accessed by shared code 3725 * @offset: offset of word in the EEPROM to read
5042 * offset - offset of word in the EEPROM to read 3726 * @data: word read from the EEPROM
5043 * data - word read from the EEPROM 3727 * @words: number of words to read
5044 * words - number of words to read 3728 */
5045 *****************************************************************************/ 3729s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
5046static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
5047 u16 *data)
5048{ 3730{
5049 u32 register_value = 0; 3731 s32 ret;
5050 u32 i = 0; 3732 spin_lock(&e1000_eeprom_lock);
5051 s32 error = 0; 3733 ret = e1000_do_read_eeprom(hw, offset, words, data);
5052 3734 spin_unlock(&e1000_eeprom_lock);
5053 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) 3735 return ret;
5054 return -E1000_ERR_SWFW_SYNC;
5055
5056 for (i = 0; i < words; i++) {
5057 register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
5058 ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
5059 E1000_EEPROM_RW_REG_START;
5060
5061 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
5062 if (error) {
5063 break;
5064 }
5065
5066 ew32(EEWR, register_value);
5067
5068 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
5069
5070 if (error) {
5071 break;
5072 }
5073 }
5074
5075 e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
5076 return error;
5077} 3736}
5078 3737
5079/****************************************************************************** 3738static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
5080 * Polls the status bit (bit 1) of the EERD to determine when the read is done. 3739 u16 *data)
5081 *
5082 * hw - Struct containing variables accessed by shared code
5083 *****************************************************************************/
5084static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
5085{ 3740{
5086 u32 attempts = 100000; 3741 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5087 u32 i, reg = 0; 3742 u32 i = 0;
5088 s32 done = E1000_ERR_EEPROM;
5089
5090 for (i = 0; i < attempts; i++) {
5091 if (eerd == E1000_EEPROM_POLL_READ)
5092 reg = er32(EERD);
5093 else
5094 reg = er32(EEWR);
5095
5096 if (reg & E1000_EEPROM_RW_REG_DONE) {
5097 done = E1000_SUCCESS;
5098 break;
5099 }
5100 udelay(5);
5101 }
5102
5103 return done;
5104}
5105 3743
5106/*************************************************************************** 3744 DEBUGFUNC("e1000_read_eeprom");
5107* Description: Determines if the onboard NVM is FLASH or EEPROM.
5108*
5109* hw - Struct containing variables accessed by shared code
5110****************************************************************************/
5111static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5112{
5113 u32 eecd = 0;
5114 3745
5115 DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); 3746 /* If eeprom is not yet detected, do so now */
3747 if (eeprom->word_size == 0)
3748 e1000_init_eeprom_params(hw);
3749
3750 /* A check for invalid values: offset too large, too many words, and not
3751 * enough words.
3752 */
3753 if ((offset >= eeprom->word_size)
3754 || (words > eeprom->word_size - offset) || (words == 0)) {
3755 DEBUGOUT2
3756 ("\"words\" parameter out of bounds. Words = %d, size = %d\n",
3757 offset, eeprom->word_size);
3758 return -E1000_ERR_EEPROM;
3759 }
5116 3760
5117 if (hw->mac_type == e1000_ich8lan) 3761 /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
5118 return false; 3762 * directly. In this case, we need to acquire the EEPROM so that
3763 * FW or other port software does not interrupt.
3764 */
3765 /* Prepare the EEPROM for bit-bang reading */
3766 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
3767 return -E1000_ERR_EEPROM;
3768
3769 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
3770 * acquired the EEPROM at this point, so any returns should release it */
3771 if (eeprom->type == e1000_eeprom_spi) {
3772 u16 word_in;
3773 u8 read_opcode = EEPROM_READ_OPCODE_SPI;
3774
3775 if (e1000_spi_eeprom_ready(hw)) {
3776 e1000_release_eeprom(hw);
3777 return -E1000_ERR_EEPROM;
3778 }
5119 3779
5120 if (hw->mac_type == e1000_82573) { 3780 e1000_standby_eeprom(hw);
5121 eecd = er32(EECD); 3781
3782 /* Some SPI eeproms use the 8th address bit embedded in the opcode */
3783 if ((eeprom->address_bits == 8) && (offset >= 128))
3784 read_opcode |= EEPROM_A8_OPCODE_SPI;
3785
3786 /* Send the READ command (opcode + addr) */
3787 e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
3788 e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
3789 eeprom->address_bits);
3790
3791 /* Read the data. The address of the eeprom internally increments with
3792 * each byte (spi) being read, saving on the overhead of eeprom setup
3793 * and tear-down. The address counter will roll over if reading beyond
3794 * the size of the eeprom, thus allowing the entire memory to be read
3795 * starting from any offset. */
3796 for (i = 0; i < words; i++) {
3797 word_in = e1000_shift_in_ee_bits(hw, 16);
3798 data[i] = (word_in >> 8) | (word_in << 8);
3799 }
3800 } else if (eeprom->type == e1000_eeprom_microwire) {
3801 for (i = 0; i < words; i++) {
3802 /* Send the READ command (opcode + addr) */
3803 e1000_shift_out_ee_bits(hw,
3804 EEPROM_READ_OPCODE_MICROWIRE,
3805 eeprom->opcode_bits);
3806 e1000_shift_out_ee_bits(hw, (u16) (offset + i),
3807 eeprom->address_bits);
3808
3809 /* Read the data. For microwire, each word requires the overhead
3810 * of eeprom setup and tear-down. */
3811 data[i] = e1000_shift_in_ee_bits(hw, 16);
3812 e1000_standby_eeprom(hw);
3813 }
3814 }
5122 3815
5123 /* Isolate bits 15 & 16 */ 3816 /* End this read operation */
5124 eecd = ((eecd >> 15) & 0x03); 3817 e1000_release_eeprom(hw);
5125 3818
5126 /* If both bits are set, device is Flash type */ 3819 return E1000_SUCCESS;
5127 if (eecd == 0x03) {
5128 return false;
5129 }
5130 }
5131 return true;
5132} 3820}
5133 3821
5134/****************************************************************************** 3822/**
5135 * Verifies that the EEPROM has a valid checksum 3823 * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum
5136 * 3824 * @hw: Struct containing variables accessed by shared code
5137 * hw - Struct containing variables accessed by shared code
5138 * 3825 *
5139 * Reads the first 64 16 bit words of the EEPROM and sums the values read. 3826 * Reads the first 64 16 bit words of the EEPROM and sums the values read.
5140 * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is 3827 * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
5141 * valid. 3828 * valid.
5142 *****************************************************************************/ 3829 */
5143s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) 3830s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5144{ 3831{
5145 u16 checksum = 0; 3832 u16 checksum = 0;
5146 u16 i, eeprom_data; 3833 u16 i, eeprom_data;
5147 3834
5148 DEBUGFUNC("e1000_validate_eeprom_checksum"); 3835 DEBUGFUNC("e1000_validate_eeprom_checksum");
5149 3836
5150 if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) { 3837 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
5151 /* Check bit 4 of word 10h. If it is 0, firmware is done updating 3838 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
5152 * 10h-12h. Checksum may need to be fixed. */ 3839 DEBUGOUT("EEPROM Read Error\n");
5153 e1000_read_eeprom(hw, 0x10, 1, &eeprom_data); 3840 return -E1000_ERR_EEPROM;
5154 if ((eeprom_data & 0x10) == 0) { 3841 }
5155 /* Read 0x23 and check bit 15. This bit is a 1 when the checksum 3842 checksum += eeprom_data;
5156 * has already been fixed. If the checksum is still wrong and this 3843 }
5157 * bit is a 1, we need to return bad checksum. Otherwise, we need 3844
5158 * to set this bit to a 1 and update the checksum. */ 3845 if (checksum == (u16) EEPROM_SUM)
5159 e1000_read_eeprom(hw, 0x23, 1, &eeprom_data); 3846 return E1000_SUCCESS;
5160 if ((eeprom_data & 0x8000) == 0) { 3847 else {
5161 eeprom_data |= 0x8000; 3848 DEBUGOUT("EEPROM Checksum Invalid\n");
5162 e1000_write_eeprom(hw, 0x23, 1, &eeprom_data); 3849 return -E1000_ERR_EEPROM;
5163 e1000_update_eeprom_checksum(hw); 3850 }
5164 }
5165 }
5166 }
5167
5168 if (hw->mac_type == e1000_ich8lan) {
5169 /* Drivers must allocate the shadow ram structure for the
5170 * EEPROM checksum to be updated. Otherwise, this bit as well
5171 * as the checksum must both be set correctly for this
5172 * validation to pass.
5173 */
5174 e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
5175 if ((eeprom_data & 0x40) == 0) {
5176 eeprom_data |= 0x40;
5177 e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
5178 e1000_update_eeprom_checksum(hw);
5179 }
5180 }
5181
5182 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
5183 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
5184 DEBUGOUT("EEPROM Read Error\n");
5185 return -E1000_ERR_EEPROM;
5186 }
5187 checksum += eeprom_data;
5188 }
5189
5190 if (checksum == (u16)EEPROM_SUM)
5191 return E1000_SUCCESS;
5192 else {
5193 DEBUGOUT("EEPROM Checksum Invalid\n");
5194 return -E1000_ERR_EEPROM;
5195 }
5196} 3851}
5197 3852
5198/****************************************************************************** 3853/**
5199 * Calculates the EEPROM checksum and writes it to the EEPROM 3854 * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum
5200 * 3855 * @hw: Struct containing variables accessed by shared code
5201 * hw - Struct containing variables accessed by shared code
5202 * 3856 *
5203 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. 3857 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
5204 * Writes the difference to word offset 63 of the EEPROM. 3858 * Writes the difference to word offset 63 of the EEPROM.
5205 *****************************************************************************/ 3859 */
5206s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) 3860s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
5207{ 3861{
5208 u32 ctrl_ext; 3862 u16 checksum = 0;
5209 u16 checksum = 0; 3863 u16 i, eeprom_data;
5210 u16 i, eeprom_data; 3864
5211 3865 DEBUGFUNC("e1000_update_eeprom_checksum");
5212 DEBUGFUNC("e1000_update_eeprom_checksum"); 3866
5213 3867 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
5214 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 3868 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
5215 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 3869 DEBUGOUT("EEPROM Read Error\n");
5216 DEBUGOUT("EEPROM Read Error\n"); 3870 return -E1000_ERR_EEPROM;
5217 return -E1000_ERR_EEPROM; 3871 }
5218 } 3872 checksum += eeprom_data;
5219 checksum += eeprom_data; 3873 }
5220 } 3874 checksum = (u16) EEPROM_SUM - checksum;
5221 checksum = (u16)EEPROM_SUM - checksum; 3875 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
5222 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 3876 DEBUGOUT("EEPROM Write Error\n");
5223 DEBUGOUT("EEPROM Write Error\n"); 3877 return -E1000_ERR_EEPROM;
5224 return -E1000_ERR_EEPROM; 3878 }
5225 } else if (hw->eeprom.type == e1000_eeprom_flash) { 3879 return E1000_SUCCESS;
5226 e1000_commit_shadow_ram(hw);
5227 } else if (hw->eeprom.type == e1000_eeprom_ich8) {
5228 e1000_commit_shadow_ram(hw);
5229 /* Reload the EEPROM, or else modifications will not appear
5230 * until after next adapter reset. */
5231 ctrl_ext = er32(CTRL_EXT);
5232 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
5233 ew32(CTRL_EXT, ctrl_ext);
5234 msleep(10);
5235 }
5236 return E1000_SUCCESS;
5237} 3880}
5238 3881
5239/****************************************************************************** 3882/**
5240 * Parent function for writing words to the different EEPROM types. 3883 * e1000_write_eeprom - write words to the different EEPROM types.
5241 * 3884 * @hw: Struct containing variables accessed by shared code
5242 * hw - Struct containing variables accessed by shared code 3885 * @offset: offset within the EEPROM to be written to
5243 * offset - offset within the EEPROM to be written to 3886 * @words: number of words to write
5244 * words - number of words to write 3887 * @data: 16 bit word to be written to the EEPROM
5245 * data - 16 bit word to be written to the EEPROM
5246 * 3888 *
5247 * If e1000_update_eeprom_checksum is not called after this function, the 3889 * If e1000_update_eeprom_checksum is not called after this function, the
5248 * EEPROM will most likely contain an invalid checksum. 3890 * EEPROM will most likely contain an invalid checksum.
5249 *****************************************************************************/ 3891 */
5250s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 3892s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
5251{ 3893{
5252 s32 ret; 3894 s32 ret;
5253 spin_lock(&e1000_eeprom_lock); 3895 spin_lock(&e1000_eeprom_lock);
5254 ret = e1000_do_write_eeprom(hw, offset, words, data); 3896 ret = e1000_do_write_eeprom(hw, offset, words, data);
5255 spin_unlock(&e1000_eeprom_lock); 3897 spin_unlock(&e1000_eeprom_lock);
5256 return ret; 3898 return ret;
5257} 3899}
5258 3900
5259 3901static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
5260static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 3902 u16 *data)
5261{ 3903{
5262 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3904 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5263 s32 status = 0; 3905 s32 status = 0;
5264 3906
5265 DEBUGFUNC("e1000_write_eeprom"); 3907 DEBUGFUNC("e1000_write_eeprom");
5266 3908
5267 /* If eeprom is not yet detected, do so now */ 3909 /* If eeprom is not yet detected, do so now */
5268 if (eeprom->word_size == 0) 3910 if (eeprom->word_size == 0)
5269 e1000_init_eeprom_params(hw); 3911 e1000_init_eeprom_params(hw);
5270 3912
5271 /* A check for invalid values: offset too large, too many words, and not 3913 /* A check for invalid values: offset too large, too many words, and not
5272 * enough words. 3914 * enough words.
5273 */ 3915 */
5274 if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || 3916 if ((offset >= eeprom->word_size)
5275 (words == 0)) { 3917 || (words > eeprom->word_size - offset) || (words == 0)) {
5276 DEBUGOUT("\"words\" parameter out of bounds\n"); 3918 DEBUGOUT("\"words\" parameter out of bounds\n");
5277 return -E1000_ERR_EEPROM; 3919 return -E1000_ERR_EEPROM;
5278 } 3920 }
5279 3921
5280 /* 82573 writes only through eewr */ 3922 /* Prepare the EEPROM for writing */
5281 if (eeprom->use_eewr) 3923 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
5282 return e1000_write_eeprom_eewr(hw, offset, words, data); 3924 return -E1000_ERR_EEPROM;
5283 3925
5284 if (eeprom->type == e1000_eeprom_ich8) 3926 if (eeprom->type == e1000_eeprom_microwire) {
5285 return e1000_write_eeprom_ich8(hw, offset, words, data); 3927 status = e1000_write_eeprom_microwire(hw, offset, words, data);
5286 3928 } else {
5287 /* Prepare the EEPROM for writing */ 3929 status = e1000_write_eeprom_spi(hw, offset, words, data);
5288 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3930 msleep(10);
5289 return -E1000_ERR_EEPROM; 3931 }
5290 3932
5291 if (eeprom->type == e1000_eeprom_microwire) { 3933 /* Done with writing */
5292 status = e1000_write_eeprom_microwire(hw, offset, words, data); 3934 e1000_release_eeprom(hw);
5293 } else { 3935
5294 status = e1000_write_eeprom_spi(hw, offset, words, data); 3936 return status;
5295 msleep(10);
5296 }
5297
5298 /* Done with writing */
5299 e1000_release_eeprom(hw);
5300
5301 return status;
5302} 3937}
5303 3938
5304/****************************************************************************** 3939/**
5305 * Writes a 16 bit word to a given offset in an SPI EEPROM. 3940 * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM.
5306 * 3941 * @hw: Struct containing variables accessed by shared code
5307 * hw - Struct containing variables accessed by shared code 3942 * @offset: offset within the EEPROM to be written to
5308 * offset - offset within the EEPROM to be written to 3943 * @words: number of words to write
5309 * words - number of words to write 3944 * @data: pointer to array of 8 bit words to be written to the EEPROM
5310 * data - pointer to array of 8 bit words to be written to the EEPROM 3945 */
5311 *
5312 *****************************************************************************/
5313static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, 3946static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
5314 u16 *data) 3947 u16 *data)
5315{ 3948{
5316 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3949 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5317 u16 widx = 0; 3950 u16 widx = 0;
5318 3951
5319 DEBUGFUNC("e1000_write_eeprom_spi"); 3952 DEBUGFUNC("e1000_write_eeprom_spi");
5320 3953
5321 while (widx < words) { 3954 while (widx < words) {
5322 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; 3955 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
5323 3956
5324 if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; 3957 if (e1000_spi_eeprom_ready(hw))
3958 return -E1000_ERR_EEPROM;
5325 3959
5326 e1000_standby_eeprom(hw); 3960 e1000_standby_eeprom(hw);
5327 3961
5328 /* Send the WRITE ENABLE command (8 bit opcode ) */ 3962 /* Send the WRITE ENABLE command (8 bit opcode ) */
5329 e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, 3963 e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
5330 eeprom->opcode_bits); 3964 eeprom->opcode_bits);
5331 3965
5332 e1000_standby_eeprom(hw); 3966 e1000_standby_eeprom(hw);
5333 3967
5334 /* Some SPI eeproms use the 8th address bit embedded in the opcode */ 3968 /* Some SPI eeproms use the 8th address bit embedded in the opcode */
5335 if ((eeprom->address_bits == 8) && (offset >= 128)) 3969 if ((eeprom->address_bits == 8) && (offset >= 128))
5336 write_opcode |= EEPROM_A8_OPCODE_SPI; 3970 write_opcode |= EEPROM_A8_OPCODE_SPI;
5337 3971
5338 /* Send the Write command (8-bit opcode + addr) */ 3972 /* Send the Write command (8-bit opcode + addr) */
5339 e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); 3973 e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
5340 3974
5341 e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2), 3975 e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2),
5342 eeprom->address_bits); 3976 eeprom->address_bits);
5343 3977
5344 /* Send the data */ 3978 /* Send the data */
5345 3979
5346 /* Loop to allow for up to whole page write (32 bytes) of eeprom */ 3980 /* Loop to allow for up to whole page write (32 bytes) of eeprom */
5347 while (widx < words) { 3981 while (widx < words) {
5348 u16 word_out = data[widx]; 3982 u16 word_out = data[widx];
5349 word_out = (word_out >> 8) | (word_out << 8); 3983 word_out = (word_out >> 8) | (word_out << 8);
5350 e1000_shift_out_ee_bits(hw, word_out, 16); 3984 e1000_shift_out_ee_bits(hw, word_out, 16);
5351 widx++; 3985 widx++;
5352 3986
5353 /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE 3987 /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
5354 * operation, while the smaller eeproms are capable of an 8-byte 3988 * operation, while the smaller eeproms are capable of an 8-byte
5355 * PAGE WRITE operation. Break the inner loop to pass new address 3989 * PAGE WRITE operation. Break the inner loop to pass new address
5356 */ 3990 */
5357 if ((((offset + widx)*2) % eeprom->page_size) == 0) { 3991 if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
5358 e1000_standby_eeprom(hw); 3992 e1000_standby_eeprom(hw);
5359 break; 3993 break;
5360 } 3994 }
5361 } 3995 }
5362 } 3996 }
5363 3997
5364 return E1000_SUCCESS; 3998 return E1000_SUCCESS;
5365} 3999}
5366 4000
5367/****************************************************************************** 4001/**
5368 * Writes a 16 bit word to a given offset in a Microwire EEPROM. 4002 * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM.
5369 * 4003 * @hw: Struct containing variables accessed by shared code
5370 * hw - Struct containing variables accessed by shared code 4004 * @offset: offset within the EEPROM to be written to
5371 * offset - offset within the EEPROM to be written to 4005 * @words: number of words to write
5372 * words - number of words to write 4006 * @data: pointer to array of 8 bit words to be written to the EEPROM
5373 * data - pointer to array of 16 bit words to be written to the EEPROM 4007 */
5374 *
5375 *****************************************************************************/
5376static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, 4008static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
5377 u16 words, u16 *data) 4009 u16 words, u16 *data)
5378{ 4010{
5379 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4011 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5380 u32 eecd; 4012 u32 eecd;
5381 u16 words_written = 0; 4013 u16 words_written = 0;
5382 u16 i = 0; 4014 u16 i = 0;
5383
5384 DEBUGFUNC("e1000_write_eeprom_microwire");
5385
5386 /* Send the write enable command to the EEPROM (3-bit opcode plus
5387 * 6/8-bit dummy address beginning with 11). It's less work to include
5388 * the 11 of the dummy address as part of the opcode than it is to shift
5389 * it over the correct number of bits for the address. This puts the
5390 * EEPROM into write/erase mode.
5391 */
5392 e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
5393 (u16)(eeprom->opcode_bits + 2));
5394
5395 e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
5396
5397 /* Prepare the EEPROM */
5398 e1000_standby_eeprom(hw);
5399
5400 while (words_written < words) {
5401 /* Send the Write command (3-bit opcode + addr) */
5402 e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
5403 eeprom->opcode_bits);
5404
5405 e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
5406 eeprom->address_bits);
5407
5408 /* Send the data */
5409 e1000_shift_out_ee_bits(hw, data[words_written], 16);
5410
5411 /* Toggle the CS line. This in effect tells the EEPROM to execute
5412 * the previous command.
5413 */
5414 e1000_standby_eeprom(hw);
5415
5416 /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
5417 * signal that the command has been completed by raising the DO signal.
5418 * If DO does not go high in 10 milliseconds, then error out.
5419 */
5420 for (i = 0; i < 200; i++) {
5421 eecd = er32(EECD);
5422 if (eecd & E1000_EECD_DO) break;
5423 udelay(50);
5424 }
5425 if (i == 200) {
5426 DEBUGOUT("EEPROM Write did not complete\n");
5427 return -E1000_ERR_EEPROM;
5428 }
5429
5430 /* Recover from write */
5431 e1000_standby_eeprom(hw);
5432
5433 words_written++;
5434 }
5435
5436 /* Send the write disable command to the EEPROM (3-bit opcode plus
5437 * 6/8-bit dummy address beginning with 10). It's less work to include
5438 * the 10 of the dummy address as part of the opcode than it is to shift
5439 * it over the correct number of bits for the address. This takes the
5440 * EEPROM out of write/erase mode.
5441 */
5442 e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
5443 (u16)(eeprom->opcode_bits + 2));
5444
5445 e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
5446
5447 return E1000_SUCCESS;
5448}
5449 4015
5450/****************************************************************************** 4016 DEBUGFUNC("e1000_write_eeprom_microwire");
5451 * Flushes the cached eeprom to NVM. This is done by saving the modified values 4017
5452 * in the eeprom cache and the non modified values in the currently active bank 4018 /* Send the write enable command to the EEPROM (3-bit opcode plus
5453 * to the new bank. 4019 * 6/8-bit dummy address beginning with 11). It's less work to include
5454 * 4020 * the 11 of the dummy address as part of the opcode than it is to shift
5455 * hw - Struct containing variables accessed by shared code 4021 * it over the correct number of bits for the address. This puts the
5456 * offset - offset of word in the EEPROM to read 4022 * EEPROM into write/erase mode.
5457 * data - word read from the EEPROM 4023 */
5458 * words - number of words to read 4024 e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
5459 *****************************************************************************/ 4025 (u16) (eeprom->opcode_bits + 2));
5460static s32 e1000_commit_shadow_ram(struct e1000_hw *hw) 4026
5461{ 4027 e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
5462 u32 attempts = 100000; 4028
5463 u32 eecd = 0; 4029 /* Prepare the EEPROM */
5464 u32 flop = 0; 4030 e1000_standby_eeprom(hw);
5465 u32 i = 0; 4031
5466 s32 error = E1000_SUCCESS; 4032 while (words_written < words) {
5467 u32 old_bank_offset = 0; 4033 /* Send the Write command (3-bit opcode + addr) */
5468 u32 new_bank_offset = 0; 4034 e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
5469 u8 low_byte = 0; 4035 eeprom->opcode_bits);
5470 u8 high_byte = 0; 4036
5471 bool sector_write_failed = false; 4037 e1000_shift_out_ee_bits(hw, (u16) (offset + words_written),
5472 4038 eeprom->address_bits);
5473 if (hw->mac_type == e1000_82573) { 4039
5474 /* The flop register will be used to determine if flash type is STM */ 4040 /* Send the data */
5475 flop = er32(FLOP); 4041 e1000_shift_out_ee_bits(hw, data[words_written], 16);
5476 for (i=0; i < attempts; i++) { 4042
5477 eecd = er32(EECD); 4043 /* Toggle the CS line. This in effect tells the EEPROM to execute
5478 if ((eecd & E1000_EECD_FLUPD) == 0) { 4044 * the previous command.
5479 break; 4045 */
5480 } 4046 e1000_standby_eeprom(hw);
5481 udelay(5); 4047
5482 } 4048 /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
5483 4049 * signal that the command has been completed by raising the DO signal.
5484 if (i == attempts) { 4050 * If DO does not go high in 10 milliseconds, then error out.
5485 return -E1000_ERR_EEPROM; 4051 */
5486 } 4052 for (i = 0; i < 200; i++) {
5487 4053 eecd = er32(EECD);
5488 /* If STM opcode located in bits 15:8 of flop, reset firmware */ 4054 if (eecd & E1000_EECD_DO)
5489 if ((flop & 0xFF00) == E1000_STM_OPCODE) { 4055 break;
5490 ew32(HICR, E1000_HICR_FW_RESET); 4056 udelay(50);
5491 } 4057 }
5492 4058 if (i == 200) {
5493 /* Perform the flash update */ 4059 DEBUGOUT("EEPROM Write did not complete\n");
5494 ew32(EECD, eecd | E1000_EECD_FLUPD); 4060 return -E1000_ERR_EEPROM;
5495 4061 }
5496 for (i=0; i < attempts; i++) { 4062
5497 eecd = er32(EECD); 4063 /* Recover from write */
5498 if ((eecd & E1000_EECD_FLUPD) == 0) { 4064 e1000_standby_eeprom(hw);
5499 break; 4065
5500 } 4066 words_written++;
5501 udelay(5); 4067 }
5502 } 4068
5503 4069 /* Send the write disable command to the EEPROM (3-bit opcode plus
5504 if (i == attempts) { 4070 * 6/8-bit dummy address beginning with 10). It's less work to include
5505 return -E1000_ERR_EEPROM; 4071 * the 10 of the dummy address as part of the opcode than it is to shift
5506 } 4072 * it over the correct number of bits for the address. This takes the
5507 } 4073 * EEPROM out of write/erase mode.
5508 4074 */
5509 if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) { 4075 e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
5510 /* We're writing to the opposite bank so if we're on bank 1, 4076 (u16) (eeprom->opcode_bits + 2));
5511 * write to bank 0 etc. We also need to erase the segment that 4077
5512 * is going to be written */ 4078 e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
5513 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { 4079
5514 new_bank_offset = hw->flash_bank_size * 2; 4080 return E1000_SUCCESS;
5515 old_bank_offset = 0;
5516 e1000_erase_ich8_4k_segment(hw, 1);
5517 } else {
5518 old_bank_offset = hw->flash_bank_size * 2;
5519 new_bank_offset = 0;
5520 e1000_erase_ich8_4k_segment(hw, 0);
5521 }
5522
5523 sector_write_failed = false;
5524 /* Loop for every byte in the shadow RAM,
5525 * which is in units of words. */
5526 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5527 /* Determine whether to write the value stored
5528 * in the other NVM bank or a modified value stored
5529 * in the shadow RAM */
5530 if (hw->eeprom_shadow_ram[i].modified) {
5531 low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
5532 udelay(100);
5533 error = e1000_verify_write_ich8_byte(hw,
5534 (i << 1) + new_bank_offset, low_byte);
5535
5536 if (error != E1000_SUCCESS)
5537 sector_write_failed = true;
5538 else {
5539 high_byte =
5540 (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
5541 udelay(100);
5542 }
5543 } else {
5544 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
5545 &low_byte);
5546 udelay(100);
5547 error = e1000_verify_write_ich8_byte(hw,
5548 (i << 1) + new_bank_offset, low_byte);
5549
5550 if (error != E1000_SUCCESS)
5551 sector_write_failed = true;
5552 else {
5553 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
5554 &high_byte);
5555 udelay(100);
5556 }
5557 }
5558
5559 /* If the write of the low byte was successful, go ahead and
5560 * write the high byte while checking to make sure that if it
5561 * is the signature byte, then it is handled properly */
5562 if (!sector_write_failed) {
5563 /* If the word is 0x13, then make sure the signature bits
5564 * (15:14) are 11b until the commit has completed.
5565 * This will allow us to write 10b which indicates the
5566 * signature is valid. We want to do this after the write
5567 * has completed so that we don't mark the segment valid
5568 * while the write is still in progress */
5569 if (i == E1000_ICH_NVM_SIG_WORD)
5570 high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
5571
5572 error = e1000_verify_write_ich8_byte(hw,
5573 (i << 1) + new_bank_offset + 1, high_byte);
5574 if (error != E1000_SUCCESS)
5575 sector_write_failed = true;
5576
5577 } else {
5578 /* If the write failed then break from the loop and
5579 * return an error */
5580 break;
5581 }
5582 }
5583
5584 /* Don't bother writing the segment valid bits if sector
5585 * programming failed. */
5586 if (!sector_write_failed) {
5587 /* Finally validate the new segment by setting bit 15:14
5588 * to 10b in word 0x13 , this can be done without an
5589 * erase as well since these bits are 11 to start with
5590 * and we need to change bit 14 to 0b */
5591 e1000_read_ich8_byte(hw,
5592 E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
5593 &high_byte);
5594 high_byte &= 0xBF;
5595 error = e1000_verify_write_ich8_byte(hw,
5596 E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
5597 /* And invalidate the previously valid segment by setting
5598 * its signature word (0x13) high_byte to 0b. This can be
5599 * done without an erase because flash erase sets all bits
5600 * to 1's. We can write 1's to 0's without an erase */
5601 if (error == E1000_SUCCESS) {
5602 error = e1000_verify_write_ich8_byte(hw,
5603 E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
5604 }
5605
5606 /* Clear the now not used entry in the cache */
5607 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5608 hw->eeprom_shadow_ram[i].modified = false;
5609 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
5610 }
5611 }
5612 }
5613
5614 return error;
5615} 4081}
5616 4082
5617/****************************************************************************** 4083/**
4084 * e1000_read_mac_addr - read the adapters MAC from eeprom
4085 * @hw: Struct containing variables accessed by shared code
4086 *
5618 * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the 4087 * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
5619 * second function of dual function devices 4088 * second function of dual function devices
5620 * 4089 */
5621 * hw - Struct containing variables accessed by shared code
5622 *****************************************************************************/
5623s32 e1000_read_mac_addr(struct e1000_hw *hw) 4090s32 e1000_read_mac_addr(struct e1000_hw *hw)
5624{ 4091{
5625 u16 offset; 4092 u16 offset;
5626 u16 eeprom_data, i; 4093 u16 eeprom_data, i;
5627 4094
5628 DEBUGFUNC("e1000_read_mac_addr"); 4095 DEBUGFUNC("e1000_read_mac_addr");
5629 4096
5630 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4097 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
5631 offset = i >> 1; 4098 offset = i >> 1;
5632 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { 4099 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
5633 DEBUGOUT("EEPROM Read Error\n"); 4100 DEBUGOUT("EEPROM Read Error\n");
5634 return -E1000_ERR_EEPROM; 4101 return -E1000_ERR_EEPROM;
5635 } 4102 }
5636 hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); 4103 hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
5637 hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8); 4104 hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8);
5638 } 4105 }
5639 4106
5640 switch (hw->mac_type) { 4107 switch (hw->mac_type) {
5641 default: 4108 default:
5642 break; 4109 break;
5643 case e1000_82546: 4110 case e1000_82546:
5644 case e1000_82546_rev_3: 4111 case e1000_82546_rev_3:
5645 case e1000_82571: 4112 if (er32(STATUS) & E1000_STATUS_FUNC_1)
5646 case e1000_80003es2lan: 4113 hw->perm_mac_addr[5] ^= 0x01;
5647 if (er32(STATUS) & E1000_STATUS_FUNC_1) 4114 break;
5648 hw->perm_mac_addr[5] ^= 0x01; 4115 }
5649 break; 4116
5650 } 4117 for (i = 0; i < NODE_ADDRESS_SIZE; i++)
5651 4118 hw->mac_addr[i] = hw->perm_mac_addr[i];
5652 for (i = 0; i < NODE_ADDRESS_SIZE; i++) 4119 return E1000_SUCCESS;
5653 hw->mac_addr[i] = hw->perm_mac_addr[i];
5654 return E1000_SUCCESS;
5655} 4120}
5656 4121
5657/****************************************************************************** 4122/**
5658 * Initializes receive address filters. 4123 * e1000_init_rx_addrs - Initializes receive address filters.
5659 * 4124 * @hw: Struct containing variables accessed by shared code
5660 * hw - Struct containing variables accessed by shared code
5661 * 4125 *
5662 * Places the MAC address in receive address register 0 and clears the rest 4126 * Places the MAC address in receive address register 0 and clears the rest
5663 * of the receive addresss registers. Clears the multicast table. Assumes 4127 * of the receive address registers. Clears the multicast table. Assumes
5664 * the receiver is in reset when the routine is called. 4128 * the receiver is in reset when the routine is called.
5665 *****************************************************************************/ 4129 */
5666static void e1000_init_rx_addrs(struct e1000_hw *hw) 4130static void e1000_init_rx_addrs(struct e1000_hw *hw)
5667{ 4131{
5668 u32 i; 4132 u32 i;
5669 u32 rar_num; 4133 u32 rar_num;
5670 4134
5671 DEBUGFUNC("e1000_init_rx_addrs"); 4135 DEBUGFUNC("e1000_init_rx_addrs");
5672 4136
5673 /* Setup the receive address. */ 4137 /* Setup the receive address. */
5674 DEBUGOUT("Programming MAC Address into RAR[0]\n"); 4138 DEBUGOUT("Programming MAC Address into RAR[0]\n");
5675 4139
5676 e1000_rar_set(hw, hw->mac_addr, 0); 4140 e1000_rar_set(hw, hw->mac_addr, 0);
5677 4141
5678 rar_num = E1000_RAR_ENTRIES; 4142 rar_num = E1000_RAR_ENTRIES;
5679 4143
5680 /* Reserve a spot for the Locally Administered Address to work around 4144 /* Zero out the other 15 receive addresses. */
5681 * an 82571 issue in which a reset on one port will reload the MAC on 4145 DEBUGOUT("Clearing RAR[1-15]\n");
5682 * the other port. */ 4146 for (i = 1; i < rar_num; i++) {
5683 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present)) 4147 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
5684 rar_num -= 1; 4148 E1000_WRITE_FLUSH();
5685 if (hw->mac_type == e1000_ich8lan) 4149 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
5686 rar_num = E1000_RAR_ENTRIES_ICH8LAN; 4150 E1000_WRITE_FLUSH();
5687 4151 }
5688 /* Zero out the other 15 receive addresses. */
5689 DEBUGOUT("Clearing RAR[1-15]\n");
5690 for (i = 1; i < rar_num; i++) {
5691 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
5692 E1000_WRITE_FLUSH();
5693 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
5694 E1000_WRITE_FLUSH();
5695 }
5696} 4152}
5697 4153
5698/****************************************************************************** 4154/**
5699 * Hashes an address to determine its location in the multicast table 4155 * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table
5700 * 4156 * @hw: Struct containing variables accessed by shared code
5701 * hw - Struct containing variables accessed by shared code 4157 * @mc_addr: the multicast address to hash
5702 * mc_addr - the multicast address to hash 4158 */
5703 *****************************************************************************/
5704u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) 4159u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
5705{ 4160{
5706 u32 hash_value = 0; 4161 u32 hash_value = 0;
5707 4162
5708 /* The portion of the address that is used for the hash table is 4163 /* The portion of the address that is used for the hash table is
5709 * determined by the mc_filter_type setting. 4164 * determined by the mc_filter_type setting.
5710 */ 4165 */
5711 switch (hw->mc_filter_type) { 4166 switch (hw->mc_filter_type) {
5712 /* [0] [1] [2] [3] [4] [5] 4167 /* [0] [1] [2] [3] [4] [5]
5713 * 01 AA 00 12 34 56 4168 * 01 AA 00 12 34 56
5714 * LSB MSB 4169 * LSB MSB
5715 */ 4170 */
5716 case 0: 4171 case 0:
5717 if (hw->mac_type == e1000_ich8lan) { 4172 /* [47:36] i.e. 0x563 for above example address */
5718 /* [47:38] i.e. 0x158 for above example address */ 4173 hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
5719 hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2)); 4174 break;
5720 } else { 4175 case 1:
5721 /* [47:36] i.e. 0x563 for above example address */ 4176 /* [46:35] i.e. 0xAC6 for above example address */
5722 hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 4177 hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
5723 } 4178 break;
5724 break; 4179 case 2:
5725 case 1: 4180 /* [45:34] i.e. 0x5D8 for above example address */
5726 if (hw->mac_type == e1000_ich8lan) { 4181 hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
5727 /* [46:37] i.e. 0x2B1 for above example address */ 4182 break;
5728 hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3)); 4183 case 3:
5729 } else { 4184 /* [43:32] i.e. 0x634 for above example address */
5730 /* [46:35] i.e. 0xAC6 for above example address */ 4185 hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
5731 hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 4186 break;
5732 } 4187 }
5733 break; 4188
5734 case 2: 4189 hash_value &= 0xFFF;
5735 if (hw->mac_type == e1000_ich8lan) { 4190 return hash_value;
5736 /*[45:36] i.e. 0x163 for above example address */
5737 hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
5738 } else {
5739 /* [45:34] i.e. 0x5D8 for above example address */
5740 hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
5741 }
5742 break;
5743 case 3:
5744 if (hw->mac_type == e1000_ich8lan) {
5745 /* [43:34] i.e. 0x18D for above example address */
5746 hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
5747 } else {
5748 /* [43:32] i.e. 0x634 for above example address */
5749 hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
5750 }
5751 break;
5752 }
5753
5754 hash_value &= 0xFFF;
5755 if (hw->mac_type == e1000_ich8lan)
5756 hash_value &= 0x3FF;
5757
5758 return hash_value;
5759} 4191}
5760 4192
5761/****************************************************************************** 4193/**
5762 * Puts an ethernet address into a receive address register. 4194 * e1000_rar_set - Puts an ethernet address into a receive address register.
5763 * 4195 * @hw: Struct containing variables accessed by shared code
5764 * hw - Struct containing variables accessed by shared code 4196 * @addr: Address to put into receive address register
5765 * addr - Address to put into receive address register 4197 * @index: Receive address register to write
5766 * index - Receive address register to write 4198 */
5767 *****************************************************************************/
5768void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) 4199void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
5769{ 4200{
5770 u32 rar_low, rar_high; 4201 u32 rar_low, rar_high;
5771 4202
5772 /* HW expects these in little endian so we reverse the byte order 4203 /* HW expects these in little endian so we reverse the byte order
5773 * from network order (big endian) to little endian 4204 * from network order (big endian) to little endian
5774 */ 4205 */
5775 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | 4206 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
5776 ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); 4207 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5777 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); 4208 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5778 4209
5779 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx 4210 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
5780 * unit hang. 4211 * unit hang.
5781 * 4212 *
5782 * Description: 4213 * Description:
5783 * If there are any Rx frames queued up or otherwise present in the HW 4214 * If there are any Rx frames queued up or otherwise present in the HW
5784 * before RSS is enabled, and then we enable RSS, the HW Rx unit will 4215 * before RSS is enabled, and then we enable RSS, the HW Rx unit will
5785 * hang. To work around this issue, we have to disable receives and 4216 * hang. To work around this issue, we have to disable receives and
5786 * flush out all Rx frames before we enable RSS. To do so, we modify we 4217 * flush out all Rx frames before we enable RSS. To do so, we modify we
5787 * redirect all Rx traffic to manageability and then reset the HW. 4218 * redirect all Rx traffic to manageability and then reset the HW.
5788 * This flushes away Rx frames, and (since the redirections to 4219 * This flushes away Rx frames, and (since the redirections to
5789 * manageability persists across resets) keeps new ones from coming in 4220 * manageability persists across resets) keeps new ones from coming in
5790 * while we work. Then, we clear the Address Valid AV bit for all MAC 4221 * while we work. Then, we clear the Address Valid AV bit for all MAC
5791 * addresses and undo the re-direction to manageability. 4222 * addresses and undo the re-direction to manageability.
5792 * Now, frames are coming in again, but the MAC won't accept them, so 4223 * Now, frames are coming in again, but the MAC won't accept them, so
5793 * far so good. We now proceed to initialize RSS (if necessary) and 4224 * far so good. We now proceed to initialize RSS (if necessary) and
5794 * configure the Rx unit. Last, we re-enable the AV bits and continue 4225 * configure the Rx unit. Last, we re-enable the AV bits and continue
5795 * on our merry way. 4226 * on our merry way.
5796 */ 4227 */
5797 switch (hw->mac_type) { 4228 switch (hw->mac_type) {
5798 case e1000_82571: 4229 default:
5799 case e1000_82572: 4230 /* Indicate to hardware the Address is Valid. */
5800 case e1000_80003es2lan: 4231 rar_high |= E1000_RAH_AV;
5801 if (hw->leave_av_bit_off) 4232 break;
5802 break; 4233 }
5803 default: 4234
5804 /* Indicate to hardware the Address is Valid. */ 4235 E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
5805 rar_high |= E1000_RAH_AV; 4236 E1000_WRITE_FLUSH();
5806 break; 4237 E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
5807 } 4238 E1000_WRITE_FLUSH();
5808
5809 E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
5810 E1000_WRITE_FLUSH();
5811 E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
5812 E1000_WRITE_FLUSH();
5813} 4239}
5814 4240
5815/****************************************************************************** 4241/**
5816 * Writes a value to the specified offset in the VLAN filter table. 4242 * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
5817 * 4243 * @hw: Struct containing variables accessed by shared code
5818 * hw - Struct containing variables accessed by shared code 4244 * @offset: Offset in VLAN filer table to write
5819 * offset - Offset in VLAN filer table to write 4245 * @value: Value to write into VLAN filter table
5820 * value - Value to write into VLAN filter table 4246 */
5821 *****************************************************************************/
5822void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) 4247void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
5823{ 4248{
5824 u32 temp; 4249 u32 temp;
5825 4250
5826 if (hw->mac_type == e1000_ich8lan) 4251 if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
5827 return; 4252 temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
5828 4253 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
5829 if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { 4254 E1000_WRITE_FLUSH();
5830 temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); 4255 E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
5831 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); 4256 E1000_WRITE_FLUSH();
5832 E1000_WRITE_FLUSH(); 4257 } else {
5833 E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); 4258 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
5834 E1000_WRITE_FLUSH(); 4259 E1000_WRITE_FLUSH();
5835 } else { 4260 }
5836 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
5837 E1000_WRITE_FLUSH();
5838 }
5839} 4261}
5840 4262
5841/****************************************************************************** 4263/**
5842 * Clears the VLAN filer table 4264 * e1000_clear_vfta - Clears the VLAN filer table
5843 * 4265 * @hw: Struct containing variables accessed by shared code
5844 * hw - Struct containing variables accessed by shared code 4266 */
5845 *****************************************************************************/
5846static void e1000_clear_vfta(struct e1000_hw *hw) 4267static void e1000_clear_vfta(struct e1000_hw *hw)
5847{ 4268{
5848 u32 offset; 4269 u32 offset;
5849 u32 vfta_value = 0; 4270 u32 vfta_value = 0;
5850 u32 vfta_offset = 0; 4271 u32 vfta_offset = 0;
5851 u32 vfta_bit_in_reg = 0; 4272 u32 vfta_bit_in_reg = 0;
5852 4273
5853 if (hw->mac_type == e1000_ich8lan) 4274 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
5854 return; 4275 /* If the offset we want to clear is the same offset of the
5855 4276 * manageability VLAN ID, then clear all bits except that of the
5856 if (hw->mac_type == e1000_82573) { 4277 * manageability unit */
5857 if (hw->mng_cookie.vlan_id != 0) { 4278 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
5858 /* The VFTA is a 4096b bit-field, each identifying a single VLAN 4279 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
5859 * ID. The following operations determine which 32b entry 4280 E1000_WRITE_FLUSH();
5860 * (i.e. offset) into the array we want to set the VLAN ID 4281 }
5861 * (i.e. bit) of the manageability unit. */
5862 vfta_offset = (hw->mng_cookie.vlan_id >>
5863 E1000_VFTA_ENTRY_SHIFT) &
5864 E1000_VFTA_ENTRY_MASK;
5865 vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
5866 E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
5867 }
5868 }
5869 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
5870 /* If the offset we want to clear is the same offset of the
5871 * manageability VLAN ID, then clear all bits except that of the
5872 * manageability unit */
5873 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
5874 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
5875 E1000_WRITE_FLUSH();
5876 }
5877} 4282}
5878 4283
5879static s32 e1000_id_led_init(struct e1000_hw *hw) 4284static s32 e1000_id_led_init(struct e1000_hw *hw)
5880{ 4285{
5881 u32 ledctl; 4286 u32 ledctl;
5882 const u32 ledctl_mask = 0x000000FF; 4287 const u32 ledctl_mask = 0x000000FF;
5883 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; 4288 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
5884 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; 4289 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
5885 u16 eeprom_data, i, temp; 4290 u16 eeprom_data, i, temp;
5886 const u16 led_mask = 0x0F; 4291 const u16 led_mask = 0x0F;
5887 4292
5888 DEBUGFUNC("e1000_id_led_init"); 4293 DEBUGFUNC("e1000_id_led_init");
5889 4294
5890 if (hw->mac_type < e1000_82540) { 4295 if (hw->mac_type < e1000_82540) {
5891 /* Nothing to do */ 4296 /* Nothing to do */
5892 return E1000_SUCCESS; 4297 return E1000_SUCCESS;
5893 } 4298 }
5894 4299
5895 ledctl = er32(LEDCTL); 4300 ledctl = er32(LEDCTL);
5896 hw->ledctl_default = ledctl; 4301 hw->ledctl_default = ledctl;
5897 hw->ledctl_mode1 = hw->ledctl_default; 4302 hw->ledctl_mode1 = hw->ledctl_default;
5898 hw->ledctl_mode2 = hw->ledctl_default; 4303 hw->ledctl_mode2 = hw->ledctl_default;
5899 4304
5900 if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { 4305 if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
5901 DEBUGOUT("EEPROM Read Error\n"); 4306 DEBUGOUT("EEPROM Read Error\n");
5902 return -E1000_ERR_EEPROM; 4307 return -E1000_ERR_EEPROM;
5903 } 4308 }
5904 4309
5905 if ((hw->mac_type == e1000_82573) && 4310 if ((eeprom_data == ID_LED_RESERVED_0000) ||
5906 (eeprom_data == ID_LED_RESERVED_82573)) 4311 (eeprom_data == ID_LED_RESERVED_FFFF)) {
5907 eeprom_data = ID_LED_DEFAULT_82573; 4312 eeprom_data = ID_LED_DEFAULT;
5908 else if ((eeprom_data == ID_LED_RESERVED_0000) || 4313 }
5909 (eeprom_data == ID_LED_RESERVED_FFFF)) { 4314
5910 if (hw->mac_type == e1000_ich8lan) 4315 for (i = 0; i < 4; i++) {
5911 eeprom_data = ID_LED_DEFAULT_ICH8LAN; 4316 temp = (eeprom_data >> (i << 2)) & led_mask;
5912 else 4317 switch (temp) {
5913 eeprom_data = ID_LED_DEFAULT; 4318 case ID_LED_ON1_DEF2:
5914 } 4319 case ID_LED_ON1_ON2:
5915 4320 case ID_LED_ON1_OFF2:
5916 for (i = 0; i < 4; i++) { 4321 hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
5917 temp = (eeprom_data >> (i << 2)) & led_mask; 4322 hw->ledctl_mode1 |= ledctl_on << (i << 3);
5918 switch (temp) { 4323 break;
5919 case ID_LED_ON1_DEF2: 4324 case ID_LED_OFF1_DEF2:
5920 case ID_LED_ON1_ON2: 4325 case ID_LED_OFF1_ON2:
5921 case ID_LED_ON1_OFF2: 4326 case ID_LED_OFF1_OFF2:
5922 hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); 4327 hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
5923 hw->ledctl_mode1 |= ledctl_on << (i << 3); 4328 hw->ledctl_mode1 |= ledctl_off << (i << 3);
5924 break; 4329 break;
5925 case ID_LED_OFF1_DEF2: 4330 default:
5926 case ID_LED_OFF1_ON2: 4331 /* Do nothing */
5927 case ID_LED_OFF1_OFF2: 4332 break;
5928 hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); 4333 }
5929 hw->ledctl_mode1 |= ledctl_off << (i << 3); 4334 switch (temp) {
5930 break; 4335 case ID_LED_DEF1_ON2:
5931 default: 4336 case ID_LED_ON1_ON2:
5932 /* Do nothing */ 4337 case ID_LED_OFF1_ON2:
5933 break; 4338 hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
5934 } 4339 hw->ledctl_mode2 |= ledctl_on << (i << 3);
5935 switch (temp) { 4340 break;
5936 case ID_LED_DEF1_ON2: 4341 case ID_LED_DEF1_OFF2:
5937 case ID_LED_ON1_ON2: 4342 case ID_LED_ON1_OFF2:
5938 case ID_LED_OFF1_ON2: 4343 case ID_LED_OFF1_OFF2:
5939 hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); 4344 hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
5940 hw->ledctl_mode2 |= ledctl_on << (i << 3); 4345 hw->ledctl_mode2 |= ledctl_off << (i << 3);
5941 break; 4346 break;
5942 case ID_LED_DEF1_OFF2: 4347 default:
5943 case ID_LED_ON1_OFF2: 4348 /* Do nothing */
5944 case ID_LED_OFF1_OFF2: 4349 break;
5945 hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); 4350 }
5946 hw->ledctl_mode2 |= ledctl_off << (i << 3); 4351 }
5947 break; 4352 return E1000_SUCCESS;
5948 default:
5949 /* Do nothing */
5950 break;
5951 }
5952 }
5953 return E1000_SUCCESS;
5954} 4353}
5955 4354
5956/****************************************************************************** 4355/**
5957 * Prepares SW controlable LED for use and saves the current state of the LED. 4356 * e1000_setup_led
4357 * @hw: Struct containing variables accessed by shared code
5958 * 4358 *
5959 * hw - Struct containing variables accessed by shared code 4359 * Prepares SW controlable LED for use and saves the current state of the LED.
5960 *****************************************************************************/ 4360 */
5961s32 e1000_setup_led(struct e1000_hw *hw) 4361s32 e1000_setup_led(struct e1000_hw *hw)
5962{ 4362{
5963 u32 ledctl; 4363 u32 ledctl;
5964 s32 ret_val = E1000_SUCCESS; 4364 s32 ret_val = E1000_SUCCESS;
5965
5966 DEBUGFUNC("e1000_setup_led");
5967
5968 switch (hw->mac_type) {
5969 case e1000_82542_rev2_0:
5970 case e1000_82542_rev2_1:
5971 case e1000_82543:
5972 case e1000_82544:
5973 /* No setup necessary */
5974 break;
5975 case e1000_82541:
5976 case e1000_82547:
5977 case e1000_82541_rev_2:
5978 case e1000_82547_rev_2:
5979 /* Turn off PHY Smart Power Down (if enabled) */
5980 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
5981 &hw->phy_spd_default);
5982 if (ret_val)
5983 return ret_val;
5984 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
5985 (u16)(hw->phy_spd_default &
5986 ~IGP01E1000_GMII_SPD));
5987 if (ret_val)
5988 return ret_val;
5989 /* Fall Through */
5990 default:
5991 if (hw->media_type == e1000_media_type_fiber) {
5992 ledctl = er32(LEDCTL);
5993 /* Save current LEDCTL settings */
5994 hw->ledctl_default = ledctl;
5995 /* Turn off LED0 */
5996 ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
5997 E1000_LEDCTL_LED0_BLINK |
5998 E1000_LEDCTL_LED0_MODE_MASK);
5999 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
6000 E1000_LEDCTL_LED0_MODE_SHIFT);
6001 ew32(LEDCTL, ledctl);
6002 } else if (hw->media_type == e1000_media_type_copper)
6003 ew32(LEDCTL, hw->ledctl_mode1);
6004 break;
6005 }
6006
6007 return E1000_SUCCESS;
6008}
6009 4365
4366 DEBUGFUNC("e1000_setup_led");
6010 4367
6011/****************************************************************************** 4368 switch (hw->mac_type) {
6012 * Used on 82571 and later Si that has LED blink bits. 4369 case e1000_82542_rev2_0:
6013 * Callers must use their own timer and should have already called 4370 case e1000_82542_rev2_1:
6014 * e1000_id_led_init() 4371 case e1000_82543:
6015 * Call e1000_cleanup led() to stop blinking 4372 case e1000_82544:
6016 * 4373 /* No setup necessary */
6017 * hw - Struct containing variables accessed by shared code 4374 break;
6018 *****************************************************************************/ 4375 case e1000_82541:
6019s32 e1000_blink_led_start(struct e1000_hw *hw) 4376 case e1000_82547:
6020{ 4377 case e1000_82541_rev_2:
6021 s16 i; 4378 case e1000_82547_rev_2:
6022 u32 ledctl_blink = 0; 4379 /* Turn off PHY Smart Power Down (if enabled) */
6023 4380 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
6024 DEBUGFUNC("e1000_id_led_blink_on"); 4381 &hw->phy_spd_default);
6025 4382 if (ret_val)
6026 if (hw->mac_type < e1000_82571) { 4383 return ret_val;
6027 /* Nothing to do */ 4384 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
6028 return E1000_SUCCESS; 4385 (u16) (hw->phy_spd_default &
6029 } 4386 ~IGP01E1000_GMII_SPD));
6030 if (hw->media_type == e1000_media_type_fiber) { 4387 if (ret_val)
6031 /* always blink LED0 for PCI-E fiber */ 4388 return ret_val;
6032 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 4389 /* Fall Through */
6033 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 4390 default:
6034 } else { 4391 if (hw->media_type == e1000_media_type_fiber) {
6035 /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */ 4392 ledctl = er32(LEDCTL);
6036 ledctl_blink = hw->ledctl_mode2; 4393 /* Save current LEDCTL settings */
6037 for (i=0; i < 4; i++) 4394 hw->ledctl_default = ledctl;
6038 if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) == 4395 /* Turn off LED0 */
6039 E1000_LEDCTL_MODE_LED_ON) 4396 ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
6040 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); 4397 E1000_LEDCTL_LED0_BLINK |
6041 } 4398 E1000_LEDCTL_LED0_MODE_MASK);
6042 4399 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
6043 ew32(LEDCTL, ledctl_blink); 4400 E1000_LEDCTL_LED0_MODE_SHIFT);
6044 4401 ew32(LEDCTL, ledctl);
6045 return E1000_SUCCESS; 4402 } else if (hw->media_type == e1000_media_type_copper)
4403 ew32(LEDCTL, hw->ledctl_mode1);
4404 break;
4405 }
4406
4407 return E1000_SUCCESS;
6046} 4408}
6047 4409
6048/****************************************************************************** 4410/**
6049 * Restores the saved state of the SW controlable LED. 4411 * e1000_cleanup_led - Restores the saved state of the SW controlable LED.
6050 * 4412 * @hw: Struct containing variables accessed by shared code
6051 * hw - Struct containing variables accessed by shared code 4413 */
6052 *****************************************************************************/
6053s32 e1000_cleanup_led(struct e1000_hw *hw) 4414s32 e1000_cleanup_led(struct e1000_hw *hw)
6054{ 4415{
6055 s32 ret_val = E1000_SUCCESS; 4416 s32 ret_val = E1000_SUCCESS;
6056 4417
6057 DEBUGFUNC("e1000_cleanup_led"); 4418 DEBUGFUNC("e1000_cleanup_led");
6058 4419
6059 switch (hw->mac_type) { 4420 switch (hw->mac_type) {
6060 case e1000_82542_rev2_0: 4421 case e1000_82542_rev2_0:
6061 case e1000_82542_rev2_1: 4422 case e1000_82542_rev2_1:
6062 case e1000_82543: 4423 case e1000_82543:
6063 case e1000_82544: 4424 case e1000_82544:
6064 /* No cleanup necessary */ 4425 /* No cleanup necessary */
6065 break; 4426 break;
6066 case e1000_82541: 4427 case e1000_82541:
6067 case e1000_82547: 4428 case e1000_82547:
6068 case e1000_82541_rev_2: 4429 case e1000_82541_rev_2:
6069 case e1000_82547_rev_2: 4430 case e1000_82547_rev_2:
6070 /* Turn on PHY Smart Power Down (if previously enabled) */ 4431 /* Turn on PHY Smart Power Down (if previously enabled) */
6071 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, 4432 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
6072 hw->phy_spd_default); 4433 hw->phy_spd_default);
6073 if (ret_val) 4434 if (ret_val)
6074 return ret_val; 4435 return ret_val;
6075 /* Fall Through */ 4436 /* Fall Through */
6076 default: 4437 default:
6077 if (hw->phy_type == e1000_phy_ife) { 4438 /* Restore LEDCTL settings */
6078 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); 4439 ew32(LEDCTL, hw->ledctl_default);
6079 break; 4440 break;
6080 } 4441 }
6081 /* Restore LEDCTL settings */ 4442
6082 ew32(LEDCTL, hw->ledctl_default); 4443 return E1000_SUCCESS;
6083 break;
6084 }
6085
6086 return E1000_SUCCESS;
6087} 4444}
6088 4445
6089/****************************************************************************** 4446/**
6090 * Turns on the software controllable LED 4447 * e1000_led_on - Turns on the software controllable LED
6091 * 4448 * @hw: Struct containing variables accessed by shared code
6092 * hw - Struct containing variables accessed by shared code 4449 */
6093 *****************************************************************************/
6094s32 e1000_led_on(struct e1000_hw *hw) 4450s32 e1000_led_on(struct e1000_hw *hw)
6095{ 4451{
6096 u32 ctrl = er32(CTRL); 4452 u32 ctrl = er32(CTRL);
6097 4453
6098 DEBUGFUNC("e1000_led_on"); 4454 DEBUGFUNC("e1000_led_on");
6099 4455
6100 switch (hw->mac_type) { 4456 switch (hw->mac_type) {
6101 case e1000_82542_rev2_0: 4457 case e1000_82542_rev2_0:
6102 case e1000_82542_rev2_1: 4458 case e1000_82542_rev2_1:
6103 case e1000_82543: 4459 case e1000_82543:
6104 /* Set SW Defineable Pin 0 to turn on the LED */ 4460 /* Set SW Defineable Pin 0 to turn on the LED */
6105 ctrl |= E1000_CTRL_SWDPIN0; 4461 ctrl |= E1000_CTRL_SWDPIN0;
6106 ctrl |= E1000_CTRL_SWDPIO0; 4462 ctrl |= E1000_CTRL_SWDPIO0;
6107 break; 4463 break;
6108 case e1000_82544: 4464 case e1000_82544:
6109 if (hw->media_type == e1000_media_type_fiber) { 4465 if (hw->media_type == e1000_media_type_fiber) {
6110 /* Set SW Defineable Pin 0 to turn on the LED */ 4466 /* Set SW Defineable Pin 0 to turn on the LED */
6111 ctrl |= E1000_CTRL_SWDPIN0; 4467 ctrl |= E1000_CTRL_SWDPIN0;
6112 ctrl |= E1000_CTRL_SWDPIO0; 4468 ctrl |= E1000_CTRL_SWDPIO0;
6113 } else { 4469 } else {
6114 /* Clear SW Defineable Pin 0 to turn on the LED */ 4470 /* Clear SW Defineable Pin 0 to turn on the LED */
6115 ctrl &= ~E1000_CTRL_SWDPIN0; 4471 ctrl &= ~E1000_CTRL_SWDPIN0;
6116 ctrl |= E1000_CTRL_SWDPIO0; 4472 ctrl |= E1000_CTRL_SWDPIO0;
6117 } 4473 }
6118 break; 4474 break;
6119 default: 4475 default:
6120 if (hw->media_type == e1000_media_type_fiber) { 4476 if (hw->media_type == e1000_media_type_fiber) {
6121 /* Clear SW Defineable Pin 0 to turn on the LED */ 4477 /* Clear SW Defineable Pin 0 to turn on the LED */
6122 ctrl &= ~E1000_CTRL_SWDPIN0; 4478 ctrl &= ~E1000_CTRL_SWDPIN0;
6123 ctrl |= E1000_CTRL_SWDPIO0; 4479 ctrl |= E1000_CTRL_SWDPIO0;
6124 } else if (hw->phy_type == e1000_phy_ife) { 4480 } else if (hw->media_type == e1000_media_type_copper) {
6125 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 4481 ew32(LEDCTL, hw->ledctl_mode2);
6126 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); 4482 return E1000_SUCCESS;
6127 } else if (hw->media_type == e1000_media_type_copper) { 4483 }
6128 ew32(LEDCTL, hw->ledctl_mode2); 4484 break;
6129 return E1000_SUCCESS; 4485 }
6130 } 4486
6131 break; 4487 ew32(CTRL, ctrl);
6132 } 4488
6133 4489 return E1000_SUCCESS;
6134 ew32(CTRL, ctrl);
6135
6136 return E1000_SUCCESS;
6137} 4490}
6138 4491
6139/****************************************************************************** 4492/**
6140 * Turns off the software controllable LED 4493 * e1000_led_off - Turns off the software controllable LED
6141 * 4494 * @hw: Struct containing variables accessed by shared code
6142 * hw - Struct containing variables accessed by shared code 4495 */
6143 *****************************************************************************/
6144s32 e1000_led_off(struct e1000_hw *hw) 4496s32 e1000_led_off(struct e1000_hw *hw)
6145{ 4497{
6146 u32 ctrl = er32(CTRL); 4498 u32 ctrl = er32(CTRL);
6147 4499
6148 DEBUGFUNC("e1000_led_off"); 4500 DEBUGFUNC("e1000_led_off");
6149 4501
6150 switch (hw->mac_type) { 4502 switch (hw->mac_type) {
6151 case e1000_82542_rev2_0: 4503 case e1000_82542_rev2_0:
6152 case e1000_82542_rev2_1: 4504 case e1000_82542_rev2_1:
6153 case e1000_82543: 4505 case e1000_82543:
6154 /* Clear SW Defineable Pin 0 to turn off the LED */ 4506 /* Clear SW Defineable Pin 0 to turn off the LED */
6155 ctrl &= ~E1000_CTRL_SWDPIN0; 4507 ctrl &= ~E1000_CTRL_SWDPIN0;
6156 ctrl |= E1000_CTRL_SWDPIO0; 4508 ctrl |= E1000_CTRL_SWDPIO0;
6157 break; 4509 break;
6158 case e1000_82544: 4510 case e1000_82544:
6159 if (hw->media_type == e1000_media_type_fiber) { 4511 if (hw->media_type == e1000_media_type_fiber) {
6160 /* Clear SW Defineable Pin 0 to turn off the LED */ 4512 /* Clear SW Defineable Pin 0 to turn off the LED */
6161 ctrl &= ~E1000_CTRL_SWDPIN0; 4513 ctrl &= ~E1000_CTRL_SWDPIN0;
6162 ctrl |= E1000_CTRL_SWDPIO0; 4514 ctrl |= E1000_CTRL_SWDPIO0;
6163 } else { 4515 } else {
6164 /* Set SW Defineable Pin 0 to turn off the LED */ 4516 /* Set SW Defineable Pin 0 to turn off the LED */
6165 ctrl |= E1000_CTRL_SWDPIN0; 4517 ctrl |= E1000_CTRL_SWDPIN0;
6166 ctrl |= E1000_CTRL_SWDPIO0; 4518 ctrl |= E1000_CTRL_SWDPIO0;
6167 } 4519 }
6168 break; 4520 break;
6169 default: 4521 default:
6170 if (hw->media_type == e1000_media_type_fiber) { 4522 if (hw->media_type == e1000_media_type_fiber) {
6171 /* Set SW Defineable Pin 0 to turn off the LED */ 4523 /* Set SW Defineable Pin 0 to turn off the LED */
6172 ctrl |= E1000_CTRL_SWDPIN0; 4524 ctrl |= E1000_CTRL_SWDPIN0;
6173 ctrl |= E1000_CTRL_SWDPIO0; 4525 ctrl |= E1000_CTRL_SWDPIO0;
6174 } else if (hw->phy_type == e1000_phy_ife) { 4526 } else if (hw->media_type == e1000_media_type_copper) {
6175 e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 4527 ew32(LEDCTL, hw->ledctl_mode1);
6176 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); 4528 return E1000_SUCCESS;
6177 } else if (hw->media_type == e1000_media_type_copper) { 4529 }
6178 ew32(LEDCTL, hw->ledctl_mode1); 4530 break;
6179 return E1000_SUCCESS; 4531 }
6180 } 4532
6181 break; 4533 ew32(CTRL, ctrl);
6182 } 4534
6183 4535 return E1000_SUCCESS;
6184 ew32(CTRL, ctrl);
6185
6186 return E1000_SUCCESS;
6187} 4536}
6188 4537
6189/****************************************************************************** 4538/**
6190 * Clears all hardware statistics counters. 4539 * e1000_clear_hw_cntrs - Clears all hardware statistics counters.
6191 * 4540 * @hw: Struct containing variables accessed by shared code
6192 * hw - Struct containing variables accessed by shared code 4541 */
6193 *****************************************************************************/
6194static void e1000_clear_hw_cntrs(struct e1000_hw *hw) 4542static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
6195{ 4543{
6196 volatile u32 temp; 4544 volatile u32 temp;
6197 4545
6198 temp = er32(CRCERRS); 4546 temp = er32(CRCERRS);
6199 temp = er32(SYMERRS); 4547 temp = er32(SYMERRS);
6200 temp = er32(MPC); 4548 temp = er32(MPC);
6201 temp = er32(SCC); 4549 temp = er32(SCC);
6202 temp = er32(ECOL); 4550 temp = er32(ECOL);
6203 temp = er32(MCC); 4551 temp = er32(MCC);
6204 temp = er32(LATECOL); 4552 temp = er32(LATECOL);
6205 temp = er32(COLC); 4553 temp = er32(COLC);
6206 temp = er32(DC); 4554 temp = er32(DC);
6207 temp = er32(SEC); 4555 temp = er32(SEC);
6208 temp = er32(RLEC); 4556 temp = er32(RLEC);
6209 temp = er32(XONRXC); 4557 temp = er32(XONRXC);
6210 temp = er32(XONTXC); 4558 temp = er32(XONTXC);
6211 temp = er32(XOFFRXC); 4559 temp = er32(XOFFRXC);
6212 temp = er32(XOFFTXC); 4560 temp = er32(XOFFTXC);
6213 temp = er32(FCRUC); 4561 temp = er32(FCRUC);
6214 4562
6215 if (hw->mac_type != e1000_ich8lan) { 4563 temp = er32(PRC64);
6216 temp = er32(PRC64); 4564 temp = er32(PRC127);
6217 temp = er32(PRC127); 4565 temp = er32(PRC255);
6218 temp = er32(PRC255); 4566 temp = er32(PRC511);
6219 temp = er32(PRC511); 4567 temp = er32(PRC1023);
6220 temp = er32(PRC1023); 4568 temp = er32(PRC1522);
6221 temp = er32(PRC1522); 4569
6222 } 4570 temp = er32(GPRC);
6223 4571 temp = er32(BPRC);
6224 temp = er32(GPRC); 4572 temp = er32(MPRC);
6225 temp = er32(BPRC); 4573 temp = er32(GPTC);
6226 temp = er32(MPRC); 4574 temp = er32(GORCL);
6227 temp = er32(GPTC); 4575 temp = er32(GORCH);
6228 temp = er32(GORCL); 4576 temp = er32(GOTCL);
6229 temp = er32(GORCH); 4577 temp = er32(GOTCH);
6230 temp = er32(GOTCL); 4578 temp = er32(RNBC);
6231 temp = er32(GOTCH); 4579 temp = er32(RUC);
6232 temp = er32(RNBC); 4580 temp = er32(RFC);
6233 temp = er32(RUC); 4581 temp = er32(ROC);
6234 temp = er32(RFC); 4582 temp = er32(RJC);
6235 temp = er32(ROC); 4583 temp = er32(TORL);
6236 temp = er32(RJC); 4584 temp = er32(TORH);
6237 temp = er32(TORL); 4585 temp = er32(TOTL);
6238 temp = er32(TORH); 4586 temp = er32(TOTH);
6239 temp = er32(TOTL); 4587 temp = er32(TPR);
6240 temp = er32(TOTH); 4588 temp = er32(TPT);
6241 temp = er32(TPR); 4589
6242 temp = er32(TPT); 4590 temp = er32(PTC64);
6243 4591 temp = er32(PTC127);
6244 if (hw->mac_type != e1000_ich8lan) { 4592 temp = er32(PTC255);
6245 temp = er32(PTC64); 4593 temp = er32(PTC511);
6246 temp = er32(PTC127); 4594 temp = er32(PTC1023);
6247 temp = er32(PTC255); 4595 temp = er32(PTC1522);
6248 temp = er32(PTC511); 4596
6249 temp = er32(PTC1023); 4597 temp = er32(MPTC);
6250 temp = er32(PTC1522); 4598 temp = er32(BPTC);
6251 } 4599
6252 4600 if (hw->mac_type < e1000_82543)
6253 temp = er32(MPTC); 4601 return;
6254 temp = er32(BPTC); 4602
6255 4603 temp = er32(ALGNERRC);
6256 if (hw->mac_type < e1000_82543) return; 4604 temp = er32(RXERRC);
6257 4605 temp = er32(TNCRS);
6258 temp = er32(ALGNERRC); 4606 temp = er32(CEXTERR);
6259 temp = er32(RXERRC); 4607 temp = er32(TSCTC);
6260 temp = er32(TNCRS); 4608 temp = er32(TSCTFC);
6261 temp = er32(CEXTERR); 4609
6262 temp = er32(TSCTC); 4610 if (hw->mac_type <= e1000_82544)
6263 temp = er32(TSCTFC); 4611 return;
6264 4612
6265 if (hw->mac_type <= e1000_82544) return; 4613 temp = er32(MGTPRC);
6266 4614 temp = er32(MGTPDC);
6267 temp = er32(MGTPRC); 4615 temp = er32(MGTPTC);
6268 temp = er32(MGTPDC); 4616}
6269 temp = er32(MGTPTC); 4617
6270 4618/**
6271 if (hw->mac_type <= e1000_82547_rev_2) return; 4619 * e1000_reset_adaptive - Resets Adaptive IFS to its default state.
6272 4620 * @hw: Struct containing variables accessed by shared code
6273 temp = er32(IAC);
6274 temp = er32(ICRXOC);
6275
6276 if (hw->mac_type == e1000_ich8lan) return;
6277
6278 temp = er32(ICRXPTC);
6279 temp = er32(ICRXATC);
6280 temp = er32(ICTXPTC);
6281 temp = er32(ICTXATC);
6282 temp = er32(ICTXQEC);
6283 temp = er32(ICTXQMTC);
6284 temp = er32(ICRXDMTC);
6285}
6286
6287/******************************************************************************
6288 * Resets Adaptive IFS to its default state.
6289 *
6290 * hw - Struct containing variables accessed by shared code
6291 * 4621 *
6292 * Call this after e1000_init_hw. You may override the IFS defaults by setting 4622 * Call this after e1000_init_hw. You may override the IFS defaults by setting
6293 * hw->ifs_params_forced to true. However, you must initialize hw-> 4623 * hw->ifs_params_forced to true. However, you must initialize hw->
6294 * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio 4624 * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
6295 * before calling this function. 4625 * before calling this function.
6296 *****************************************************************************/ 4626 */
6297void e1000_reset_adaptive(struct e1000_hw *hw) 4627void e1000_reset_adaptive(struct e1000_hw *hw)
6298{ 4628{
6299 DEBUGFUNC("e1000_reset_adaptive"); 4629 DEBUGFUNC("e1000_reset_adaptive");
6300 4630
6301 if (hw->adaptive_ifs) { 4631 if (hw->adaptive_ifs) {
6302 if (!hw->ifs_params_forced) { 4632 if (!hw->ifs_params_forced) {
6303 hw->current_ifs_val = 0; 4633 hw->current_ifs_val = 0;
6304 hw->ifs_min_val = IFS_MIN; 4634 hw->ifs_min_val = IFS_MIN;
6305 hw->ifs_max_val = IFS_MAX; 4635 hw->ifs_max_val = IFS_MAX;
6306 hw->ifs_step_size = IFS_STEP; 4636 hw->ifs_step_size = IFS_STEP;
6307 hw->ifs_ratio = IFS_RATIO; 4637 hw->ifs_ratio = IFS_RATIO;
6308 } 4638 }
6309 hw->in_ifs_mode = false; 4639 hw->in_ifs_mode = false;
6310 ew32(AIT, 0); 4640 ew32(AIT, 0);
6311 } else { 4641 } else {
6312 DEBUGOUT("Not in Adaptive IFS mode!\n"); 4642 DEBUGOUT("Not in Adaptive IFS mode!\n");
6313 } 4643 }
6314} 4644}
6315 4645
6316/****************************************************************************** 4646/**
4647 * e1000_update_adaptive - update adaptive IFS
4648 * @hw: Struct containing variables accessed by shared code
4649 * @tx_packets: Number of transmits since last callback
4650 * @total_collisions: Number of collisions since last callback
4651 *
6317 * Called during the callback/watchdog routine to update IFS value based on 4652 * Called during the callback/watchdog routine to update IFS value based on
6318 * the ratio of transmits to collisions. 4653 * the ratio of transmits to collisions.
6319 * 4654 */
6320 * hw - Struct containing variables accessed by shared code
6321 * tx_packets - Number of transmits since last callback
6322 * total_collisions - Number of collisions since last callback
6323 *****************************************************************************/
6324void e1000_update_adaptive(struct e1000_hw *hw) 4655void e1000_update_adaptive(struct e1000_hw *hw)
6325{ 4656{
6326 DEBUGFUNC("e1000_update_adaptive"); 4657 DEBUGFUNC("e1000_update_adaptive");
6327 4658
6328 if (hw->adaptive_ifs) { 4659 if (hw->adaptive_ifs) {
6329 if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { 4660 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
6330 if (hw->tx_packet_delta > MIN_NUM_XMITS) { 4661 if (hw->tx_packet_delta > MIN_NUM_XMITS) {
6331 hw->in_ifs_mode = true; 4662 hw->in_ifs_mode = true;
6332 if (hw->current_ifs_val < hw->ifs_max_val) { 4663 if (hw->current_ifs_val < hw->ifs_max_val) {
6333 if (hw->current_ifs_val == 0) 4664 if (hw->current_ifs_val == 0)
6334 hw->current_ifs_val = hw->ifs_min_val; 4665 hw->current_ifs_val =
6335 else 4666 hw->ifs_min_val;
6336 hw->current_ifs_val += hw->ifs_step_size; 4667 else
6337 ew32(AIT, hw->current_ifs_val); 4668 hw->current_ifs_val +=
6338 } 4669 hw->ifs_step_size;
6339 } 4670 ew32(AIT, hw->current_ifs_val);
6340 } else { 4671 }
6341 if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { 4672 }
6342 hw->current_ifs_val = 0; 4673 } else {
6343 hw->in_ifs_mode = false; 4674 if (hw->in_ifs_mode
6344 ew32(AIT, 0); 4675 && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
6345 } 4676 hw->current_ifs_val = 0;
6346 } 4677 hw->in_ifs_mode = false;
6347 } else { 4678 ew32(AIT, 0);
6348 DEBUGOUT("Not in Adaptive IFS mode!\n"); 4679 }
6349 } 4680 }
4681 } else {
4682 DEBUGOUT("Not in Adaptive IFS mode!\n");
4683 }
6350} 4684}
6351 4685
6352/****************************************************************************** 4686/**
6353 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT 4687 * e1000_tbi_adjust_stats
4688 * @hw: Struct containing variables accessed by shared code
4689 * @frame_len: The length of the frame in question
4690 * @mac_addr: The Ethernet destination address of the frame in question
6354 * 4691 *
6355 * hw - Struct containing variables accessed by shared code 4692 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
6356 * frame_len - The length of the frame in question 4693 */
6357 * mac_addr - The Ethernet destination address of the frame in question
6358 *****************************************************************************/
6359void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, 4694void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
6360 u32 frame_len, u8 *mac_addr) 4695 u32 frame_len, u8 *mac_addr)
6361{ 4696{
6362 u64 carry_bit; 4697 u64 carry_bit;
6363 4698
6364 /* First adjust the frame length. */ 4699 /* First adjust the frame length. */
6365 frame_len--; 4700 frame_len--;
6366 /* We need to adjust the statistics counters, since the hardware 4701 /* We need to adjust the statistics counters, since the hardware
6367 * counters overcount this packet as a CRC error and undercount 4702 * counters overcount this packet as a CRC error and undercount
6368 * the packet as a good packet 4703 * the packet as a good packet
6369 */ 4704 */
6370 /* This packet should not be counted as a CRC error. */ 4705 /* This packet should not be counted as a CRC error. */
6371 stats->crcerrs--; 4706 stats->crcerrs--;
6372 /* This packet does count as a Good Packet Received. */ 4707 /* This packet does count as a Good Packet Received. */
6373 stats->gprc++; 4708 stats->gprc++;
6374 4709
6375 /* Adjust the Good Octets received counters */ 4710 /* Adjust the Good Octets received counters */
6376 carry_bit = 0x80000000 & stats->gorcl; 4711 carry_bit = 0x80000000 & stats->gorcl;
6377 stats->gorcl += frame_len; 4712 stats->gorcl += frame_len;
6378 /* If the high bit of Gorcl (the low 32 bits of the Good Octets 4713 /* If the high bit of Gorcl (the low 32 bits of the Good Octets
6379 * Received Count) was one before the addition, 4714 * Received Count) was one before the addition,
6380 * AND it is zero after, then we lost the carry out, 4715 * AND it is zero after, then we lost the carry out,
6381 * need to add one to Gorch (Good Octets Received Count High). 4716 * need to add one to Gorch (Good Octets Received Count High).
6382 * This could be simplified if all environments supported 4717 * This could be simplified if all environments supported
6383 * 64-bit integers. 4718 * 64-bit integers.
6384 */ 4719 */
6385 if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) 4720 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
6386 stats->gorch++; 4721 stats->gorch++;
6387 /* Is this a broadcast or multicast? Check broadcast first, 4722 /* Is this a broadcast or multicast? Check broadcast first,
6388 * since the test for a multicast frame will test positive on 4723 * since the test for a multicast frame will test positive on
6389 * a broadcast frame. 4724 * a broadcast frame.
6390 */ 4725 */
6391 if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff)) 4726 if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
6392 /* Broadcast packet */ 4727 /* Broadcast packet */
6393 stats->bprc++; 4728 stats->bprc++;
6394 else if (*mac_addr & 0x01) 4729 else if (*mac_addr & 0x01)
6395 /* Multicast packet */ 4730 /* Multicast packet */
6396 stats->mprc++; 4731 stats->mprc++;
6397 4732
6398 if (frame_len == hw->max_frame_size) { 4733 if (frame_len == hw->max_frame_size) {
6399 /* In this case, the hardware has overcounted the number of 4734 /* In this case, the hardware has overcounted the number of
6400 * oversize frames. 4735 * oversize frames.
6401 */ 4736 */
6402 if (stats->roc > 0) 4737 if (stats->roc > 0)
6403 stats->roc--; 4738 stats->roc--;
6404 } 4739 }
6405 4740
6406 /* Adjust the bin counters when the extra byte put the frame in the 4741 /* Adjust the bin counters when the extra byte put the frame in the
6407 * wrong bin. Remember that the frame_len was adjusted above. 4742 * wrong bin. Remember that the frame_len was adjusted above.
6408 */ 4743 */
6409 if (frame_len == 64) { 4744 if (frame_len == 64) {
6410 stats->prc64++; 4745 stats->prc64++;
6411 stats->prc127--; 4746 stats->prc127--;
6412 } else if (frame_len == 127) { 4747 } else if (frame_len == 127) {
6413 stats->prc127++; 4748 stats->prc127++;
6414 stats->prc255--; 4749 stats->prc255--;
6415 } else if (frame_len == 255) { 4750 } else if (frame_len == 255) {
6416 stats->prc255++; 4751 stats->prc255++;
6417 stats->prc511--; 4752 stats->prc511--;
6418 } else if (frame_len == 511) { 4753 } else if (frame_len == 511) {
6419 stats->prc511++; 4754 stats->prc511++;
6420 stats->prc1023--; 4755 stats->prc1023--;
6421 } else if (frame_len == 1023) { 4756 } else if (frame_len == 1023) {
6422 stats->prc1023++; 4757 stats->prc1023++;
6423 stats->prc1522--; 4758 stats->prc1522--;
6424 } else if (frame_len == 1522) { 4759 } else if (frame_len == 1522) {
6425 stats->prc1522++; 4760 stats->prc1522++;
6426 } 4761 }
6427} 4762}
6428 4763
6429/****************************************************************************** 4764/**
6430 * Gets the current PCI bus type, speed, and width of the hardware 4765 * e1000_get_bus_info
4766 * @hw: Struct containing variables accessed by shared code
6431 * 4767 *
6432 * hw - Struct containing variables accessed by shared code 4768 * Gets the current PCI bus type, speed, and width of the hardware
6433 *****************************************************************************/ 4769 */
6434void e1000_get_bus_info(struct e1000_hw *hw) 4770void e1000_get_bus_info(struct e1000_hw *hw)
6435{ 4771{
6436 s32 ret_val; 4772 u32 status;
6437 u16 pci_ex_link_status; 4773
6438 u32 status; 4774 switch (hw->mac_type) {
6439 4775 case e1000_82542_rev2_0:
6440 switch (hw->mac_type) { 4776 case e1000_82542_rev2_1:
6441 case e1000_82542_rev2_0: 4777 hw->bus_type = e1000_bus_type_pci;
6442 case e1000_82542_rev2_1: 4778 hw->bus_speed = e1000_bus_speed_unknown;
6443 hw->bus_type = e1000_bus_type_pci; 4779 hw->bus_width = e1000_bus_width_unknown;
6444 hw->bus_speed = e1000_bus_speed_unknown; 4780 break;
6445 hw->bus_width = e1000_bus_width_unknown; 4781 default:
6446 break; 4782 status = er32(STATUS);
6447 case e1000_82571: 4783 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
6448 case e1000_82572: 4784 e1000_bus_type_pcix : e1000_bus_type_pci;
6449 case e1000_82573: 4785
6450 case e1000_80003es2lan: 4786 if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
6451 hw->bus_type = e1000_bus_type_pci_express; 4787 hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
6452 hw->bus_speed = e1000_bus_speed_2500; 4788 e1000_bus_speed_66 : e1000_bus_speed_120;
6453 ret_val = e1000_read_pcie_cap_reg(hw, 4789 } else if (hw->bus_type == e1000_bus_type_pci) {
6454 PCI_EX_LINK_STATUS, 4790 hw->bus_speed = (status & E1000_STATUS_PCI66) ?
6455 &pci_ex_link_status); 4791 e1000_bus_speed_66 : e1000_bus_speed_33;
6456 if (ret_val) 4792 } else {
6457 hw->bus_width = e1000_bus_width_unknown; 4793 switch (status & E1000_STATUS_PCIX_SPEED) {
6458 else 4794 case E1000_STATUS_PCIX_SPEED_66:
6459 hw->bus_width = (pci_ex_link_status & PCI_EX_LINK_WIDTH_MASK) >> 4795 hw->bus_speed = e1000_bus_speed_66;
6460 PCI_EX_LINK_WIDTH_SHIFT; 4796 break;
6461 break; 4797 case E1000_STATUS_PCIX_SPEED_100:
6462 case e1000_ich8lan: 4798 hw->bus_speed = e1000_bus_speed_100;
6463 hw->bus_type = e1000_bus_type_pci_express; 4799 break;
6464 hw->bus_speed = e1000_bus_speed_2500; 4800 case E1000_STATUS_PCIX_SPEED_133:
6465 hw->bus_width = e1000_bus_width_pciex_1; 4801 hw->bus_speed = e1000_bus_speed_133;
6466 break; 4802 break;
6467 default: 4803 default:
6468 status = er32(STATUS); 4804 hw->bus_speed = e1000_bus_speed_reserved;
6469 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? 4805 break;
6470 e1000_bus_type_pcix : e1000_bus_type_pci; 4806 }
6471 4807 }
6472 if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { 4808 hw->bus_width = (status & E1000_STATUS_BUS64) ?
6473 hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? 4809 e1000_bus_width_64 : e1000_bus_width_32;
6474 e1000_bus_speed_66 : e1000_bus_speed_120; 4810 break;
6475 } else if (hw->bus_type == e1000_bus_type_pci) { 4811 }
6476 hw->bus_speed = (status & E1000_STATUS_PCI66) ?
6477 e1000_bus_speed_66 : e1000_bus_speed_33;
6478 } else {
6479 switch (status & E1000_STATUS_PCIX_SPEED) {
6480 case E1000_STATUS_PCIX_SPEED_66:
6481 hw->bus_speed = e1000_bus_speed_66;
6482 break;
6483 case E1000_STATUS_PCIX_SPEED_100:
6484 hw->bus_speed = e1000_bus_speed_100;
6485 break;
6486 case E1000_STATUS_PCIX_SPEED_133:
6487 hw->bus_speed = e1000_bus_speed_133;
6488 break;
6489 default:
6490 hw->bus_speed = e1000_bus_speed_reserved;
6491 break;
6492 }
6493 }
6494 hw->bus_width = (status & E1000_STATUS_BUS64) ?
6495 e1000_bus_width_64 : e1000_bus_width_32;
6496 break;
6497 }
6498} 4812}
6499 4813
6500/****************************************************************************** 4814/**
4815 * e1000_write_reg_io
4816 * @hw: Struct containing variables accessed by shared code
4817 * @offset: offset to write to
4818 * @value: value to write
4819 *
6501 * Writes a value to one of the devices registers using port I/O (as opposed to 4820 * Writes a value to one of the devices registers using port I/O (as opposed to
6502 * memory mapped I/O). Only 82544 and newer devices support port I/O. 4821 * memory mapped I/O). Only 82544 and newer devices support port I/O.
6503 * 4822 */
6504 * hw - Struct containing variables accessed by shared code
6505 * offset - offset to write to
6506 * value - value to write
6507 *****************************************************************************/
6508static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) 4823static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
6509{ 4824{
6510 unsigned long io_addr = hw->io_base; 4825 unsigned long io_addr = hw->io_base;
6511 unsigned long io_data = hw->io_base + 4; 4826 unsigned long io_data = hw->io_base + 4;
6512 4827
6513 e1000_io_write(hw, io_addr, offset); 4828 e1000_io_write(hw, io_addr, offset);
6514 e1000_io_write(hw, io_data, value); 4829 e1000_io_write(hw, io_data, value);
6515} 4830}
6516 4831
6517/****************************************************************************** 4832/**
6518 * Estimates the cable length. 4833 * e1000_get_cable_length - Estimates the cable length.
6519 * 4834 * @hw: Struct containing variables accessed by shared code
6520 * hw - Struct containing variables accessed by shared code 4835 * @min_length: The estimated minimum length
6521 * min_length - The estimated minimum length 4836 * @max_length: The estimated maximum length
6522 * max_length - The estimated maximum length
6523 * 4837 *
6524 * returns: - E1000_ERR_XXX 4838 * returns: - E1000_ERR_XXX
6525 * E1000_SUCCESS 4839 * E1000_SUCCESS
@@ -6528,185 +4842,115 @@ static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
6528 * So for M88 phy's, this function interprets the one value returned from the 4842 * So for M88 phy's, this function interprets the one value returned from the
6529 * register to the minimum and maximum range. 4843 * register to the minimum and maximum range.
6530 * For IGP phy's, the function calculates the range by the AGC registers. 4844 * For IGP phy's, the function calculates the range by the AGC registers.
6531 *****************************************************************************/ 4845 */
6532static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, 4846static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
6533 u16 *max_length) 4847 u16 *max_length)
6534{ 4848{
6535 s32 ret_val; 4849 s32 ret_val;
6536 u16 agc_value = 0; 4850 u16 agc_value = 0;
6537 u16 i, phy_data; 4851 u16 i, phy_data;
6538 u16 cable_length; 4852 u16 cable_length;
6539 4853
6540 DEBUGFUNC("e1000_get_cable_length"); 4854 DEBUGFUNC("e1000_get_cable_length");
6541 4855
6542 *min_length = *max_length = 0; 4856 *min_length = *max_length = 0;
6543 4857
6544 /* Use old method for Phy older than IGP */ 4858 /* Use old method for Phy older than IGP */
6545 if (hw->phy_type == e1000_phy_m88) { 4859 if (hw->phy_type == e1000_phy_m88) {
6546 4860
6547 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 4861 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
6548 &phy_data); 4862 &phy_data);
6549 if (ret_val) 4863 if (ret_val)
6550 return ret_val; 4864 return ret_val;
6551 cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 4865 cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
6552 M88E1000_PSSR_CABLE_LENGTH_SHIFT; 4866 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
6553 4867
6554 /* Convert the enum value to ranged values */ 4868 /* Convert the enum value to ranged values */
6555 switch (cable_length) { 4869 switch (cable_length) {
6556 case e1000_cable_length_50: 4870 case e1000_cable_length_50:
6557 *min_length = 0; 4871 *min_length = 0;
6558 *max_length = e1000_igp_cable_length_50; 4872 *max_length = e1000_igp_cable_length_50;
6559 break; 4873 break;
6560 case e1000_cable_length_50_80: 4874 case e1000_cable_length_50_80:
6561 *min_length = e1000_igp_cable_length_50; 4875 *min_length = e1000_igp_cable_length_50;
6562 *max_length = e1000_igp_cable_length_80; 4876 *max_length = e1000_igp_cable_length_80;
6563 break; 4877 break;
6564 case e1000_cable_length_80_110: 4878 case e1000_cable_length_80_110:
6565 *min_length = e1000_igp_cable_length_80; 4879 *min_length = e1000_igp_cable_length_80;
6566 *max_length = e1000_igp_cable_length_110; 4880 *max_length = e1000_igp_cable_length_110;
6567 break; 4881 break;
6568 case e1000_cable_length_110_140: 4882 case e1000_cable_length_110_140:
6569 *min_length = e1000_igp_cable_length_110; 4883 *min_length = e1000_igp_cable_length_110;
6570 *max_length = e1000_igp_cable_length_140; 4884 *max_length = e1000_igp_cable_length_140;
6571 break; 4885 break;
6572 case e1000_cable_length_140: 4886 case e1000_cable_length_140:
6573 *min_length = e1000_igp_cable_length_140; 4887 *min_length = e1000_igp_cable_length_140;
6574 *max_length = e1000_igp_cable_length_170; 4888 *max_length = e1000_igp_cable_length_170;
6575 break; 4889 break;
6576 default: 4890 default:
6577 return -E1000_ERR_PHY; 4891 return -E1000_ERR_PHY;
6578 break; 4892 break;
6579 } 4893 }
6580 } else if (hw->phy_type == e1000_phy_gg82563) { 4894 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
6581 ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, 4895 u16 cur_agc_value;
6582 &phy_data); 4896 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
6583 if (ret_val) 4897 u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
6584 return ret_val; 4898 { IGP01E1000_PHY_AGC_A,
6585 cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH; 4899 IGP01E1000_PHY_AGC_B,
6586 4900 IGP01E1000_PHY_AGC_C,
6587 switch (cable_length) { 4901 IGP01E1000_PHY_AGC_D
6588 case e1000_gg_cable_length_60: 4902 };
6589 *min_length = 0; 4903 /* Read the AGC registers for all channels */
6590 *max_length = e1000_igp_cable_length_60; 4904 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
6591 break; 4905
6592 case e1000_gg_cable_length_60_115: 4906 ret_val =
6593 *min_length = e1000_igp_cable_length_60; 4907 e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
6594 *max_length = e1000_igp_cable_length_115; 4908 if (ret_val)
6595 break; 4909 return ret_val;
6596 case e1000_gg_cable_length_115_150: 4910
6597 *min_length = e1000_igp_cable_length_115; 4911 cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
6598 *max_length = e1000_igp_cable_length_150; 4912
6599 break; 4913 /* Value bound check. */
6600 case e1000_gg_cable_length_150: 4914 if ((cur_agc_value >=
6601 *min_length = e1000_igp_cable_length_150; 4915 IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1)
6602 *max_length = e1000_igp_cable_length_180; 4916 || (cur_agc_value == 0))
6603 break; 4917 return -E1000_ERR_PHY;
6604 default: 4918
6605 return -E1000_ERR_PHY; 4919 agc_value += cur_agc_value;
6606 break; 4920
6607 } 4921 /* Update minimal AGC value. */
6608 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ 4922 if (min_agc_value > cur_agc_value)
6609 u16 cur_agc_value; 4923 min_agc_value = cur_agc_value;
6610 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; 4924 }
6611 u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 4925
6612 {IGP01E1000_PHY_AGC_A, 4926 /* Remove the minimal AGC result for length < 50m */
6613 IGP01E1000_PHY_AGC_B, 4927 if (agc_value <
6614 IGP01E1000_PHY_AGC_C, 4928 IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
6615 IGP01E1000_PHY_AGC_D}; 4929 agc_value -= min_agc_value;
6616 /* Read the AGC registers for all channels */ 4930
6617 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { 4931 /* Get the average length of the remaining 3 channels */
6618 4932 agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
6619 ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); 4933 } else {
6620 if (ret_val) 4934 /* Get the average length of all the 4 channels. */
6621 return ret_val; 4935 agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
6622 4936 }
6623 cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; 4937
6624 4938 /* Set the range of the calculated length. */
6625 /* Value bound check. */ 4939 *min_length = ((e1000_igp_cable_length_table[agc_value] -
6626 if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || 4940 IGP01E1000_AGC_RANGE) > 0) ?
6627 (cur_agc_value == 0)) 4941 (e1000_igp_cable_length_table[agc_value] -
6628 return -E1000_ERR_PHY; 4942 IGP01E1000_AGC_RANGE) : 0;
6629 4943 *max_length = e1000_igp_cable_length_table[agc_value] +
6630 agc_value += cur_agc_value; 4944 IGP01E1000_AGC_RANGE;
6631 4945 }
6632 /* Update minimal AGC value. */ 4946
6633 if (min_agc_value > cur_agc_value) 4947 return E1000_SUCCESS;
6634 min_agc_value = cur_agc_value;
6635 }
6636
6637 /* Remove the minimal AGC result for length < 50m */
6638 if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
6639 agc_value -= min_agc_value;
6640
6641 /* Get the average length of the remaining 3 channels */
6642 agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
6643 } else {
6644 /* Get the average length of all the 4 channels. */
6645 agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
6646 }
6647
6648 /* Set the range of the calculated length. */
6649 *min_length = ((e1000_igp_cable_length_table[agc_value] -
6650 IGP01E1000_AGC_RANGE) > 0) ?
6651 (e1000_igp_cable_length_table[agc_value] -
6652 IGP01E1000_AGC_RANGE) : 0;
6653 *max_length = e1000_igp_cable_length_table[agc_value] +
6654 IGP01E1000_AGC_RANGE;
6655 } else if (hw->phy_type == e1000_phy_igp_2 ||
6656 hw->phy_type == e1000_phy_igp_3) {
6657 u16 cur_agc_index, max_agc_index = 0;
6658 u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
6659 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
6660 {IGP02E1000_PHY_AGC_A,
6661 IGP02E1000_PHY_AGC_B,
6662 IGP02E1000_PHY_AGC_C,
6663 IGP02E1000_PHY_AGC_D};
6664 /* Read the AGC registers for all channels */
6665 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
6666 ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
6667 if (ret_val)
6668 return ret_val;
6669
6670 /* Getting bits 15:9, which represent the combination of course and
6671 * fine gain values. The result is a number that can be put into
6672 * the lookup table to obtain the approximate cable length. */
6673 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
6674 IGP02E1000_AGC_LENGTH_MASK;
6675
6676 /* Array index bound check. */
6677 if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
6678 (cur_agc_index == 0))
6679 return -E1000_ERR_PHY;
6680
6681 /* Remove min & max AGC values from calculation. */
6682 if (e1000_igp_2_cable_length_table[min_agc_index] >
6683 e1000_igp_2_cable_length_table[cur_agc_index])
6684 min_agc_index = cur_agc_index;
6685 if (e1000_igp_2_cable_length_table[max_agc_index] <
6686 e1000_igp_2_cable_length_table[cur_agc_index])
6687 max_agc_index = cur_agc_index;
6688
6689 agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
6690 }
6691
6692 agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
6693 e1000_igp_2_cable_length_table[max_agc_index]);
6694 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
6695
6696 /* Calculate cable length with the error range of +/- 10 meters. */
6697 *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
6698 (agc_value - IGP02E1000_AGC_RANGE) : 0;
6699 *max_length = agc_value + IGP02E1000_AGC_RANGE;
6700 }
6701
6702 return E1000_SUCCESS;
6703} 4948}
6704 4949
6705/****************************************************************************** 4950/**
6706 * Check the cable polarity 4951 * e1000_check_polarity - Check the cable polarity
6707 * 4952 * @hw: Struct containing variables accessed by shared code
6708 * hw - Struct containing variables accessed by shared code 4953 * @polarity: output parameter : 0 - Polarity is not reversed
6709 * polarity - output parameter : 0 - Polarity is not reversed
6710 * 1 - Polarity is reversed. 4954 * 1 - Polarity is reversed.
6711 * 4955 *
6712 * returns: - E1000_ERR_XXX 4956 * returns: - E1000_ERR_XXX
@@ -6717,73 +4961,65 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
6717 * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will 4961 * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will
6718 * return 0. If the link speed is 1000 Mbps the polarity status is in the 4962 * return 0. If the link speed is 1000 Mbps the polarity status is in the
6719 * IGP01E1000_PHY_PCS_INIT_REG. 4963 * IGP01E1000_PHY_PCS_INIT_REG.
6720 *****************************************************************************/ 4964 */
6721static s32 e1000_check_polarity(struct e1000_hw *hw, 4965static s32 e1000_check_polarity(struct e1000_hw *hw,
6722 e1000_rev_polarity *polarity) 4966 e1000_rev_polarity *polarity)
6723{ 4967{
6724 s32 ret_val; 4968 s32 ret_val;
6725 u16 phy_data; 4969 u16 phy_data;
6726 4970
6727 DEBUGFUNC("e1000_check_polarity"); 4971 DEBUGFUNC("e1000_check_polarity");
6728 4972
6729 if ((hw->phy_type == e1000_phy_m88) || 4973 if (hw->phy_type == e1000_phy_m88) {
6730 (hw->phy_type == e1000_phy_gg82563)) { 4974 /* return the Polarity bit in the Status register. */
6731 /* return the Polarity bit in the Status register. */ 4975 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
6732 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 4976 &phy_data);
6733 &phy_data); 4977 if (ret_val)
6734 if (ret_val) 4978 return ret_val;
6735 return ret_val; 4979 *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
6736 *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> 4980 M88E1000_PSSR_REV_POLARITY_SHIFT) ?
6737 M88E1000_PSSR_REV_POLARITY_SHIFT) ? 4981 e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
6738 e1000_rev_polarity_reversed : e1000_rev_polarity_normal; 4982
6739 4983 } else if (hw->phy_type == e1000_phy_igp) {
6740 } else if (hw->phy_type == e1000_phy_igp || 4984 /* Read the Status register to check the speed */
6741 hw->phy_type == e1000_phy_igp_3 || 4985 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
6742 hw->phy_type == e1000_phy_igp_2) { 4986 &phy_data);
6743 /* Read the Status register to check the speed */ 4987 if (ret_val)
6744 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 4988 return ret_val;
6745 &phy_data); 4989
6746 if (ret_val) 4990 /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
6747 return ret_val; 4991 * find the polarity status */
6748 4992 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
6749 /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to 4993 IGP01E1000_PSSR_SPEED_1000MBPS) {
6750 * find the polarity status */ 4994
6751 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 4995 /* Read the GIG initialization PCS register (0x00B4) */
6752 IGP01E1000_PSSR_SPEED_1000MBPS) { 4996 ret_val =
6753 4997 e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
6754 /* Read the GIG initialization PCS register (0x00B4) */ 4998 &phy_data);
6755 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, 4999 if (ret_val)
6756 &phy_data); 5000 return ret_val;
6757 if (ret_val) 5001
6758 return ret_val; 5002 /* Check the polarity bits */
6759 5003 *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
6760 /* Check the polarity bits */ 5004 e1000_rev_polarity_reversed :
6761 *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? 5005 e1000_rev_polarity_normal;
6762 e1000_rev_polarity_reversed : e1000_rev_polarity_normal; 5006 } else {
6763 } else { 5007 /* For 10 Mbps, read the polarity bit in the status register. (for
6764 /* For 10 Mbps, read the polarity bit in the status register. (for 5008 * 100 Mbps this bit is always 0) */
6765 * 100 Mbps this bit is always 0) */ 5009 *polarity =
6766 *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? 5010 (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
6767 e1000_rev_polarity_reversed : e1000_rev_polarity_normal; 5011 e1000_rev_polarity_reversed :
6768 } 5012 e1000_rev_polarity_normal;
6769 } else if (hw->phy_type == e1000_phy_ife) { 5013 }
6770 ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL, 5014 }
6771 &phy_data); 5015 return E1000_SUCCESS;
6772 if (ret_val)
6773 return ret_val;
6774 *polarity = ((phy_data & IFE_PESC_POLARITY_REVERSED) >>
6775 IFE_PESC_POLARITY_REVERSED_SHIFT) ?
6776 e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
6777 }
6778 return E1000_SUCCESS;
6779} 5016}
6780 5017
6781/****************************************************************************** 5018/**
6782 * Check if Downshift occured 5019 * e1000_check_downshift - Check if Downshift occurred
6783 * 5020 * @hw: Struct containing variables accessed by shared code
6784 * hw - Struct containing variables accessed by shared code 5021 * @downshift: output parameter : 0 - No Downshift occurred.
6785 * downshift - output parameter : 0 - No Downshift ocured. 5022 * 1 - Downshift occurred.
6786 * 1 - Downshift ocured.
6787 * 5023 *
6788 * returns: - E1000_ERR_XXX 5024 * returns: - E1000_ERR_XXX
6789 * E1000_SUCCESS 5025 * E1000_SUCCESS
@@ -6792,2041 +5028,607 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
6792 * Specific Status register. For IGP phy's, it reads the Downgrade bit in the 5028 * Specific Status register. For IGP phy's, it reads the Downgrade bit in the
6793 * Link Health register. In IGP this bit is latched high, so the driver must 5029 * Link Health register. In IGP this bit is latched high, so the driver must
6794 * read it immediately after link is established. 5030 * read it immediately after link is established.
6795 *****************************************************************************/ 5031 */
6796static s32 e1000_check_downshift(struct e1000_hw *hw) 5032static s32 e1000_check_downshift(struct e1000_hw *hw)
6797{ 5033{
6798 s32 ret_val; 5034 s32 ret_val;
6799 u16 phy_data; 5035 u16 phy_data;
6800
6801 DEBUGFUNC("e1000_check_downshift");
6802
6803 if (hw->phy_type == e1000_phy_igp ||
6804 hw->phy_type == e1000_phy_igp_3 ||
6805 hw->phy_type == e1000_phy_igp_2) {
6806 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
6807 &phy_data);
6808 if (ret_val)
6809 return ret_val;
6810
6811 hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
6812 } else if ((hw->phy_type == e1000_phy_m88) ||
6813 (hw->phy_type == e1000_phy_gg82563)) {
6814 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
6815 &phy_data);
6816 if (ret_val)
6817 return ret_val;
6818
6819 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
6820 M88E1000_PSSR_DOWNSHIFT_SHIFT;
6821 } else if (hw->phy_type == e1000_phy_ife) {
6822 /* e1000_phy_ife supports 10/100 speed only */
6823 hw->speed_downgraded = false;
6824 }
6825
6826 return E1000_SUCCESS;
6827}
6828 5036
6829/***************************************************************************** 5037 DEBUGFUNC("e1000_check_downshift");
6830 *
6831 * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
6832 * gigabit link is achieved to improve link quality.
6833 *
6834 * hw: Struct containing variables accessed by shared code
6835 *
6836 * returns: - E1000_ERR_PHY if fail to read/write the PHY
6837 * E1000_SUCCESS at any other case.
6838 *
6839 ****************************************************************************/
6840 5038
6841static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) 5039 if (hw->phy_type == e1000_phy_igp) {
6842{ 5040 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
6843 s32 ret_val; 5041 &phy_data);
6844 u16 phy_data, phy_saved_data, speed, duplex, i; 5042 if (ret_val)
6845 u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 5043 return ret_val;
6846 {IGP01E1000_PHY_AGC_PARAM_A,
6847 IGP01E1000_PHY_AGC_PARAM_B,
6848 IGP01E1000_PHY_AGC_PARAM_C,
6849 IGP01E1000_PHY_AGC_PARAM_D};
6850 u16 min_length, max_length;
6851
6852 DEBUGFUNC("e1000_config_dsp_after_link_change");
6853
6854 if (hw->phy_type != e1000_phy_igp)
6855 return E1000_SUCCESS;
6856
6857 if (link_up) {
6858 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
6859 if (ret_val) {
6860 DEBUGOUT("Error getting link speed and duplex\n");
6861 return ret_val;
6862 }
6863
6864 if (speed == SPEED_1000) {
6865
6866 ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
6867 if (ret_val)
6868 return ret_val;
6869
6870 if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
6871 min_length >= e1000_igp_cable_length_50) {
6872
6873 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
6874 ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
6875 &phy_data);
6876 if (ret_val)
6877 return ret_val;
6878
6879 phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
6880
6881 ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
6882 phy_data);
6883 if (ret_val)
6884 return ret_val;
6885 }
6886 hw->dsp_config_state = e1000_dsp_config_activated;
6887 }
6888
6889 if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
6890 (min_length < e1000_igp_cable_length_50)) {
6891
6892 u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
6893 u32 idle_errs = 0;
6894
6895 /* clear previous idle error counts */
6896 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6897 &phy_data);
6898 if (ret_val)
6899 return ret_val;
6900
6901 for (i = 0; i < ffe_idle_err_timeout; i++) {
6902 udelay(1000);
6903 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6904 &phy_data);
6905 if (ret_val)
6906 return ret_val;
6907
6908 idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
6909 if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
6910 hw->ffe_config_state = e1000_ffe_config_active;
6911
6912 ret_val = e1000_write_phy_reg(hw,
6913 IGP01E1000_PHY_DSP_FFE,
6914 IGP01E1000_PHY_DSP_FFE_CM_CP);
6915 if (ret_val)
6916 return ret_val;
6917 break;
6918 }
6919
6920 if (idle_errs)
6921 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
6922 }
6923 }
6924 }
6925 } else {
6926 if (hw->dsp_config_state == e1000_dsp_config_activated) {
6927 /* Save off the current value of register 0x2F5B to be restored at
6928 * the end of the routines. */
6929 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
6930
6931 if (ret_val)
6932 return ret_val;
6933
6934 /* Disable the PHY transmitter */
6935 ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
6936
6937 if (ret_val)
6938 return ret_val;
6939
6940 mdelay(20);
6941
6942 ret_val = e1000_write_phy_reg(hw, 0x0000,
6943 IGP01E1000_IEEE_FORCE_GIGA);
6944 if (ret_val)
6945 return ret_val;
6946 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
6947 ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
6948 if (ret_val)
6949 return ret_val;
6950
6951 phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
6952 phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
6953
6954 ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
6955 if (ret_val)
6956 return ret_val;
6957 }
6958
6959 ret_val = e1000_write_phy_reg(hw, 0x0000,
6960 IGP01E1000_IEEE_RESTART_AUTONEG);
6961 if (ret_val)
6962 return ret_val;
6963
6964 mdelay(20);
6965
6966 /* Now enable the transmitter */
6967 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
6968
6969 if (ret_val)
6970 return ret_val;
6971
6972 hw->dsp_config_state = e1000_dsp_config_enabled;
6973 }
6974
6975 if (hw->ffe_config_state == e1000_ffe_config_active) {
6976 /* Save off the current value of register 0x2F5B to be restored at
6977 * the end of the routines. */
6978 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
6979
6980 if (ret_val)
6981 return ret_val;
6982
6983 /* Disable the PHY transmitter */
6984 ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
6985
6986 if (ret_val)
6987 return ret_val;
6988
6989 mdelay(20);
6990
6991 ret_val = e1000_write_phy_reg(hw, 0x0000,
6992 IGP01E1000_IEEE_FORCE_GIGA);
6993 if (ret_val)
6994 return ret_val;
6995 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
6996 IGP01E1000_PHY_DSP_FFE_DEFAULT);
6997 if (ret_val)
6998 return ret_val;
6999
7000 ret_val = e1000_write_phy_reg(hw, 0x0000,
7001 IGP01E1000_IEEE_RESTART_AUTONEG);
7002 if (ret_val)
7003 return ret_val;
7004
7005 mdelay(20);
7006
7007 /* Now enable the transmitter */
7008 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
7009
7010 if (ret_val)
7011 return ret_val;
7012
7013 hw->ffe_config_state = e1000_ffe_config_enabled;
7014 }
7015 }
7016 return E1000_SUCCESS;
7017}
7018 5044
7019/***************************************************************************** 5045 hw->speed_downgraded =
7020 * Set PHY to class A mode 5046 (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
7021 * Assumes the following operations will follow to enable the new class mode. 5047 } else if (hw->phy_type == e1000_phy_m88) {
7022 * 1. Do a PHY soft reset 5048 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
7023 * 2. Restart auto-negotiation or force link. 5049 &phy_data);
7024 * 5050 if (ret_val)
7025 * hw - Struct containing variables accessed by shared code 5051 return ret_val;
7026 ****************************************************************************/
7027static s32 e1000_set_phy_mode(struct e1000_hw *hw)
7028{
7029 s32 ret_val;
7030 u16 eeprom_data;
7031
7032 DEBUGFUNC("e1000_set_phy_mode");
7033
7034 if ((hw->mac_type == e1000_82545_rev_3) &&
7035 (hw->media_type == e1000_media_type_copper)) {
7036 ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
7037 if (ret_val) {
7038 return ret_val;
7039 }
7040
7041 if ((eeprom_data != EEPROM_RESERVED_WORD) &&
7042 (eeprom_data & EEPROM_PHY_CLASS_A)) {
7043 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
7044 if (ret_val)
7045 return ret_val;
7046 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
7047 if (ret_val)
7048 return ret_val;
7049
7050 hw->phy_reset_disable = false;
7051 }
7052 }
7053
7054 return E1000_SUCCESS;
7055}
7056 5052
7057/***************************************************************************** 5053 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
7058 * 5054 M88E1000_PSSR_DOWNSHIFT_SHIFT;
7059 * This function sets the lplu state according to the active flag. When 5055 }
7060 * activating lplu this function also disables smart speed and vise versa.
7061 * lplu will not be activated unless the device autonegotiation advertisment
7062 * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
7063 * hw: Struct containing variables accessed by shared code
7064 * active - true to enable lplu false to disable lplu.
7065 *
7066 * returns: - E1000_ERR_PHY if fail to read/write the PHY
7067 * E1000_SUCCESS at any other case.
7068 *
7069 ****************************************************************************/
7070 5056
7071static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) 5057 return E1000_SUCCESS;
7072{
7073 u32 phy_ctrl = 0;
7074 s32 ret_val;
7075 u16 phy_data;
7076 DEBUGFUNC("e1000_set_d3_lplu_state");
7077
7078 if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
7079 && hw->phy_type != e1000_phy_igp_3)
7080 return E1000_SUCCESS;
7081
7082 /* During driver activity LPLU should not be used or it will attain link
7083 * from the lowest speeds starting from 10Mbps. The capability is used for
7084 * Dx transitions and states */
7085 if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
7086 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
7087 if (ret_val)
7088 return ret_val;
7089 } else if (hw->mac_type == e1000_ich8lan) {
7090 /* MAC writes into PHY register based on the state transition
7091 * and start auto-negotiation. SW driver can overwrite the settings
7092 * in CSR PHY power control E1000_PHY_CTRL register. */
7093 phy_ctrl = er32(PHY_CTRL);
7094 } else {
7095 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
7096 if (ret_val)
7097 return ret_val;
7098 }
7099
7100 if (!active) {
7101 if (hw->mac_type == e1000_82541_rev_2 ||
7102 hw->mac_type == e1000_82547_rev_2) {
7103 phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
7104 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
7105 if (ret_val)
7106 return ret_val;
7107 } else {
7108 if (hw->mac_type == e1000_ich8lan) {
7109 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
7110 ew32(PHY_CTRL, phy_ctrl);
7111 } else {
7112 phy_data &= ~IGP02E1000_PM_D3_LPLU;
7113 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
7114 phy_data);
7115 if (ret_val)
7116 return ret_val;
7117 }
7118 }
7119
7120 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
7121 * Dx states where the power conservation is most important. During
7122 * driver activity we should enable SmartSpeed, so performance is
7123 * maintained. */
7124 if (hw->smart_speed == e1000_smart_speed_on) {
7125 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7126 &phy_data);
7127 if (ret_val)
7128 return ret_val;
7129
7130 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
7131 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7132 phy_data);
7133 if (ret_val)
7134 return ret_val;
7135 } else if (hw->smart_speed == e1000_smart_speed_off) {
7136 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7137 &phy_data);
7138 if (ret_val)
7139 return ret_val;
7140
7141 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7142 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7143 phy_data);
7144 if (ret_val)
7145 return ret_val;
7146 }
7147
7148 } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
7149 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
7150 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
7151
7152 if (hw->mac_type == e1000_82541_rev_2 ||
7153 hw->mac_type == e1000_82547_rev_2) {
7154 phy_data |= IGP01E1000_GMII_FLEX_SPD;
7155 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
7156 if (ret_val)
7157 return ret_val;
7158 } else {
7159 if (hw->mac_type == e1000_ich8lan) {
7160 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
7161 ew32(PHY_CTRL, phy_ctrl);
7162 } else {
7163 phy_data |= IGP02E1000_PM_D3_LPLU;
7164 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
7165 phy_data);
7166 if (ret_val)
7167 return ret_val;
7168 }
7169 }
7170
7171 /* When LPLU is enabled we should disable SmartSpeed */
7172 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
7173 if (ret_val)
7174 return ret_val;
7175
7176 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7177 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
7178 if (ret_val)
7179 return ret_val;
7180
7181 }
7182 return E1000_SUCCESS;
7183} 5058}
7184 5059
7185/***************************************************************************** 5060/**
7186 * 5061 * e1000_config_dsp_after_link_change
7187 * This function sets the lplu d0 state according to the active flag. When 5062 * @hw: Struct containing variables accessed by shared code
7188 * activating lplu this function also disables smart speed and vise versa. 5063 * @link_up: was link up at the time this was called
7189 * lplu will not be activated unless the device autonegotiation advertisment
7190 * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
7191 * hw: Struct containing variables accessed by shared code
7192 * active - true to enable lplu false to disable lplu.
7193 * 5064 *
7194 * returns: - E1000_ERR_PHY if fail to read/write the PHY 5065 * returns: - E1000_ERR_PHY if fail to read/write the PHY
7195 * E1000_SUCCESS at any other case. 5066 * E1000_SUCCESS at any other case.
7196 * 5067 *
7197 ****************************************************************************/ 5068 * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
7198 5069 * gigabit link is achieved to improve link quality.
7199static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) 5070 */
7200{
7201 u32 phy_ctrl = 0;
7202 s32 ret_val;
7203 u16 phy_data;
7204 DEBUGFUNC("e1000_set_d0_lplu_state");
7205
7206 if (hw->mac_type <= e1000_82547_rev_2)
7207 return E1000_SUCCESS;
7208
7209 if (hw->mac_type == e1000_ich8lan) {
7210 phy_ctrl = er32(PHY_CTRL);
7211 } else {
7212 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
7213 if (ret_val)
7214 return ret_val;
7215 }
7216
7217 if (!active) {
7218 if (hw->mac_type == e1000_ich8lan) {
7219 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
7220 ew32(PHY_CTRL, phy_ctrl);
7221 } else {
7222 phy_data &= ~IGP02E1000_PM_D0_LPLU;
7223 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
7224 if (ret_val)
7225 return ret_val;
7226 }
7227
7228 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
7229 * Dx states where the power conservation is most important. During
7230 * driver activity we should enable SmartSpeed, so performance is
7231 * maintained. */
7232 if (hw->smart_speed == e1000_smart_speed_on) {
7233 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7234 &phy_data);
7235 if (ret_val)
7236 return ret_val;
7237
7238 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
7239 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7240 phy_data);
7241 if (ret_val)
7242 return ret_val;
7243 } else if (hw->smart_speed == e1000_smart_speed_off) {
7244 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7245 &phy_data);
7246 if (ret_val)
7247 return ret_val;
7248
7249 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7250 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7251 phy_data);
7252 if (ret_val)
7253 return ret_val;
7254 }
7255
7256
7257 } else {
7258
7259 if (hw->mac_type == e1000_ich8lan) {
7260 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
7261 ew32(PHY_CTRL, phy_ctrl);
7262 } else {
7263 phy_data |= IGP02E1000_PM_D0_LPLU;
7264 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
7265 if (ret_val)
7266 return ret_val;
7267 }
7268
7269 /* When LPLU is enabled we should disable SmartSpeed */
7270 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
7271 if (ret_val)
7272 return ret_val;
7273
7274 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7275 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
7276 if (ret_val)
7277 return ret_val;
7278
7279 }
7280 return E1000_SUCCESS;
7281}
7282 5071
7283/****************************************************************************** 5072static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
7284 * Change VCO speed register to improve Bit Error Rate performance of SERDES.
7285 *
7286 * hw - Struct containing variables accessed by shared code
7287 *****************************************************************************/
7288static s32 e1000_set_vco_speed(struct e1000_hw *hw)
7289{ 5073{
7290 s32 ret_val; 5074 s32 ret_val;
7291 u16 default_page = 0; 5075 u16 phy_data, phy_saved_data, speed, duplex, i;
7292 u16 phy_data; 5076 u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
7293 5077 { IGP01E1000_PHY_AGC_PARAM_A,
7294 DEBUGFUNC("e1000_set_vco_speed"); 5078 IGP01E1000_PHY_AGC_PARAM_B,
5079 IGP01E1000_PHY_AGC_PARAM_C,
5080 IGP01E1000_PHY_AGC_PARAM_D
5081 };
5082 u16 min_length, max_length;
5083
5084 DEBUGFUNC("e1000_config_dsp_after_link_change");
5085
5086 if (hw->phy_type != e1000_phy_igp)
5087 return E1000_SUCCESS;
5088
5089 if (link_up) {
5090 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
5091 if (ret_val) {
5092 DEBUGOUT("Error getting link speed and duplex\n");
5093 return ret_val;
5094 }
7295 5095
7296 switch (hw->mac_type) { 5096 if (speed == SPEED_1000) {
7297 case e1000_82545_rev_3: 5097
7298 case e1000_82546_rev_3: 5098 ret_val =
7299 break; 5099 e1000_get_cable_length(hw, &min_length,
7300 default: 5100 &max_length);
7301 return E1000_SUCCESS; 5101 if (ret_val)
7302 } 5102 return ret_val;
5103
5104 if ((hw->dsp_config_state == e1000_dsp_config_enabled)
5105 && min_length >= e1000_igp_cable_length_50) {
5106
5107 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
5108 ret_val =
5109 e1000_read_phy_reg(hw,
5110 dsp_reg_array[i],
5111 &phy_data);
5112 if (ret_val)
5113 return ret_val;
5114
5115 phy_data &=
5116 ~IGP01E1000_PHY_EDAC_MU_INDEX;
5117
5118 ret_val =
5119 e1000_write_phy_reg(hw,
5120 dsp_reg_array
5121 [i], phy_data);
5122 if (ret_val)
5123 return ret_val;
5124 }
5125 hw->dsp_config_state =
5126 e1000_dsp_config_activated;
5127 }
5128
5129 if ((hw->ffe_config_state == e1000_ffe_config_enabled)
5130 && (min_length < e1000_igp_cable_length_50)) {
5131
5132 u16 ffe_idle_err_timeout =
5133 FFE_IDLE_ERR_COUNT_TIMEOUT_20;
5134 u32 idle_errs = 0;
5135
5136 /* clear previous idle error counts */
5137 ret_val =
5138 e1000_read_phy_reg(hw, PHY_1000T_STATUS,
5139 &phy_data);
5140 if (ret_val)
5141 return ret_val;
5142
5143 for (i = 0; i < ffe_idle_err_timeout; i++) {
5144 udelay(1000);
5145 ret_val =
5146 e1000_read_phy_reg(hw,
5147 PHY_1000T_STATUS,
5148 &phy_data);
5149 if (ret_val)
5150 return ret_val;
5151
5152 idle_errs +=
5153 (phy_data &
5154 SR_1000T_IDLE_ERROR_CNT);
5155 if (idle_errs >
5156 SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT)
5157 {
5158 hw->ffe_config_state =
5159 e1000_ffe_config_active;
5160
5161 ret_val =
5162 e1000_write_phy_reg(hw,
5163 IGP01E1000_PHY_DSP_FFE,
5164 IGP01E1000_PHY_DSP_FFE_CM_CP);
5165 if (ret_val)
5166 return ret_val;
5167 break;
5168 }
5169
5170 if (idle_errs)
5171 ffe_idle_err_timeout =
5172 FFE_IDLE_ERR_COUNT_TIMEOUT_100;
5173 }
5174 }
5175 }
5176 } else {
5177 if (hw->dsp_config_state == e1000_dsp_config_activated) {
5178 /* Save off the current value of register 0x2F5B to be restored at
5179 * the end of the routines. */
5180 ret_val =
5181 e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
5182
5183 if (ret_val)
5184 return ret_val;
5185
5186 /* Disable the PHY transmitter */
5187 ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
5188
5189 if (ret_val)
5190 return ret_val;
5191
5192 mdelay(20);
5193
5194 ret_val = e1000_write_phy_reg(hw, 0x0000,
5195 IGP01E1000_IEEE_FORCE_GIGA);
5196 if (ret_val)
5197 return ret_val;
5198 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
5199 ret_val =
5200 e1000_read_phy_reg(hw, dsp_reg_array[i],
5201 &phy_data);
5202 if (ret_val)
5203 return ret_val;
5204
5205 phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
5206 phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
5207
5208 ret_val =
5209 e1000_write_phy_reg(hw, dsp_reg_array[i],
5210 phy_data);
5211 if (ret_val)
5212 return ret_val;
5213 }
5214
5215 ret_val = e1000_write_phy_reg(hw, 0x0000,
5216 IGP01E1000_IEEE_RESTART_AUTONEG);
5217 if (ret_val)
5218 return ret_val;
5219
5220 mdelay(20);
5221
5222 /* Now enable the transmitter */
5223 ret_val =
5224 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
5225
5226 if (ret_val)
5227 return ret_val;
5228
5229 hw->dsp_config_state = e1000_dsp_config_enabled;
5230 }
7303 5231
7304 /* Set PHY register 30, page 5, bit 8 to 0 */ 5232 if (hw->ffe_config_state == e1000_ffe_config_active) {
5233 /* Save off the current value of register 0x2F5B to be restored at
5234 * the end of the routines. */
5235 ret_val =
5236 e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
7305 5237
7306 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); 5238 if (ret_val)
7307 if (ret_val) 5239 return ret_val;
7308 return ret_val;
7309 5240
7310 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); 5241 /* Disable the PHY transmitter */
7311 if (ret_val) 5242 ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
7312 return ret_val;
7313 5243
7314 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); 5244 if (ret_val)
7315 if (ret_val) 5245 return ret_val;
7316 return ret_val;
7317 5246
7318 phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; 5247 mdelay(20);
7319 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
7320 if (ret_val)
7321 return ret_val;
7322 5248
7323 /* Set PHY register 30, page 4, bit 11 to 1 */ 5249 ret_val = e1000_write_phy_reg(hw, 0x0000,
5250 IGP01E1000_IEEE_FORCE_GIGA);
5251 if (ret_val)
5252 return ret_val;
5253 ret_val =
5254 e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
5255 IGP01E1000_PHY_DSP_FFE_DEFAULT);
5256 if (ret_val)
5257 return ret_val;
7324 5258
7325 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); 5259 ret_val = e1000_write_phy_reg(hw, 0x0000,
7326 if (ret_val) 5260 IGP01E1000_IEEE_RESTART_AUTONEG);
7327 return ret_val; 5261 if (ret_val)
5262 return ret_val;
7328 5263
7329 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); 5264 mdelay(20);
7330 if (ret_val)
7331 return ret_val;
7332 5265
7333 phy_data |= M88E1000_PHY_VCO_REG_BIT11; 5266 /* Now enable the transmitter */
7334 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); 5267 ret_val =
7335 if (ret_val) 5268 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
7336 return ret_val;
7337 5269
7338 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); 5270 if (ret_val)
7339 if (ret_val) 5271 return ret_val;
7340 return ret_val;
7341 5272
7342 return E1000_SUCCESS; 5273 hw->ffe_config_state = e1000_ffe_config_enabled;
5274 }
5275 }
5276 return E1000_SUCCESS;
7343} 5277}
7344 5278
7345 5279/**
7346/***************************************************************************** 5280 * e1000_set_phy_mode - Set PHY to class A mode
7347 * This function reads the cookie from ARC ram. 5281 * @hw: Struct containing variables accessed by shared code
7348 * 5282 *
7349 * returns: - E1000_SUCCESS . 5283 * Assumes the following operations will follow to enable the new class mode.
7350 ****************************************************************************/ 5284 * 1. Do a PHY soft reset
7351static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer) 5285 * 2. Restart auto-negotiation or force link.
5286 */
5287static s32 e1000_set_phy_mode(struct e1000_hw *hw)
7352{ 5288{
7353 u8 i; 5289 s32 ret_val;
7354 u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET; 5290 u16 eeprom_data;
7355 u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
7356
7357 length = (length >> 2);
7358 offset = (offset >> 2);
7359
7360 for (i = 0; i < length; i++) {
7361 *((u32 *)buffer + i) =
7362 E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
7363 }
7364 return E1000_SUCCESS;
7365}
7366 5291
5292 DEBUGFUNC("e1000_set_phy_mode");
7367 5293
7368/***************************************************************************** 5294 if ((hw->mac_type == e1000_82545_rev_3) &&
7369 * This function checks whether the HOST IF is enabled for command operaton 5295 (hw->media_type == e1000_media_type_copper)) {
7370 * and also checks whether the previous command is completed. 5296 ret_val =
7371 * It busy waits in case of previous command is not completed. 5297 e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1,
7372 * 5298 &eeprom_data);
7373 * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or 5299 if (ret_val) {
7374 * timeout 5300 return ret_val;
7375 * - E1000_SUCCESS for success. 5301 }
7376 ****************************************************************************/
7377static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
7378{
7379 u32 hicr;
7380 u8 i;
7381
7382 /* Check that the host interface is enabled. */
7383 hicr = er32(HICR);
7384 if ((hicr & E1000_HICR_EN) == 0) {
7385 DEBUGOUT("E1000_HOST_EN bit disabled.\n");
7386 return -E1000_ERR_HOST_INTERFACE_COMMAND;
7387 }
7388 /* check the previous command is completed */
7389 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
7390 hicr = er32(HICR);
7391 if (!(hicr & E1000_HICR_C))
7392 break;
7393 mdelay(1);
7394 }
7395
7396 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
7397 DEBUGOUT("Previous command timeout failed .\n");
7398 return -E1000_ERR_HOST_INTERFACE_COMMAND;
7399 }
7400 return E1000_SUCCESS;
7401}
7402 5302
7403/***************************************************************************** 5303 if ((eeprom_data != EEPROM_RESERVED_WORD) &&
7404 * This function writes the buffer content at the offset given on the host if. 5304 (eeprom_data & EEPROM_PHY_CLASS_A)) {
7405 * It also does alignment considerations to do the writes in most efficient way. 5305 ret_val =
7406 * Also fills up the sum of the buffer in *buffer parameter. 5306 e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
7407 * 5307 0x000B);
7408 * returns - E1000_SUCCESS for success. 5308 if (ret_val)
7409 ****************************************************************************/ 5309 return ret_val;
7410static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, 5310 ret_val =
7411 u16 offset, u8 *sum) 5311 e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL,
7412{ 5312 0x8104);
7413 u8 *tmp; 5313 if (ret_val)
7414 u8 *bufptr = buffer; 5314 return ret_val;
7415 u32 data = 0; 5315
7416 u16 remaining, i, j, prev_bytes; 5316 hw->phy_reset_disable = false;
7417 5317 }
7418 /* sum = only sum of the data and it is not checksum */ 5318 }
7419
7420 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
7421 return -E1000_ERR_PARAM;
7422 }
7423
7424 tmp = (u8 *)&data;
7425 prev_bytes = offset & 0x3;
7426 offset &= 0xFFFC;
7427 offset >>= 2;
7428
7429 if (prev_bytes) {
7430 data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
7431 for (j = prev_bytes; j < sizeof(u32); j++) {
7432 *(tmp + j) = *bufptr++;
7433 *sum += *(tmp + j);
7434 }
7435 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
7436 length -= j - prev_bytes;
7437 offset++;
7438 }
7439
7440 remaining = length & 0x3;
7441 length -= remaining;
7442
7443 /* Calculate length in DWORDs */
7444 length >>= 2;
7445
7446 /* The device driver writes the relevant command block into the
7447 * ram area. */
7448 for (i = 0; i < length; i++) {
7449 for (j = 0; j < sizeof(u32); j++) {
7450 *(tmp + j) = *bufptr++;
7451 *sum += *(tmp + j);
7452 }
7453
7454 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
7455 }
7456 if (remaining) {
7457 for (j = 0; j < sizeof(u32); j++) {
7458 if (j < remaining)
7459 *(tmp + j) = *bufptr++;
7460 else
7461 *(tmp + j) = 0;
7462
7463 *sum += *(tmp + j);
7464 }
7465 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
7466 }
7467
7468 return E1000_SUCCESS;
7469}
7470 5319
5320 return E1000_SUCCESS;
5321}
7471 5322
7472/***************************************************************************** 5323/**
7473 * This function writes the command header after does the checksum calculation. 5324 * e1000_set_d3_lplu_state - set d3 link power state
5325 * @hw: Struct containing variables accessed by shared code
5326 * @active: true to enable lplu false to disable lplu.
5327 *
5328 * This function sets the lplu state according to the active flag. When
5329 * activating lplu this function also disables smart speed and vise versa.
5330 * lplu will not be activated unless the device autonegotiation advertisement
5331 * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
7474 * 5332 *
7475 * returns - E1000_SUCCESS for success. 5333 * returns: - E1000_ERR_PHY if fail to read/write the PHY
7476 ****************************************************************************/ 5334 * E1000_SUCCESS at any other case.
7477static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, 5335 */
7478 struct e1000_host_mng_command_header *hdr) 5336static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
7479{ 5337{
7480 u16 i; 5338 s32 ret_val;
7481 u8 sum; 5339 u16 phy_data;
7482 u8 *buffer; 5340 DEBUGFUNC("e1000_set_d3_lplu_state");
7483 5341
7484 /* Write the whole command header structure which includes sum of 5342 if (hw->phy_type != e1000_phy_igp)
7485 * the buffer */ 5343 return E1000_SUCCESS;
7486 5344
7487 u16 length = sizeof(struct e1000_host_mng_command_header); 5345 /* During driver activity LPLU should not be used or it will attain link
5346 * from the lowest speeds starting from 10Mbps. The capability is used for
5347 * Dx transitions and states */
5348 if (hw->mac_type == e1000_82541_rev_2
5349 || hw->mac_type == e1000_82547_rev_2) {
5350 ret_val =
5351 e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
5352 if (ret_val)
5353 return ret_val;
5354 }
7488 5355
7489 sum = hdr->checksum; 5356 if (!active) {
7490 hdr->checksum = 0; 5357 if (hw->mac_type == e1000_82541_rev_2 ||
5358 hw->mac_type == e1000_82547_rev_2) {
5359 phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
5360 ret_val =
5361 e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
5362 phy_data);
5363 if (ret_val)
5364 return ret_val;
5365 }
7491 5366
7492 buffer = (u8 *)hdr; 5367 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
7493 i = length; 5368 * Dx states where the power conservation is most important. During
7494 while (i--) 5369 * driver activity we should enable SmartSpeed, so performance is
7495 sum += buffer[i]; 5370 * maintained. */
5371 if (hw->smart_speed == e1000_smart_speed_on) {
5372 ret_val =
5373 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5374 &phy_data);
5375 if (ret_val)
5376 return ret_val;
5377
5378 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
5379 ret_val =
5380 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5381 phy_data);
5382 if (ret_val)
5383 return ret_val;
5384 } else if (hw->smart_speed == e1000_smart_speed_off) {
5385 ret_val =
5386 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5387 &phy_data);
5388 if (ret_val)
5389 return ret_val;
5390
5391 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
5392 ret_val =
5393 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5394 phy_data);
5395 if (ret_val)
5396 return ret_val;
5397 }
5398 } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT)
5399 || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL)
5400 || (hw->autoneg_advertised ==
5401 AUTONEG_ADVERTISE_10_100_ALL)) {
5402
5403 if (hw->mac_type == e1000_82541_rev_2 ||
5404 hw->mac_type == e1000_82547_rev_2) {
5405 phy_data |= IGP01E1000_GMII_FLEX_SPD;
5406 ret_val =
5407 e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
5408 phy_data);
5409 if (ret_val)
5410 return ret_val;
5411 }
7496 5412
7497 hdr->checksum = 0 - sum; 5413 /* When LPLU is enabled we should disable SmartSpeed */
5414 ret_val =
5415 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5416 &phy_data);
5417 if (ret_val)
5418 return ret_val;
7498 5419
7499 length >>= 2; 5420 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
7500 /* The device driver writes the relevant command block into the ram area. */ 5421 ret_val =
7501 for (i = 0; i < length; i++) { 5422 e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
7502 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i)); 5423 phy_data);
7503 E1000_WRITE_FLUSH(); 5424 if (ret_val)
7504 } 5425 return ret_val;
7505 5426
7506 return E1000_SUCCESS; 5427 }
5428 return E1000_SUCCESS;
7507} 5429}
7508 5430
7509 5431/**
7510/***************************************************************************** 5432 * e1000_set_vco_speed
7511 * This function indicates to ARC that a new command is pending which completes 5433 * @hw: Struct containing variables accessed by shared code
7512 * one write operation by the driver.
7513 * 5434 *
7514 * returns - E1000_SUCCESS for success. 5435 * Change VCO speed register to improve Bit Error Rate performance of SERDES.
7515 ****************************************************************************/ 5436 */
7516static s32 e1000_mng_write_commit(struct e1000_hw *hw) 5437static s32 e1000_set_vco_speed(struct e1000_hw *hw)
7517{ 5438{
7518 u32 hicr; 5439 s32 ret_val;
5440 u16 default_page = 0;
5441 u16 phy_data;
7519 5442
7520 hicr = er32(HICR); 5443 DEBUGFUNC("e1000_set_vco_speed");
7521 /* Setting this bit tells the ARC that a new command is pending. */
7522 ew32(HICR, hicr | E1000_HICR_C);
7523 5444
7524 return E1000_SUCCESS; 5445 switch (hw->mac_type) {
7525} 5446 case e1000_82545_rev_3:
5447 case e1000_82546_rev_3:
5448 break;
5449 default:
5450 return E1000_SUCCESS;
5451 }
7526 5452
5453 /* Set PHY register 30, page 5, bit 8 to 0 */
7527 5454
7528/***************************************************************************** 5455 ret_val =
7529 * This function checks the mode of the firmware. 5456 e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
7530 * 5457 if (ret_val)
7531 * returns - true when the mode is IAMT or false. 5458 return ret_val;
7532 ****************************************************************************/
7533bool e1000_check_mng_mode(struct e1000_hw *hw)
7534{
7535 u32 fwsm;
7536
7537 fwsm = er32(FWSM);
7538 5459
7539 if (hw->mac_type == e1000_ich8lan) { 5460 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
7540 if ((fwsm & E1000_FWSM_MODE_MASK) == 5461 if (ret_val)
7541 (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) 5462 return ret_val;
7542 return true;
7543 } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
7544 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
7545 return true;
7546 5463
7547 return false; 5464 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
7548} 5465 if (ret_val)
5466 return ret_val;
7549 5467
5468 phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
5469 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
5470 if (ret_val)
5471 return ret_val;
7550 5472
7551/***************************************************************************** 5473 /* Set PHY register 30, page 4, bit 11 to 1 */
7552 * This function writes the dhcp info .
7553 ****************************************************************************/
7554s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
7555{
7556 s32 ret_val;
7557 struct e1000_host_mng_command_header hdr;
7558
7559 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
7560 hdr.command_length = length;
7561 hdr.reserved1 = 0;
7562 hdr.reserved2 = 0;
7563 hdr.checksum = 0;
7564
7565 ret_val = e1000_mng_enable_host_if(hw);
7566 if (ret_val == E1000_SUCCESS) {
7567 ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
7568 &(hdr.checksum));
7569 if (ret_val == E1000_SUCCESS) {
7570 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
7571 if (ret_val == E1000_SUCCESS)
7572 ret_val = e1000_mng_write_commit(hw);
7573 }
7574 }
7575 return ret_val;
7576}
7577 5474
5475 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
5476 if (ret_val)
5477 return ret_val;
7578 5478
7579/***************************************************************************** 5479 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
7580 * This function calculates the checksum. 5480 if (ret_val)
7581 * 5481 return ret_val;
7582 * returns - checksum of buffer contents.
7583 ****************************************************************************/
7584static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
7585{
7586 u8 sum = 0;
7587 u32 i;
7588 5482
7589 if (!buffer) 5483 phy_data |= M88E1000_PHY_VCO_REG_BIT11;
7590 return 0; 5484 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
5485 if (ret_val)
5486 return ret_val;
7591 5487
7592 for (i=0; i < length; i++) 5488 ret_val =
7593 sum += buffer[i]; 5489 e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
5490 if (ret_val)
5491 return ret_val;
7594 5492
7595 return (u8)(0 - sum); 5493 return E1000_SUCCESS;
7596} 5494}
7597 5495
7598/*****************************************************************************
7599 * This function checks whether tx pkt filtering needs to be enabled or not.
7600 *
7601 * returns - true for packet filtering or false.
7602 ****************************************************************************/
7603bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7604{
7605 /* called in init as well as watchdog timer functions */
7606
7607 s32 ret_val, checksum;
7608 bool tx_filter = false;
7609 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
7610 u8 *buffer = (u8 *) &(hw->mng_cookie);
7611
7612 if (e1000_check_mng_mode(hw)) {
7613 ret_val = e1000_mng_enable_host_if(hw);
7614 if (ret_val == E1000_SUCCESS) {
7615 ret_val = e1000_host_if_read_cookie(hw, buffer);
7616 if (ret_val == E1000_SUCCESS) {
7617 checksum = hdr->checksum;
7618 hdr->checksum = 0;
7619 if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
7620 checksum == e1000_calculate_mng_checksum((char *)buffer,
7621 E1000_MNG_DHCP_COOKIE_LENGTH)) {
7622 if (hdr->status &
7623 E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
7624 tx_filter = true;
7625 } else
7626 tx_filter = true;
7627 } else
7628 tx_filter = true;
7629 }
7630 }
7631
7632 hw->tx_pkt_filtering = tx_filter;
7633 return tx_filter;
7634}
7635 5496
7636/****************************************************************************** 5497/**
7637 * Verifies the hardware needs to allow ARPs to be processed by the host 5498 * e1000_enable_mng_pass_thru - check for bmc pass through
7638 * 5499 * @hw: Struct containing variables accessed by shared code
7639 * hw - Struct containing variables accessed by shared code
7640 * 5500 *
5501 * Verifies the hardware needs to allow ARPs to be processed by the host
7641 * returns: - true/false 5502 * returns: - true/false
7642 * 5503 */
7643 *****************************************************************************/
7644u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) 5504u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
7645{ 5505{
7646 u32 manc; 5506 u32 manc;
7647 u32 fwsm, factps;
7648
7649 if (hw->asf_firmware_present) {
7650 manc = er32(MANC);
7651
7652 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
7653 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
7654 return false;
7655 if (e1000_arc_subsystem_valid(hw)) {
7656 fwsm = er32(FWSM);
7657 factps = er32(FACTPS);
7658
7659 if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
7660 e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
7661 return true;
7662 } else
7663 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
7664 return true;
7665 }
7666 return false;
7667}
7668 5507
7669static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) 5508 if (hw->asf_firmware_present) {
7670{ 5509 manc = er32(MANC);
7671 s32 ret_val; 5510
7672 u16 mii_status_reg; 5511 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
7673 u16 i; 5512 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
7674 5513 return false;
7675 /* Polarity reversal workaround for forced 10F/10H links. */ 5514 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
7676 5515 return true;
7677 /* Disable the transmitter on the PHY */ 5516 }
7678 5517 return false;
7679 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
7680 if (ret_val)
7681 return ret_val;
7682 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
7683 if (ret_val)
7684 return ret_val;
7685
7686 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
7687 if (ret_val)
7688 return ret_val;
7689
7690 /* This loop will early-out if the NO link condition has been met. */
7691 for (i = PHY_FORCE_TIME; i > 0; i--) {
7692 /* Read the MII Status Register and wait for Link Status bit
7693 * to be clear.
7694 */
7695
7696 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7697 if (ret_val)
7698 return ret_val;
7699
7700 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7701 if (ret_val)
7702 return ret_val;
7703
7704 if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
7705 mdelay(100);
7706 }
7707
7708 /* Recommended delay time after link has been lost */
7709 mdelay(1000);
7710
7711 /* Now we will re-enable th transmitter on the PHY */
7712
7713 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
7714 if (ret_val)
7715 return ret_val;
7716 mdelay(50);
7717 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
7718 if (ret_val)
7719 return ret_val;
7720 mdelay(50);
7721 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
7722 if (ret_val)
7723 return ret_val;
7724 mdelay(50);
7725 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
7726 if (ret_val)
7727 return ret_val;
7728
7729 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
7730 if (ret_val)
7731 return ret_val;
7732
7733 /* This loop will early-out if the link condition has been met. */
7734 for (i = PHY_FORCE_TIME; i > 0; i--) {
7735 /* Read the MII Status Register and wait for Link Status bit
7736 * to be set.
7737 */
7738
7739 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7740 if (ret_val)
7741 return ret_val;
7742
7743 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7744 if (ret_val)
7745 return ret_val;
7746
7747 if (mii_status_reg & MII_SR_LINK_STATUS) break;
7748 mdelay(100);
7749 }
7750 return E1000_SUCCESS;
7751} 5518}
7752 5519
7753/*************************************************************************** 5520static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
7754 *
7755 * Disables PCI-Express master access.
7756 *
7757 * hw: Struct containing variables accessed by shared code
7758 *
7759 * returns: - none.
7760 *
7761 ***************************************************************************/
7762static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7763{ 5521{
7764 u32 ctrl; 5522 s32 ret_val;
5523 u16 mii_status_reg;
5524 u16 i;
7765 5525
7766 DEBUGFUNC("e1000_set_pci_express_master_disable"); 5526 /* Polarity reversal workaround for forced 10F/10H links. */
7767 5527
7768 if (hw->bus_type != e1000_bus_type_pci_express) 5528 /* Disable the transmitter on the PHY */
7769 return;
7770 5529
7771 ctrl = er32(CTRL); 5530 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
7772 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; 5531 if (ret_val)
7773 ew32(CTRL, ctrl); 5532 return ret_val;
7774} 5533 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
5534 if (ret_val)
5535 return ret_val;
7775 5536
7776/******************************************************************************* 5537 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
7777 * 5538 if (ret_val)
7778 * Disables PCI-Express master access and verifies there are no pending requests 5539 return ret_val;
7779 *
7780 * hw: Struct containing variables accessed by shared code
7781 *
7782 * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
7783 * caused the master requests to be disabled.
7784 * E1000_SUCCESS master requests disabled.
7785 *
7786 ******************************************************************************/
7787s32 e1000_disable_pciex_master(struct e1000_hw *hw)
7788{
7789 s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
7790 5540
7791 DEBUGFUNC("e1000_disable_pciex_master"); 5541 /* This loop will early-out if the NO link condition has been met. */
5542 for (i = PHY_FORCE_TIME; i > 0; i--) {
5543 /* Read the MII Status Register and wait for Link Status bit
5544 * to be clear.
5545 */
7792 5546
7793 if (hw->bus_type != e1000_bus_type_pci_express) 5547 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
7794 return E1000_SUCCESS; 5548 if (ret_val)
5549 return ret_val;
7795 5550
7796 e1000_set_pci_express_master_disable(hw); 5551 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
5552 if (ret_val)
5553 return ret_val;
7797 5554
7798 while (timeout) { 5555 if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
7799 if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) 5556 break;
7800 break; 5557 mdelay(100);
7801 else 5558 }
7802 udelay(100);
7803 timeout--;
7804 }
7805
7806 if (!timeout) {
7807 DEBUGOUT("Master requests are pending.\n");
7808 return -E1000_ERR_MASTER_REQUESTS_PENDING;
7809 }
7810 5559
7811 return E1000_SUCCESS; 5560 /* Recommended delay time after link has been lost */
5561 mdelay(1000);
5562
5563 /* Now we will re-enable th transmitter on the PHY */
5564
5565 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
5566 if (ret_val)
5567 return ret_val;
5568 mdelay(50);
5569 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
5570 if (ret_val)
5571 return ret_val;
5572 mdelay(50);
5573 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
5574 if (ret_val)
5575 return ret_val;
5576 mdelay(50);
5577 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
5578 if (ret_val)
5579 return ret_val;
5580
5581 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
5582 if (ret_val)
5583 return ret_val;
5584
5585 /* This loop will early-out if the link condition has been met. */
5586 for (i = PHY_FORCE_TIME; i > 0; i--) {
5587 /* Read the MII Status Register and wait for Link Status bit
5588 * to be set.
5589 */
5590
5591 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
5592 if (ret_val)
5593 return ret_val;
5594
5595 ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
5596 if (ret_val)
5597 return ret_val;
5598
5599 if (mii_status_reg & MII_SR_LINK_STATUS)
5600 break;
5601 mdelay(100);
5602 }
5603 return E1000_SUCCESS;
7812} 5604}
7813 5605
7814/******************************************************************************* 5606/**
5607 * e1000_get_auto_rd_done
5608 * @hw: Struct containing variables accessed by shared code
7815 * 5609 *
7816 * Check for EEPROM Auto Read bit done. 5610 * Check for EEPROM Auto Read bit done.
7817 *
7818 * hw: Struct containing variables accessed by shared code
7819 *
7820 * returns: - E1000_ERR_RESET if fail to reset MAC 5611 * returns: - E1000_ERR_RESET if fail to reset MAC
7821 * E1000_SUCCESS at any other case. 5612 * E1000_SUCCESS at any other case.
7822 * 5613 */
7823 ******************************************************************************/
7824static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5614static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
7825{ 5615{
7826 s32 timeout = AUTO_READ_DONE_TIMEOUT; 5616 DEBUGFUNC("e1000_get_auto_rd_done");
7827 5617 msleep(5);
7828 DEBUGFUNC("e1000_get_auto_rd_done"); 5618 return E1000_SUCCESS;
7829
7830 switch (hw->mac_type) {
7831 default:
7832 msleep(5);
7833 break;
7834 case e1000_82571:
7835 case e1000_82572:
7836 case e1000_82573:
7837 case e1000_80003es2lan:
7838 case e1000_ich8lan:
7839 while (timeout) {
7840 if (er32(EECD) & E1000_EECD_AUTO_RD)
7841 break;
7842 else msleep(1);
7843 timeout--;
7844 }
7845
7846 if (!timeout) {
7847 DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
7848 return -E1000_ERR_RESET;
7849 }
7850 break;
7851 }
7852
7853 /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
7854 * Need to wait for PHY configuration completion before accessing NVM
7855 * and PHY. */
7856 if (hw->mac_type == e1000_82573)
7857 msleep(25);
7858
7859 return E1000_SUCCESS;
7860} 5619}
7861 5620
7862/*************************************************************************** 5621/**
7863 * Checks if the PHY configuration is done 5622 * e1000_get_phy_cfg_done
7864 * 5623 * @hw: Struct containing variables accessed by shared code
7865 * hw: Struct containing variables accessed by shared code
7866 * 5624 *
5625 * Checks if the PHY configuration is done
7867 * returns: - E1000_ERR_RESET if fail to reset MAC 5626 * returns: - E1000_ERR_RESET if fail to reset MAC
7868 * E1000_SUCCESS at any other case. 5627 * E1000_SUCCESS at any other case.
7869 * 5628 */
7870 ***************************************************************************/
7871static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5629static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
7872{ 5630{
7873 s32 timeout = PHY_CFG_TIMEOUT; 5631 DEBUGFUNC("e1000_get_phy_cfg_done");
7874 u32 cfg_mask = E1000_EEPROM_CFG_DONE; 5632 mdelay(10);
7875 5633 return E1000_SUCCESS;
7876 DEBUGFUNC("e1000_get_phy_cfg_done");
7877
7878 switch (hw->mac_type) {
7879 default:
7880 mdelay(10);
7881 break;
7882 case e1000_80003es2lan:
7883 /* Separate *_CFG_DONE_* bit for each port */
7884 if (er32(STATUS) & E1000_STATUS_FUNC_1)
7885 cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
7886 /* Fall Through */
7887 case e1000_82571:
7888 case e1000_82572:
7889 while (timeout) {
7890 if (er32(EEMNGCTL) & cfg_mask)
7891 break;
7892 else
7893 msleep(1);
7894 timeout--;
7895 }
7896 if (!timeout) {
7897 DEBUGOUT("MNG configuration cycle has not completed.\n");
7898 return -E1000_ERR_RESET;
7899 }
7900 break;
7901 }
7902
7903 return E1000_SUCCESS;
7904}
7905
/***************************************************************************
 *
 * Obtaining both the SW (SMBI) and FW (SWESMBI) semaphore bits before
 * resetting the adapter or accessing the EEPROM, so that software and
 * firmware do not collide on the shared NVM interface.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_EEPROM if fail to access EEPROM.
 *            E1000_SUCCESS at any other case.
 *
 ***************************************************************************/
static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
{
    s32 timeout;
    u32 swsm;

    DEBUGFUNC("e1000_get_hw_eeprom_semaphore");

    /* Parts without the semaphore need no arbitration. */
    if (!hw->eeprom_semaphore_present)
        return E1000_SUCCESS;

    if (hw->mac_type == e1000_80003es2lan) {
        /* Get the SW semaphore. */
        if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
            return -E1000_ERR_EEPROM;
    }

    /* Get the FW semaphore. */
    timeout = hw->eeprom.word_size + 1;
    while (timeout) {
        swsm = er32(SWSM);
        swsm |= E1000_SWSM_SWESMBI;
        ew32(SWSM, swsm);
        /* if we managed to set the bit we got the semaphore. */
        swsm = er32(SWSM);
        if (swsm & E1000_SWSM_SWESMBI)
            break;

        udelay(50);
        timeout--;
    }

    if (!timeout) {
        /* Release semaphores */
        e1000_put_hw_eeprom_semaphore(hw);
        DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
        return -E1000_ERR_EEPROM;
    }

    return E1000_SUCCESS;
}
7957
7958/***************************************************************************
7959 * This function clears HW semaphore bits.
7960 *
7961 * hw: Struct containing variables accessed by shared code
7962 *
7963 * returns: - None.
7964 *
7965 ***************************************************************************/
7966static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
7967{
7968 u32 swsm;
7969
7970 DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
7971
7972 if (!hw->eeprom_semaphore_present)
7973 return;
7974
7975 swsm = er32(SWSM);
7976 if (hw->mac_type == e1000_80003es2lan) {
7977 /* Release both semaphores. */
7978 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
7979 } else
7980 swsm &= ~(E1000_SWSM_SWESMBI);
7981 ew32(SWSM, swsm);
7982}
7983
/***************************************************************************
 *
 * Obtaining software semaphore bit (SMBI) before resetting PHY.
 * Only the 80003es2lan implements this semaphore; other parts return
 * success immediately.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_RESET if fail to obtain semaphore.
 *            E1000_SUCCESS at any other case.
 *
 ***************************************************************************/
static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
{
    /* Timeout scaled to the EEPROM size, matching the FW-semaphore wait. */
    s32 timeout = hw->eeprom.word_size + 1;
    u32 swsm;

    DEBUGFUNC("e1000_get_software_semaphore");

    if (hw->mac_type != e1000_80003es2lan) {
        return E1000_SUCCESS;
    }

    while (timeout) {
        swsm = er32(SWSM);
        /* If SMBI bit cleared, it is now set and we hold the semaphore */
        if (!(swsm & E1000_SWSM_SMBI))
            break;
        mdelay(1);
        timeout--;
    }

    if (!timeout) {
        DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
        return -E1000_ERR_RESET;
    }

    return E1000_SUCCESS;
}
8021
8022/***************************************************************************
8023 *
8024 * Release semaphore bit (SMBI).
8025 *
8026 * hw: Struct containing variables accessed by shared code
8027 *
8028 ***************************************************************************/
8029static void e1000_release_software_semaphore(struct e1000_hw *hw)
8030{
8031 u32 swsm;
8032
8033 DEBUGFUNC("e1000_release_software_semaphore");
8034
8035 if (hw->mac_type != e1000_80003es2lan) {
8036 return;
8037 }
8038
8039 swsm = er32(SWSM);
8040 /* Release the SW semaphores.*/
8041 swsm &= ~E1000_SWSM_SMBI;
8042 ew32(SWSM, swsm);
8043}
8044
8045/******************************************************************************
8046 * Checks if PHY reset is blocked due to SOL/IDER session, for example.
8047 * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to
8048 * the caller to figure out how to deal with it.
8049 *
8050 * hw - Struct containing variables accessed by shared code
8051 *
8052 * returns: - E1000_BLK_PHY_RESET
8053 * E1000_SUCCESS
8054 *
8055 *****************************************************************************/
8056s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
8057{
8058 u32 manc = 0;
8059 u32 fwsm = 0;
8060
8061 if (hw->mac_type == e1000_ich8lan) {
8062 fwsm = er32(FWSM);
8063 return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
8064 : E1000_BLK_PHY_RESET;
8065 }
8066
8067 if (hw->mac_type > e1000_82547_rev_2)
8068 manc = er32(MANC);
8069 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
8070 E1000_BLK_PHY_RESET : E1000_SUCCESS;
8071}
8072
8073static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
8074{
8075 u32 fwsm;
8076
8077 /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
8078 * may not be provided a DMA clock when no manageability features are
8079 * enabled. We do not want to perform any reads/writes to these registers
8080 * if this is the case. We read FWSM to determine the manageability mode.
8081 */
8082 switch (hw->mac_type) {
8083 case e1000_82571:
8084 case e1000_82572:
8085 case e1000_82573:
8086 case e1000_80003es2lan:
8087 fwsm = er32(FWSM);
8088 if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
8089 return true;
8090 break;
8091 case e1000_ich8lan:
8092 return true;
8093 default:
8094 break;
8095 }
8096 return false;
8097}
8098
8099
/******************************************************************************
 * Configure PCI-Ex no-snoop
 *
 * hw - Struct containing variables accessed by shared code.
 * no_snoop - Bitmap of no-snoop events to enable in GCR.
 *
 * returns: E1000_SUCCESS (always; non-PCIe buses are a silent no-op)
 *
 *****************************************************************************/
static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
    u32 gcr_reg = 0;

    DEBUGFUNC("e1000_set_pci_ex_no_snoop");

    /* Bus type may not have been probed yet; do it now so the
     * PCI-Express check below is meaningful. */
    if (hw->bus_type == e1000_bus_type_unknown)
        e1000_get_bus_info(hw);

    if (hw->bus_type != e1000_bus_type_pci_express)
        return E1000_SUCCESS;

    if (no_snoop) {
        /* Replace the current no-snoop selection with the caller's. */
        gcr_reg = er32(GCR);
        gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
        gcr_reg |= no_snoop;
        ew32(GCR, gcr_reg);
    }
    if (hw->mac_type == e1000_ich8lan) {
        u32 ctrl_ext;

        /* ICH8 (82566): force snooping on and disable relaxed ordering;
         * note this overwrites any GCR value set above. */
        ew32(GCR, PCI_EX_82566_SNOOP_ALL);

        ctrl_ext = er32(CTRL_EXT);
        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
        ew32(CTRL_EXT, ctrl_ext);
    }

    return E1000_SUCCESS;
}
8139
/***************************************************************************
 *
 * Get software semaphore FLAG bit (SWFLAG).
 * SWFLAG is used to synchronize the access to all shared resource between
 * SW, FW and HW. Only ICH8 implements it; other MACs return success
 * without touching hardware.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_CONFIG if the flag cannot be acquired in time.
 *            E1000_SUCCESS at any other case.
 *
 ***************************************************************************/
static s32 e1000_get_software_flag(struct e1000_hw *hw)
{
    s32 timeout = PHY_CFG_TIMEOUT;
    u32 extcnf_ctrl;

    DEBUGFUNC("e1000_get_software_flag");

    if (hw->mac_type == e1000_ich8lan) {
        while (timeout) {
            /* Request the flag, then read back: the bit sticks only
             * if FW/HW were not holding the resource. */
            extcnf_ctrl = er32(EXTCNF_CTRL);
            extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
            ew32(EXTCNF_CTRL, extcnf_ctrl);

            extcnf_ctrl = er32(EXTCNF_CTRL);
            if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
                break;
            mdelay(1);
            timeout--;
        }

        if (!timeout) {
            DEBUGOUT("FW or HW locks the resource too long.\n");
            return -E1000_ERR_CONFIG;
        }
    }

    return E1000_SUCCESS;
}
8177
8178/***************************************************************************
8179 *
8180 * Release software semaphore FLAG bit (SWFLAG).
8181 * SWFLAG is used to synchronize the access to all shared resource between
8182 * SW, FW and HW.
8183 *
8184 * hw: Struct containing variables accessed by shared code
8185 *
8186 ***************************************************************************/
8187static void e1000_release_software_flag(struct e1000_hw *hw)
8188{
8189 u32 extcnf_ctrl;
8190
8191 DEBUGFUNC("e1000_release_software_flag");
8192
8193 if (hw->mac_type == e1000_ich8lan) {
8194 extcnf_ctrl= er32(EXTCNF_CTRL);
8195 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8196 ew32(EXTCNF_CTRL, extcnf_ctrl);
8197 }
8198
8199 return;
8200}
8201
/******************************************************************************
 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
 * register. Modified words present in the shadow RAM cache are served from
 * the cache instead of the flash.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to read
 * data - word read from the EEPROM
 * words - number of words to read
 *
 * returns: E1000_SUCCESS, or the first error from flag acquisition /
 *          flash read.
 *****************************************************************************/
static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
                                  u16 *data)
{
    s32 error = E1000_SUCCESS;
    u32 flash_bank = 0;
    u32 act_offset = 0;
    u32 bank_offset = 0;
    u16 word = 0;
    u16 i = 0;

    /* We need to know which is the valid flash bank.  In the event
     * that we didn't allocate eeprom_shadow_ram, we may not be
     * managing flash_bank.  So it cannot be trusted and needs
     * to be updated with each read.
     */
    /* Value of bit 22 corresponds to the flash bank we're on. */
    flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;

    /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
    bank_offset = flash_bank * (hw->flash_bank_size * 2);

    /* Serialize against FW/HW flash access. */
    error = e1000_get_software_flag(hw);
    if (error != E1000_SUCCESS)
        return error;

    for (i = 0; i < words; i++) {
        /* Prefer the cached (modified-but-uncommitted) word if present. */
        if (hw->eeprom_shadow_ram != NULL &&
            hw->eeprom_shadow_ram[offset+i].modified) {
            data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
        } else {
            /* The NVM part needs a byte offset, hence * 2 */
            act_offset = bank_offset + ((offset + i) * 2);
            error = e1000_read_ich8_word(hw, act_offset, &word);
            if (error != E1000_SUCCESS)
                break;
            data[i] = word;
        }
    }

    e1000_release_software_flag(hw);

    return error;
}
8254
/******************************************************************************
 * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
 * register. Actually, writes are written to the shadow ram cache in the hw
 * structure hw->e1000_shadow_ram. e1000_commit_shadow_ram flushes this to
 * the NVM, which occurs when the NVM checksum is updated.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to write
 * words - number of words to write
 * data - words to write to the EEPROM
 *
 * returns: E1000_SUCCESS, or -E1000_ERR_EEPROM when no shadow RAM is
 *          allocated or the offset is out of range.
 *****************************************************************************/
static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
                                   u16 *data)
{
    u32 i = 0;
    s32 error = E1000_SUCCESS;

    /* Serialize against FW/HW flash access. */
    error = e1000_get_software_flag(hw);
    if (error != E1000_SUCCESS)
        return error;

    /* A driver can write to the NVM only if it has eeprom_shadow_ram
     * allocated.  Subsequent reads to the modified words are read from
     * this cached structure as well.  Writes will only go into this
     * cached structure unless it's followed by a call to
     * e1000_update_eeprom_checksum() where it will commit the changes
     * and clear the "modified" field.
     */
    if (hw->eeprom_shadow_ram != NULL) {
        for (i = 0; i < words; i++) {
            if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
                hw->eeprom_shadow_ram[offset+i].modified = true;
                hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
            } else {
                error = -E1000_ERR_EEPROM;
                break;
            }
        }
    } else {
        /* Drivers have the option to not allocate eeprom_shadow_ram as long
         * as they don't perform any NVM writes.  An attempt in doing so
         * will result in this error.
         */
        error = -E1000_ERR_EEPROM;
    }

    e1000_release_software_flag(hw);

    return error;
}
8305
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started: it clears stale error bits and waits for any in-progress
 * cycle to finish before marking the controller ready (FLCDONE).
 *
 * hw - The pointer to the hw structure
 *
 * returns: E1000_SUCCESS when a new cycle may be started,
 *          E1000_ERR_EEPROM otherwise.
 ****************************************************************************/
static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
{
    union ich8_hws_flash_status hsfsts;
    s32 error = E1000_ERR_EEPROM;
    s32 i = 0;

    DEBUGFUNC("e1000_ich8_cycle_init");

    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

    /* May be check the Flash Des Valid bit in Hw status */
    if (hsfsts.hsf_status.fldesvalid == 0) {
        DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.");
        return error;
    }

    /* Clear FCERR in Hw status by writing 1 */
    /* Clear DAEL in Hw status by writing a 1 */
    hsfsts.hsf_status.flcerr = 1;
    hsfsts.hsf_status.dael = 1;

    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

    /* Either we should have a hardware SPI cycle in progress bit to check
     * against, in order to start a new cycle or FDONE bit should be changed
     * in the hardware so that it is 1 after harware reset, which can then be
     * used as an indication whether a cycle is in progress or has been
     * completed .. we should also have some software semaphore mechanism to
     * guard FDONE or the cycle in progress bit so that two threads access to
     * those bits can be sequentiallized or a way so that 2 threads dont
     * start the cycle at the same time */

    if (hsfsts.hsf_status.flcinprog == 0) {
        /* There is no cycle running at present, so we can start a cycle */
        /* Begin by setting Flash Cycle Done. */
        hsfsts.hsf_status.flcdone = 1;
        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
        error = E1000_SUCCESS;
    } else {
        /* otherwise poll for sometime so the current cycle has a chance
         * to end before giving up. */
        for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
            if (hsfsts.hsf_status.flcinprog == 0) {
                error = E1000_SUCCESS;
                break;
            }
            udelay(1);
        }
        if (error == E1000_SUCCESS) {
            /* Successful in waiting for previous cycle to timeout,
             * now set the Flash Cycle Done. */
            hsfsts.hsf_status.flcdone = 1;
            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
        } else {
            DEBUGOUT("Flash controller busy, cannot get access");
        }
    }
    return error;
}
8372
/******************************************************************************
 * This function starts a flash cycle (FLCGO) and polls FLCDONE until the
 * cycle completes or the timeout expires.
 *
 * hw - The pointer to the hw structure
 * timeout - maximum number of 1us polls to wait for completion
 *
 * returns: E1000_SUCCESS if the cycle completed without FCERR,
 *          E1000_ERR_EEPROM on timeout or flash error.
 ****************************************************************************/
static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
{
    union ich8_hws_flash_ctrl hsflctl;
    union ich8_hws_flash_status hsfsts;
    s32 error = E1000_ERR_EEPROM;
    u32 i = 0;

    /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
    hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
    hsflctl.hsf_ctrl.flcgo = 1;
    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

    /* wait till FDONE bit is set to 1 */
    do {
        hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
        if (hsfsts.hsf_status.flcdone == 1)
            break;
        udelay(1);
        i++;
    } while (i < timeout);
    /* Success requires completion (FLCDONE) with no flash error (FLCERR). */
    if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
        error = E1000_SUCCESS;
    }
    return error;
}
8403
/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 * On flash error (FCERR) the whole sequence is retried up to
 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
 *
 * hw - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *
 * returns: E1000_SUCCESS on a completed read, -E1000_ERR_EEPROM on bad
 *          arguments, or the last cycle error after retries are exhausted.
 *****************************************************************************/
static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
                                u16 *data)
{
    union ich8_hws_flash_status hsfsts;
    union ich8_hws_flash_ctrl hsflctl;
    u32 flash_linear_address;
    u32 flash_data = 0;
    s32 error = -E1000_ERR_EEPROM;
    s32 count = 0;

    DEBUGFUNC("e1000_read_ich8_data");

    /* Validate arguments before touching hardware. */
    if (size < 1 || size > 2 || data == NULL ||
        index > ICH_FLASH_LINEAR_ADDR_MASK)
        return error;

    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
                           hw->flash_base_addr;

    do {
        udelay(1);
        /* Steps */
        error = e1000_ich8_cycle_init(hw);
        if (error != E1000_SUCCESS)
            break;

        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
        hsflctl.hsf_ctrl.fldbcount = size - 1;
        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

        /* Write the last 24 bits of index into Flash Linear address field in
         * Flash Address */
        /* TODO: TBD maybe check the index against the size of flash */

        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);

        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);

        /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
         * sequence a few more times, else read in (shift in) the Flash Data0,
         * the order is least significant byte first msb to lsb */
        if (error == E1000_SUCCESS) {
            flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
            if (size == 1) {
                *data = (u8)(flash_data & 0x000000FF);
            } else if (size == 2) {
                *data = (u16)(flash_data & 0x0000FFFF);
            }
            break;
        } else {
            /* If we've gotten here, then things are probably completely hosed,
             * but if the error condition is detected, it won't hurt to give
             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
             */
            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
            if (hsfsts.hsf_status.flcerr == 1) {
                /* Repeat for some time before giving up. */
                continue;
            } else if (hsfsts.hsf_status.flcdone == 0) {
                DEBUGOUT("Timeout error - flash cycle did not complete.");
                break;
            }
        }
    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

    return error;
}
8481
/******************************************************************************
 * Writes One /two bytes to the NVM using the ICH8 flash access registers.
 * On flash error (FCERR) the whole sequence is retried up to
 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
 *
 * hw - The pointer to the hw structure
 * index - The index of the byte/word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - The byte(s) to write to the NVM.
 *
 * returns: E1000_SUCCESS on a completed write, -E1000_ERR_EEPROM on bad
 *          arguments, or the last cycle error after retries are exhausted.
 *****************************************************************************/
static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
                                 u16 data)
{
    union ich8_hws_flash_status hsfsts;
    union ich8_hws_flash_ctrl hsflctl;
    u32 flash_linear_address;
    u32 flash_data = 0;
    s32 error = -E1000_ERR_EEPROM;
    s32 count = 0;

    DEBUGFUNC("e1000_write_ich8_data");

    /* Validate arguments: data must fit in `size` bytes and the index
     * must be inside the linear flash window. */
    if (size < 1 || size > 2 || data > size * 0xff ||
        index > ICH_FLASH_LINEAR_ADDR_MASK)
        return error;

    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
                           hw->flash_base_addr;

    do {
        udelay(1);
        /* Steps */
        error = e1000_ich8_cycle_init(hw);
        if (error != E1000_SUCCESS)
            break;

        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
        hsflctl.hsf_ctrl.fldbcount = size -1;
        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);

        /* Write the last 24 bits of index into Flash Linear address field in
         * Flash Address */
        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);

        if (size == 1)
            flash_data = (u32)data & 0x00FF;
        else
            flash_data = (u32)data;

        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);

        /* check if FCERR is set to 1 , if set to 1, clear it and try the whole
         * sequence a few more times else done */
        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
        if (error == E1000_SUCCESS) {
            break;
        } else {
            /* If we're here, then things are most likely completely hosed,
             * but if the error condition is detected, it won't hurt to give
             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
             */
            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
            if (hsfsts.hsf_status.flcerr == 1) {
                /* Repeat for some time before giving up. */
                continue;
            } else if (hsfsts.hsf_status.flcdone == 0) {
                DEBUGOUT("Timeout error - flash cycle did not complete.");
                break;
            }
        }
    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

    return error;
}
8556
8557/******************************************************************************
8558 * Reads a single byte from the NVM using the ICH8 flash access registers.
8559 *
8560 * hw - pointer to e1000_hw structure
8561 * index - The index of the byte to read.
8562 * data - Pointer to a byte to store the value read.
8563 *****************************************************************************/
8564static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
8565{
8566 s32 status = E1000_SUCCESS;
8567 u16 word = 0;
8568
8569 status = e1000_read_ich8_data(hw, index, 1, &word);
8570 if (status == E1000_SUCCESS) {
8571 *data = (u8)word;
8572 }
8573
8574 return status;
8575}
8576
8577/******************************************************************************
8578 * Writes a single byte to the NVM using the ICH8 flash access registers.
8579 * Performs verification by reading back the value and then going through
8580 * a retry algorithm before giving up.
8581 *
8582 * hw - pointer to e1000_hw structure
8583 * index - The index of the byte to write.
8584 * byte - The byte to write to the NVM.
8585 *****************************************************************************/
8586static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
8587{
8588 s32 error = E1000_SUCCESS;
8589 s32 program_retries = 0;
8590
8591 DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
8592
8593 error = e1000_write_ich8_byte(hw, index, byte);
8594
8595 if (error != E1000_SUCCESS) {
8596 for (program_retries = 0; program_retries < 100; program_retries++) {
8597 DEBUGOUT2("Retrying \t Byte := %2.2X Offset := %d\n", byte, index);
8598 error = e1000_write_ich8_byte(hw, index, byte);
8599 udelay(100);
8600 if (error == E1000_SUCCESS)
8601 break;
8602 }
8603 }
8604
8605 if (program_retries == 100)
8606 error = E1000_ERR_EEPROM;
8607
8608 return error;
8609}
8610
8611/******************************************************************************
8612 * Writes a single byte to the NVM using the ICH8 flash access registers.
8613 *
8614 * hw - pointer to e1000_hw structure
8615 * index - The index of the byte to read.
8616 * data - The byte to write to the NVM.
8617 *****************************************************************************/
8618static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
8619{
8620 s32 status = E1000_SUCCESS;
8621 u16 word = (u16)data;
8622
8623 status = e1000_write_ich8_data(hw, index, 1, word);
8624
8625 return status;
8626}
8627
8628/******************************************************************************
8629 * Reads a word from the NVM using the ICH8 flash access registers.
8630 *
8631 * hw - pointer to e1000_hw structure
8632 * index - The starting byte index of the word to read.
8633 * data - Pointer to a word to store the value read.
8634 *****************************************************************************/
8635static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
8636{
8637 s32 status = E1000_SUCCESS;
8638 status = e1000_read_ich8_data(hw, index, 2, data);
8639 return status;
8640}
8641
8642/******************************************************************************
8643 * Erases the bank specified. Each bank may be a 4, 8 or 64k block. Banks are 0
8644 * based.
8645 *
8646 * hw - pointer to e1000_hw structure
8647 * bank - 0 for first bank, 1 for second bank
8648 *
8649 * Note that this function may actually erase as much as 8 or 64 KBytes. The
8650 * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
8651 * bank size may be 4, 8 or 64 KBytes
8652 *****************************************************************************/
8653static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
8654{
8655 union ich8_hws_flash_status hsfsts;
8656 union ich8_hws_flash_ctrl hsflctl;
8657 u32 flash_linear_address;
8658 s32 count = 0;
8659 s32 error = E1000_ERR_EEPROM;
8660 s32 iteration;
8661 s32 sub_sector_size = 0;
8662 s32 bank_size;
8663 s32 j = 0;
8664 s32 error_flag = 0;
8665
8666 hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
8667
8668 /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
8669 /* 00: The Hw sector is 256 bytes, hence we need to erase 16
8670 * consecutive sectors. The start index for the nth Hw sector can be
8671 * calculated as bank * 4096 + n * 256
8672 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
8673 * The start index for the nth Hw sector can be calculated
8674 * as bank * 4096
8675 * 10: The HW sector is 8K bytes
8676 * 11: The Hw sector size is 64K bytes */
8677 if (hsfsts.hsf_status.berasesz == 0x0) {
8678 /* Hw sector size 256 */
8679 sub_sector_size = ICH_FLASH_SEG_SIZE_256;
8680 bank_size = ICH_FLASH_SECTOR_SIZE;
8681 iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
8682 } else if (hsfsts.hsf_status.berasesz == 0x1) {
8683 bank_size = ICH_FLASH_SEG_SIZE_4K;
8684 iteration = 1;
8685 } else if (hsfsts.hsf_status.berasesz == 0x3) {
8686 bank_size = ICH_FLASH_SEG_SIZE_64K;
8687 iteration = 1;
8688 } else {
8689 return error;
8690 }
8691
8692 for (j = 0; j < iteration ; j++) {
8693 do {
8694 count++;
8695 /* Steps */
8696 error = e1000_ich8_cycle_init(hw);
8697 if (error != E1000_SUCCESS) {
8698 error_flag = 1;
8699 break;
8700 }
8701
8702 /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
8703 * Control */
8704 hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
8705 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
8706 E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
8707
8708 /* Write the last 24 bits of an index within the block into Flash
8709 * Linear address field in Flash Address. This probably needs to
8710 * be calculated here based off the on-chip erase sector size and
8711 * the software bank size (4, 8 or 64 KBytes) */
8712 flash_linear_address = bank * bank_size + j * sub_sector_size;
8713 flash_linear_address += hw->flash_base_addr;
8714 flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
8715
8716 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
8717
8718 error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
8719 /* Check if FCERR is set to 1. If 1, clear it and try the whole
8720 * sequence a few more times else Done */
8721 if (error == E1000_SUCCESS) {
8722 break;
8723 } else {
8724 hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
8725 if (hsfsts.hsf_status.flcerr == 1) {
8726 /* repeat for some time before giving up */
8727 continue;
8728 } else if (hsfsts.hsf_status.flcdone == 0) {
8729 error_flag = 1;
8730 break;
8731 }
8732 }
8733 } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
8734 if (error_flag == 1)
8735 break;
8736 }
8737 if (error_flag != 1)
8738 error = E1000_SUCCESS;
8739 return error;
8740}
8741
8742static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
8743 u32 cnf_base_addr,
8744 u32 cnf_size)
8745{
8746 u32 ret_val = E1000_SUCCESS;
8747 u16 word_addr, reg_data, reg_addr;
8748 u16 i;
8749
8750 /* cnf_base_addr is in DWORD */
8751 word_addr = (u16)(cnf_base_addr << 1);
8752
8753 /* cnf_size is returned in size of dwords */
8754 for (i = 0; i < cnf_size; i++) {
8755 ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
8756 if (ret_val)
8757 return ret_val;
8758
8759 ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
8760 if (ret_val)
8761 return ret_val;
8762
8763 ret_val = e1000_get_software_flag(hw);
8764 if (ret_val != E1000_SUCCESS)
8765 return ret_val;
8766
8767 ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
8768
8769 e1000_release_software_flag(hw);
8770 }
8771
8772 return ret_val;
8773}
8774
8775
8776/******************************************************************************
8777 * This function initializes the PHY from the NVM on ICH8 platforms. This
8778 * is needed due to an issue where the NVM configuration is not properly
8779 * autoloaded after power transitions. Therefore, after each PHY reset, we
8780 * will load the configuration data out of the NVM manually.
8781 *
8782 * hw: Struct containing variables accessed by shared code
8783 *****************************************************************************/
8784static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
8785{
8786 u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
8787
8788 if (hw->phy_type != e1000_phy_igp_3)
8789 return E1000_SUCCESS;
8790
8791 /* Check if SW needs configure the PHY */
8792 reg_data = er32(FEXTNVM);
8793 if (!(reg_data & FEXTNVM_SW_CONFIG))
8794 return E1000_SUCCESS;
8795
8796 /* Wait for basic configuration completes before proceeding*/
8797 loop = 0;
8798 do {
8799 reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
8800 udelay(100);
8801 loop++;
8802 } while ((!reg_data) && (loop < 50));
8803
8804 /* Clear the Init Done bit for the next init event */
8805 reg_data = er32(STATUS);
8806 reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
8807 ew32(STATUS, reg_data);
8808
8809 /* Make sure HW does not configure LCD from PHY extended configuration
8810 before SW configuration */
8811 reg_data = er32(EXTCNF_CTRL);
8812 if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
8813 reg_data = er32(EXTCNF_SIZE);
8814 cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
8815 cnf_size >>= 16;
8816 if (cnf_size) {
8817 reg_data = er32(EXTCNF_CTRL);
8818 cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
8819 /* cnf_base_addr is in DWORD */
8820 cnf_base_addr >>= 16;
8821
8822 /* Configure LCD from extended configuration region. */
8823 ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
8824 cnf_size);
8825 if (ret_val)
8826 return ret_val;
8827 }
8828 }
8829
8830 return E1000_SUCCESS;
8831} 5634}
8832
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index a8866bdbb671..9acfddb0dafb 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,7 +35,6 @@
35 35
36#include "e1000_osdep.h" 36#include "e1000_osdep.h"
37 37
38
39/* Forward declarations of structures used by the shared code */ 38/* Forward declarations of structures used by the shared code */
40struct e1000_hw; 39struct e1000_hw;
41struct e1000_hw_stats; 40struct e1000_hw_stats;
@@ -43,252 +42,231 @@ struct e1000_hw_stats;
43/* Enumerated types specific to the e1000 hardware */ 42/* Enumerated types specific to the e1000 hardware */
44/* Media Access Controlers */ 43/* Media Access Controlers */
45typedef enum { 44typedef enum {
46 e1000_undefined = 0, 45 e1000_undefined = 0,
47 e1000_82542_rev2_0, 46 e1000_82542_rev2_0,
48 e1000_82542_rev2_1, 47 e1000_82542_rev2_1,
49 e1000_82543, 48 e1000_82543,
50 e1000_82544, 49 e1000_82544,
51 e1000_82540, 50 e1000_82540,
52 e1000_82545, 51 e1000_82545,
53 e1000_82545_rev_3, 52 e1000_82545_rev_3,
54 e1000_82546, 53 e1000_82546,
55 e1000_82546_rev_3, 54 e1000_82546_rev_3,
56 e1000_82541, 55 e1000_82541,
57 e1000_82541_rev_2, 56 e1000_82541_rev_2,
58 e1000_82547, 57 e1000_82547,
59 e1000_82547_rev_2, 58 e1000_82547_rev_2,
60 e1000_82571, 59 e1000_num_macs
61 e1000_82572,
62 e1000_82573,
63 e1000_80003es2lan,
64 e1000_ich8lan,
65 e1000_num_macs
66} e1000_mac_type; 60} e1000_mac_type;
67 61
68typedef enum { 62typedef enum {
69 e1000_eeprom_uninitialized = 0, 63 e1000_eeprom_uninitialized = 0,
70 e1000_eeprom_spi, 64 e1000_eeprom_spi,
71 e1000_eeprom_microwire, 65 e1000_eeprom_microwire,
72 e1000_eeprom_flash, 66 e1000_eeprom_flash,
73 e1000_eeprom_ich8, 67 e1000_eeprom_none, /* No NVM support */
74 e1000_eeprom_none, /* No NVM support */ 68 e1000_num_eeprom_types
75 e1000_num_eeprom_types
76} e1000_eeprom_type; 69} e1000_eeprom_type;
77 70
78/* Media Types */ 71/* Media Types */
79typedef enum { 72typedef enum {
80 e1000_media_type_copper = 0, 73 e1000_media_type_copper = 0,
81 e1000_media_type_fiber = 1, 74 e1000_media_type_fiber = 1,
82 e1000_media_type_internal_serdes = 2, 75 e1000_media_type_internal_serdes = 2,
83 e1000_num_media_types 76 e1000_num_media_types
84} e1000_media_type; 77} e1000_media_type;
85 78
86typedef enum { 79typedef enum {
87 e1000_10_half = 0, 80 e1000_10_half = 0,
88 e1000_10_full = 1, 81 e1000_10_full = 1,
89 e1000_100_half = 2, 82 e1000_100_half = 2,
90 e1000_100_full = 3 83 e1000_100_full = 3
91} e1000_speed_duplex_type; 84} e1000_speed_duplex_type;
92 85
93/* Flow Control Settings */ 86/* Flow Control Settings */
94typedef enum { 87typedef enum {
95 E1000_FC_NONE = 0, 88 E1000_FC_NONE = 0,
96 E1000_FC_RX_PAUSE = 1, 89 E1000_FC_RX_PAUSE = 1,
97 E1000_FC_TX_PAUSE = 2, 90 E1000_FC_TX_PAUSE = 2,
98 E1000_FC_FULL = 3, 91 E1000_FC_FULL = 3,
99 E1000_FC_DEFAULT = 0xFF 92 E1000_FC_DEFAULT = 0xFF
100} e1000_fc_type; 93} e1000_fc_type;
101 94
102struct e1000_shadow_ram { 95struct e1000_shadow_ram {
103 u16 eeprom_word; 96 u16 eeprom_word;
104 bool modified; 97 bool modified;
105}; 98};
106 99
107/* PCI bus types */ 100/* PCI bus types */
108typedef enum { 101typedef enum {
109 e1000_bus_type_unknown = 0, 102 e1000_bus_type_unknown = 0,
110 e1000_bus_type_pci, 103 e1000_bus_type_pci,
111 e1000_bus_type_pcix, 104 e1000_bus_type_pcix,
112 e1000_bus_type_pci_express, 105 e1000_bus_type_reserved
113 e1000_bus_type_reserved
114} e1000_bus_type; 106} e1000_bus_type;
115 107
116/* PCI bus speeds */ 108/* PCI bus speeds */
117typedef enum { 109typedef enum {
118 e1000_bus_speed_unknown = 0, 110 e1000_bus_speed_unknown = 0,
119 e1000_bus_speed_33, 111 e1000_bus_speed_33,
120 e1000_bus_speed_66, 112 e1000_bus_speed_66,
121 e1000_bus_speed_100, 113 e1000_bus_speed_100,
122 e1000_bus_speed_120, 114 e1000_bus_speed_120,
123 e1000_bus_speed_133, 115 e1000_bus_speed_133,
124 e1000_bus_speed_2500, 116 e1000_bus_speed_reserved
125 e1000_bus_speed_reserved
126} e1000_bus_speed; 117} e1000_bus_speed;
127 118
128/* PCI bus widths */ 119/* PCI bus widths */
129typedef enum { 120typedef enum {
130 e1000_bus_width_unknown = 0, 121 e1000_bus_width_unknown = 0,
131 /* These PCIe values should literally match the possible return values 122 e1000_bus_width_32,
132 * from config space */ 123 e1000_bus_width_64,
133 e1000_bus_width_pciex_1 = 1, 124 e1000_bus_width_reserved
134 e1000_bus_width_pciex_2 = 2,
135 e1000_bus_width_pciex_4 = 4,
136 e1000_bus_width_32,
137 e1000_bus_width_64,
138 e1000_bus_width_reserved
139} e1000_bus_width; 125} e1000_bus_width;
140 126
141/* PHY status info structure and supporting enums */ 127/* PHY status info structure and supporting enums */
142typedef enum { 128typedef enum {
143 e1000_cable_length_50 = 0, 129 e1000_cable_length_50 = 0,
144 e1000_cable_length_50_80, 130 e1000_cable_length_50_80,
145 e1000_cable_length_80_110, 131 e1000_cable_length_80_110,
146 e1000_cable_length_110_140, 132 e1000_cable_length_110_140,
147 e1000_cable_length_140, 133 e1000_cable_length_140,
148 e1000_cable_length_undefined = 0xFF 134 e1000_cable_length_undefined = 0xFF
149} e1000_cable_length; 135} e1000_cable_length;
150 136
151typedef enum { 137typedef enum {
152 e1000_gg_cable_length_60 = 0, 138 e1000_gg_cable_length_60 = 0,
153 e1000_gg_cable_length_60_115 = 1, 139 e1000_gg_cable_length_60_115 = 1,
154 e1000_gg_cable_length_115_150 = 2, 140 e1000_gg_cable_length_115_150 = 2,
155 e1000_gg_cable_length_150 = 4 141 e1000_gg_cable_length_150 = 4
156} e1000_gg_cable_length; 142} e1000_gg_cable_length;
157 143
158typedef enum { 144typedef enum {
159 e1000_igp_cable_length_10 = 10, 145 e1000_igp_cable_length_10 = 10,
160 e1000_igp_cable_length_20 = 20, 146 e1000_igp_cable_length_20 = 20,
161 e1000_igp_cable_length_30 = 30, 147 e1000_igp_cable_length_30 = 30,
162 e1000_igp_cable_length_40 = 40, 148 e1000_igp_cable_length_40 = 40,
163 e1000_igp_cable_length_50 = 50, 149 e1000_igp_cable_length_50 = 50,
164 e1000_igp_cable_length_60 = 60, 150 e1000_igp_cable_length_60 = 60,
165 e1000_igp_cable_length_70 = 70, 151 e1000_igp_cable_length_70 = 70,
166 e1000_igp_cable_length_80 = 80, 152 e1000_igp_cable_length_80 = 80,
167 e1000_igp_cable_length_90 = 90, 153 e1000_igp_cable_length_90 = 90,
168 e1000_igp_cable_length_100 = 100, 154 e1000_igp_cable_length_100 = 100,
169 e1000_igp_cable_length_110 = 110, 155 e1000_igp_cable_length_110 = 110,
170 e1000_igp_cable_length_115 = 115, 156 e1000_igp_cable_length_115 = 115,
171 e1000_igp_cable_length_120 = 120, 157 e1000_igp_cable_length_120 = 120,
172 e1000_igp_cable_length_130 = 130, 158 e1000_igp_cable_length_130 = 130,
173 e1000_igp_cable_length_140 = 140, 159 e1000_igp_cable_length_140 = 140,
174 e1000_igp_cable_length_150 = 150, 160 e1000_igp_cable_length_150 = 150,
175 e1000_igp_cable_length_160 = 160, 161 e1000_igp_cable_length_160 = 160,
176 e1000_igp_cable_length_170 = 170, 162 e1000_igp_cable_length_170 = 170,
177 e1000_igp_cable_length_180 = 180 163 e1000_igp_cable_length_180 = 180
178} e1000_igp_cable_length; 164} e1000_igp_cable_length;
179 165
180typedef enum { 166typedef enum {
181 e1000_10bt_ext_dist_enable_normal = 0, 167 e1000_10bt_ext_dist_enable_normal = 0,
182 e1000_10bt_ext_dist_enable_lower, 168 e1000_10bt_ext_dist_enable_lower,
183 e1000_10bt_ext_dist_enable_undefined = 0xFF 169 e1000_10bt_ext_dist_enable_undefined = 0xFF
184} e1000_10bt_ext_dist_enable; 170} e1000_10bt_ext_dist_enable;
185 171
186typedef enum { 172typedef enum {
187 e1000_rev_polarity_normal = 0, 173 e1000_rev_polarity_normal = 0,
188 e1000_rev_polarity_reversed, 174 e1000_rev_polarity_reversed,
189 e1000_rev_polarity_undefined = 0xFF 175 e1000_rev_polarity_undefined = 0xFF
190} e1000_rev_polarity; 176} e1000_rev_polarity;
191 177
192typedef enum { 178typedef enum {
193 e1000_downshift_normal = 0, 179 e1000_downshift_normal = 0,
194 e1000_downshift_activated, 180 e1000_downshift_activated,
195 e1000_downshift_undefined = 0xFF 181 e1000_downshift_undefined = 0xFF
196} e1000_downshift; 182} e1000_downshift;
197 183
198typedef enum { 184typedef enum {
199 e1000_smart_speed_default = 0, 185 e1000_smart_speed_default = 0,
200 e1000_smart_speed_on, 186 e1000_smart_speed_on,
201 e1000_smart_speed_off 187 e1000_smart_speed_off
202} e1000_smart_speed; 188} e1000_smart_speed;
203 189
204typedef enum { 190typedef enum {
205 e1000_polarity_reversal_enabled = 0, 191 e1000_polarity_reversal_enabled = 0,
206 e1000_polarity_reversal_disabled, 192 e1000_polarity_reversal_disabled,
207 e1000_polarity_reversal_undefined = 0xFF 193 e1000_polarity_reversal_undefined = 0xFF
208} e1000_polarity_reversal; 194} e1000_polarity_reversal;
209 195
210typedef enum { 196typedef enum {
211 e1000_auto_x_mode_manual_mdi = 0, 197 e1000_auto_x_mode_manual_mdi = 0,
212 e1000_auto_x_mode_manual_mdix, 198 e1000_auto_x_mode_manual_mdix,
213 e1000_auto_x_mode_auto1, 199 e1000_auto_x_mode_auto1,
214 e1000_auto_x_mode_auto2, 200 e1000_auto_x_mode_auto2,
215 e1000_auto_x_mode_undefined = 0xFF 201 e1000_auto_x_mode_undefined = 0xFF
216} e1000_auto_x_mode; 202} e1000_auto_x_mode;
217 203
218typedef enum { 204typedef enum {
219 e1000_1000t_rx_status_not_ok = 0, 205 e1000_1000t_rx_status_not_ok = 0,
220 e1000_1000t_rx_status_ok, 206 e1000_1000t_rx_status_ok,
221 e1000_1000t_rx_status_undefined = 0xFF 207 e1000_1000t_rx_status_undefined = 0xFF
222} e1000_1000t_rx_status; 208} e1000_1000t_rx_status;
223 209
224typedef enum { 210typedef enum {
225 e1000_phy_m88 = 0, 211 e1000_phy_m88 = 0,
226 e1000_phy_igp, 212 e1000_phy_igp,
227 e1000_phy_igp_2,
228 e1000_phy_gg82563,
229 e1000_phy_igp_3,
230 e1000_phy_ife,
231 e1000_phy_undefined = 0xFF 213 e1000_phy_undefined = 0xFF
232} e1000_phy_type; 214} e1000_phy_type;
233 215
234typedef enum { 216typedef enum {
235 e1000_ms_hw_default = 0, 217 e1000_ms_hw_default = 0,
236 e1000_ms_force_master, 218 e1000_ms_force_master,
237 e1000_ms_force_slave, 219 e1000_ms_force_slave,
238 e1000_ms_auto 220 e1000_ms_auto
239} e1000_ms_type; 221} e1000_ms_type;
240 222
241typedef enum { 223typedef enum {
242 e1000_ffe_config_enabled = 0, 224 e1000_ffe_config_enabled = 0,
243 e1000_ffe_config_active, 225 e1000_ffe_config_active,
244 e1000_ffe_config_blocked 226 e1000_ffe_config_blocked
245} e1000_ffe_config; 227} e1000_ffe_config;
246 228
247typedef enum { 229typedef enum {
248 e1000_dsp_config_disabled = 0, 230 e1000_dsp_config_disabled = 0,
249 e1000_dsp_config_enabled, 231 e1000_dsp_config_enabled,
250 e1000_dsp_config_activated, 232 e1000_dsp_config_activated,
251 e1000_dsp_config_undefined = 0xFF 233 e1000_dsp_config_undefined = 0xFF
252} e1000_dsp_config; 234} e1000_dsp_config;
253 235
254struct e1000_phy_info { 236struct e1000_phy_info {
255 e1000_cable_length cable_length; 237 e1000_cable_length cable_length;
256 e1000_10bt_ext_dist_enable extended_10bt_distance; 238 e1000_10bt_ext_dist_enable extended_10bt_distance;
257 e1000_rev_polarity cable_polarity; 239 e1000_rev_polarity cable_polarity;
258 e1000_downshift downshift; 240 e1000_downshift downshift;
259 e1000_polarity_reversal polarity_correction; 241 e1000_polarity_reversal polarity_correction;
260 e1000_auto_x_mode mdix_mode; 242 e1000_auto_x_mode mdix_mode;
261 e1000_1000t_rx_status local_rx; 243 e1000_1000t_rx_status local_rx;
262 e1000_1000t_rx_status remote_rx; 244 e1000_1000t_rx_status remote_rx;
263}; 245};
264 246
265struct e1000_phy_stats { 247struct e1000_phy_stats {
266 u32 idle_errors; 248 u32 idle_errors;
267 u32 receive_errors; 249 u32 receive_errors;
268}; 250};
269 251
270struct e1000_eeprom_info { 252struct e1000_eeprom_info {
271 e1000_eeprom_type type; 253 e1000_eeprom_type type;
272 u16 word_size; 254 u16 word_size;
273 u16 opcode_bits; 255 u16 opcode_bits;
274 u16 address_bits; 256 u16 address_bits;
275 u16 delay_usec; 257 u16 delay_usec;
276 u16 page_size; 258 u16 page_size;
277 bool use_eerd;
278 bool use_eewr;
279}; 259};
280 260
281/* Flex ASF Information */ 261/* Flex ASF Information */
282#define E1000_HOST_IF_MAX_SIZE 2048 262#define E1000_HOST_IF_MAX_SIZE 2048
283 263
284typedef enum { 264typedef enum {
285 e1000_byte_align = 0, 265 e1000_byte_align = 0,
286 e1000_word_align = 1, 266 e1000_word_align = 1,
287 e1000_dword_align = 2 267 e1000_dword_align = 2
288} e1000_align_type; 268} e1000_align_type;
289 269
290
291
292/* Error Codes */ 270/* Error Codes */
293#define E1000_SUCCESS 0 271#define E1000_SUCCESS 0
294#define E1000_ERR_EEPROM 1 272#define E1000_ERR_EEPROM 1
@@ -301,7 +279,6 @@ typedef enum {
301#define E1000_ERR_MASTER_REQUESTS_PENDING 10 279#define E1000_ERR_MASTER_REQUESTS_PENDING 10
302#define E1000_ERR_HOST_INTERFACE_COMMAND 11 280#define E1000_ERR_HOST_INTERFACE_COMMAND 11
303#define E1000_BLK_PHY_RESET 12 281#define E1000_BLK_PHY_RESET 12
304#define E1000_ERR_SWFW_SYNC 13
305 282
306#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ 283#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
307 (((_value) & 0xff00) >> 8)) 284 (((_value) & 0xff00) >> 8))
@@ -318,19 +295,17 @@ s32 e1000_setup_link(struct e1000_hw *hw);
318s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); 295s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
319void e1000_config_collision_dist(struct e1000_hw *hw); 296void e1000_config_collision_dist(struct e1000_hw *hw);
320s32 e1000_check_for_link(struct e1000_hw *hw); 297s32 e1000_check_for_link(struct e1000_hw *hw);
321s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); 298s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex);
322s32 e1000_force_mac_fc(struct e1000_hw *hw); 299s32 e1000_force_mac_fc(struct e1000_hw *hw);
323 300
324/* PHY */ 301/* PHY */
325s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data); 302s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data);
326s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); 303s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
327s32 e1000_phy_hw_reset(struct e1000_hw *hw); 304s32 e1000_phy_hw_reset(struct e1000_hw *hw);
328s32 e1000_phy_reset(struct e1000_hw *hw); 305s32 e1000_phy_reset(struct e1000_hw *hw);
329s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 306s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
330s32 e1000_validate_mdi_setting(struct e1000_hw *hw); 307s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
331 308
332void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
333
334/* EEPROM Functions */ 309/* EEPROM Functions */
335s32 e1000_init_eeprom_params(struct e1000_hw *hw); 310s32 e1000_init_eeprom_params(struct e1000_hw *hw);
336 311
@@ -338,66 +313,63 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw);
338u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw); 313u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
339 314
340#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 315#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
341#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ 316#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
342 317
343#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ 318#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
344#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ 319#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
345#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ 320#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
346#define E1000_MNG_IAMT_MODE 0x3 321#define E1000_MNG_IAMT_MODE 0x3
347#define E1000_MNG_ICH_IAMT_MODE 0x2 322#define E1000_MNG_ICH_IAMT_MODE 0x2
348#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ 323#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
349 324
350#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ 325#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
351#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ 326#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */
352#define E1000_VFTA_ENTRY_SHIFT 0x5 327#define E1000_VFTA_ENTRY_SHIFT 0x5
353#define E1000_VFTA_ENTRY_MASK 0x7F 328#define E1000_VFTA_ENTRY_MASK 0x7F
354#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 329#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
355 330
356struct e1000_host_mng_command_header { 331struct e1000_host_mng_command_header {
357 u8 command_id; 332 u8 command_id;
358 u8 checksum; 333 u8 checksum;
359 u16 reserved1; 334 u16 reserved1;
360 u16 reserved2; 335 u16 reserved2;
361 u16 command_length; 336 u16 command_length;
362}; 337};
363 338
364struct e1000_host_mng_command_info { 339struct e1000_host_mng_command_info {
365 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 340 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
366 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ 341 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */
367}; 342};
368#ifdef __BIG_ENDIAN 343#ifdef __BIG_ENDIAN
369struct e1000_host_mng_dhcp_cookie{ 344struct e1000_host_mng_dhcp_cookie {
370 u32 signature; 345 u32 signature;
371 u16 vlan_id; 346 u16 vlan_id;
372 u8 reserved0; 347 u8 reserved0;
373 u8 status; 348 u8 status;
374 u32 reserved1; 349 u32 reserved1;
375 u8 checksum; 350 u8 checksum;
376 u8 reserved3; 351 u8 reserved3;
377 u16 reserved2; 352 u16 reserved2;
378}; 353};
379#else 354#else
380struct e1000_host_mng_dhcp_cookie{ 355struct e1000_host_mng_dhcp_cookie {
381 u32 signature; 356 u32 signature;
382 u8 status; 357 u8 status;
383 u8 reserved0; 358 u8 reserved0;
384 u16 vlan_id; 359 u16 vlan_id;
385 u32 reserved1; 360 u32 reserved1;
386 u16 reserved2; 361 u16 reserved2;
387 u8 reserved3; 362 u8 reserved3;
388 u8 checksum; 363 u8 checksum;
389}; 364};
390#endif 365#endif
391 366
392s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
393 u16 length);
394bool e1000_check_mng_mode(struct e1000_hw *hw); 367bool e1000_check_mng_mode(struct e1000_hw *hw);
395bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); 368s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
396s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
397s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); 369s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
398s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); 370s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
399s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data); 371s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
400s32 e1000_read_mac_addr(struct e1000_hw * hw); 372s32 e1000_read_mac_addr(struct e1000_hw *hw);
401 373
402/* Filters (multicast, vlan, receive) */ 374/* Filters (multicast, vlan, receive) */
403u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); 375u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
@@ -417,18 +389,15 @@ s32 e1000_blink_led_start(struct e1000_hw *hw);
417/* Everything else */ 389/* Everything else */
418void e1000_reset_adaptive(struct e1000_hw *hw); 390void e1000_reset_adaptive(struct e1000_hw *hw);
419void e1000_update_adaptive(struct e1000_hw *hw); 391void e1000_update_adaptive(struct e1000_hw *hw);
420void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr); 392void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
393 u32 frame_len, u8 * mac_addr);
421void e1000_get_bus_info(struct e1000_hw *hw); 394void e1000_get_bus_info(struct e1000_hw *hw);
422void e1000_pci_set_mwi(struct e1000_hw *hw); 395void e1000_pci_set_mwi(struct e1000_hw *hw);
423void e1000_pci_clear_mwi(struct e1000_hw *hw); 396void e1000_pci_clear_mwi(struct e1000_hw *hw);
424s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
425void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); 397void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
426int e1000_pcix_get_mmrbc(struct e1000_hw *hw); 398int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
427/* Port I/O is only supported on 82544 and newer */ 399/* Port I/O is only supported on 82544 and newer */
428void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); 400void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
429s32 e1000_disable_pciex_master(struct e1000_hw *hw);
430s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
431
432 401
433#define E1000_READ_REG_IO(a, reg) \ 402#define E1000_READ_REG_IO(a, reg) \
434 e1000_read_reg_io((a), E1000_##reg) 403 e1000_read_reg_io((a), E1000_##reg)
@@ -471,36 +440,7 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
471#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 440#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
472#define E1000_DEV_ID_82547EI 0x1019 441#define E1000_DEV_ID_82547EI 0x1019
473#define E1000_DEV_ID_82547EI_MOBILE 0x101A 442#define E1000_DEV_ID_82547EI_MOBILE 0x101A
474#define E1000_DEV_ID_82571EB_COPPER 0x105E
475#define E1000_DEV_ID_82571EB_FIBER 0x105F
476#define E1000_DEV_ID_82571EB_SERDES 0x1060
477#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
478#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
479#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
480#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
481#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
482#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
483#define E1000_DEV_ID_82572EI_COPPER 0x107D
484#define E1000_DEV_ID_82572EI_FIBER 0x107E
485#define E1000_DEV_ID_82572EI_SERDES 0x107F
486#define E1000_DEV_ID_82572EI 0x10B9
487#define E1000_DEV_ID_82573E 0x108B
488#define E1000_DEV_ID_82573E_IAMT 0x108C
489#define E1000_DEV_ID_82573L 0x109A
490#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 443#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
491#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
492#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
493#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
494#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
495
496#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
497#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
498#define E1000_DEV_ID_ICH8_IGP_C 0x104B
499#define E1000_DEV_ID_ICH8_IFE 0x104C
500#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
501#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
502#define E1000_DEV_ID_ICH8_IGP_M 0x104D
503
504 444
505#define NODE_ADDRESS_SIZE 6 445#define NODE_ADDRESS_SIZE 6
506#define ETH_LENGTH_OF_ADDRESS 6 446#define ETH_LENGTH_OF_ADDRESS 6
@@ -523,21 +463,20 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
523 463
524/* The sizes (in bytes) of a ethernet packet */ 464/* The sizes (in bytes) of a ethernet packet */
525#define ENET_HEADER_SIZE 14 465#define ENET_HEADER_SIZE 14
526#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ 466#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
527#define ETHERNET_FCS_SIZE 4 467#define ETHERNET_FCS_SIZE 4
528#define MINIMUM_ETHERNET_PACKET_SIZE \ 468#define MINIMUM_ETHERNET_PACKET_SIZE \
529 (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) 469 (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
530#define CRC_LENGTH ETHERNET_FCS_SIZE 470#define CRC_LENGTH ETHERNET_FCS_SIZE
531#define MAX_JUMBO_FRAME_SIZE 0x3F00 471#define MAX_JUMBO_FRAME_SIZE 0x3F00
532 472
533
534/* 802.1q VLAN Packet Sizes */ 473/* 802.1q VLAN Packet Sizes */
535#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ 474#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
536 475
537/* Ethertype field values */ 476/* Ethertype field values */
538#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ 477#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
539#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ 478#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
540#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ 479#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
541 480
542/* Packet Header defines */ 481/* Packet Header defines */
543#define IP_PROTOCOL_TCP 6 482#define IP_PROTOCOL_TCP 6
@@ -567,15 +506,6 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
567 E1000_IMS_RXSEQ | \ 506 E1000_IMS_RXSEQ | \
568 E1000_IMS_LSC) 507 E1000_IMS_LSC)
569 508
570/* Additional interrupts need to be handled for e1000_ich8lan:
571 DSW = The FW changed the status of the DISSW bit in FWSM
572 PHYINT = The LAN connected device generates an interrupt
573 EPRST = Manageability reset event */
574#define IMS_ICH8LAN_ENABLE_MASK (\
575 E1000_IMS_DSW | \
576 E1000_IMS_PHYINT | \
577 E1000_IMS_EPRST)
578
579/* Number of high/low register pairs in the RAR. The RAR (Receive Address 509/* Number of high/low register pairs in the RAR. The RAR (Receive Address
580 * Registers) holds the directed and multicast addresses that we monitor. We 510 * Registers) holds the directed and multicast addresses that we monitor. We
581 * reserve one of these spots for our directed address, allowing us room for 511 * reserve one of these spots for our directed address, allowing us room for
@@ -583,100 +513,98 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
583 */ 513 */
584#define E1000_RAR_ENTRIES 15 514#define E1000_RAR_ENTRIES 15
585 515
586#define E1000_RAR_ENTRIES_ICH8LAN 6
587
588#define MIN_NUMBER_OF_DESCRIPTORS 8 516#define MIN_NUMBER_OF_DESCRIPTORS 8
589#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 517#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
590 518
591/* Receive Descriptor */ 519/* Receive Descriptor */
592struct e1000_rx_desc { 520struct e1000_rx_desc {
593 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 521 __le64 buffer_addr; /* Address of the descriptor's data buffer */
594 __le16 length; /* Length of data DMAed into data buffer */ 522 __le16 length; /* Length of data DMAed into data buffer */
595 __le16 csum; /* Packet checksum */ 523 __le16 csum; /* Packet checksum */
596 u8 status; /* Descriptor status */ 524 u8 status; /* Descriptor status */
597 u8 errors; /* Descriptor Errors */ 525 u8 errors; /* Descriptor Errors */
598 __le16 special; 526 __le16 special;
599}; 527};
600 528
601/* Receive Descriptor - Extended */ 529/* Receive Descriptor - Extended */
602union e1000_rx_desc_extended { 530union e1000_rx_desc_extended {
603 struct { 531 struct {
604 __le64 buffer_addr; 532 __le64 buffer_addr;
605 __le64 reserved; 533 __le64 reserved;
606 } read; 534 } read;
607 struct { 535 struct {
608 struct { 536 struct {
609 __le32 mrq; /* Multiple Rx Queues */ 537 __le32 mrq; /* Multiple Rx Queues */
610 union { 538 union {
611 __le32 rss; /* RSS Hash */ 539 __le32 rss; /* RSS Hash */
612 struct { 540 struct {
613 __le16 ip_id; /* IP id */ 541 __le16 ip_id; /* IP id */
614 __le16 csum; /* Packet Checksum */ 542 __le16 csum; /* Packet Checksum */
615 } csum_ip; 543 } csum_ip;
616 } hi_dword; 544 } hi_dword;
617 } lower; 545 } lower;
618 struct { 546 struct {
619 __le32 status_error; /* ext status/error */ 547 __le32 status_error; /* ext status/error */
620 __le16 length; 548 __le16 length;
621 __le16 vlan; /* VLAN tag */ 549 __le16 vlan; /* VLAN tag */
622 } upper; 550 } upper;
623 } wb; /* writeback */ 551 } wb; /* writeback */
624}; 552};
625 553
626#define MAX_PS_BUFFERS 4 554#define MAX_PS_BUFFERS 4
627/* Receive Descriptor - Packet Split */ 555/* Receive Descriptor - Packet Split */
628union e1000_rx_desc_packet_split { 556union e1000_rx_desc_packet_split {
629 struct { 557 struct {
630 /* one buffer for protocol header(s), three data buffers */ 558 /* one buffer for protocol header(s), three data buffers */
631 __le64 buffer_addr[MAX_PS_BUFFERS]; 559 __le64 buffer_addr[MAX_PS_BUFFERS];
632 } read; 560 } read;
633 struct { 561 struct {
634 struct { 562 struct {
635 __le32 mrq; /* Multiple Rx Queues */ 563 __le32 mrq; /* Multiple Rx Queues */
636 union { 564 union {
637 __le32 rss; /* RSS Hash */ 565 __le32 rss; /* RSS Hash */
638 struct { 566 struct {
639 __le16 ip_id; /* IP id */ 567 __le16 ip_id; /* IP id */
640 __le16 csum; /* Packet Checksum */ 568 __le16 csum; /* Packet Checksum */
641 } csum_ip; 569 } csum_ip;
642 } hi_dword; 570 } hi_dword;
643 } lower; 571 } lower;
644 struct { 572 struct {
645 __le32 status_error; /* ext status/error */ 573 __le32 status_error; /* ext status/error */
646 __le16 length0; /* length of buffer 0 */ 574 __le16 length0; /* length of buffer 0 */
647 __le16 vlan; /* VLAN tag */ 575 __le16 vlan; /* VLAN tag */
648 } middle; 576 } middle;
649 struct { 577 struct {
650 __le16 header_status; 578 __le16 header_status;
651 __le16 length[3]; /* length of buffers 1-3 */ 579 __le16 length[3]; /* length of buffers 1-3 */
652 } upper; 580 } upper;
653 __le64 reserved; 581 __le64 reserved;
654 } wb; /* writeback */ 582 } wb; /* writeback */
655}; 583};
656 584
657/* Receive Decriptor bit definitions */ 585/* Receive Descriptor bit definitions */
658#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 586#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
659#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 587#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
660#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 588#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
661#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 589#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
662#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 590#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
663#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 591#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
664#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 592#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
665#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 593#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
666#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ 594#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
667#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ 595#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
668#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ 596#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
669#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ 597#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
670#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 598#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
671#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ 599#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
672#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ 600#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
673#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ 601#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
674#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ 602#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
675#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ 603#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
676#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 604#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
677#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ 605#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
678#define E1000_RXD_SPC_PRI_SHIFT 13 606#define E1000_RXD_SPC_PRI_SHIFT 13
679#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ 607#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
680#define E1000_RXD_SPC_CFI_SHIFT 12 608#define E1000_RXD_SPC_CFI_SHIFT 12
681 609
682#define E1000_RXDEXT_STATERR_CE 0x01000000 610#define E1000_RXDEXT_STATERR_CE 0x01000000
@@ -698,7 +626,6 @@ union e1000_rx_desc_packet_split {
698 E1000_RXD_ERR_CXE | \ 626 E1000_RXD_ERR_CXE | \
699 E1000_RXD_ERR_RXE) 627 E1000_RXD_ERR_RXE)
700 628
701
702/* Same mask, but for extended and packet split descriptors */ 629/* Same mask, but for extended and packet split descriptors */
703#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 630#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
704 E1000_RXDEXT_STATERR_CE | \ 631 E1000_RXDEXT_STATERR_CE | \
@@ -707,152 +634,145 @@ union e1000_rx_desc_packet_split {
707 E1000_RXDEXT_STATERR_CXE | \ 634 E1000_RXDEXT_STATERR_CXE | \
708 E1000_RXDEXT_STATERR_RXE) 635 E1000_RXDEXT_STATERR_RXE)
709 636
710
711/* Transmit Descriptor */ 637/* Transmit Descriptor */
712struct e1000_tx_desc { 638struct e1000_tx_desc {
713 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 639 __le64 buffer_addr; /* Address of the descriptor's data buffer */
714 union { 640 union {
715 __le32 data; 641 __le32 data;
716 struct { 642 struct {
717 __le16 length; /* Data buffer length */ 643 __le16 length; /* Data buffer length */
718 u8 cso; /* Checksum offset */ 644 u8 cso; /* Checksum offset */
719 u8 cmd; /* Descriptor control */ 645 u8 cmd; /* Descriptor control */
720 } flags; 646 } flags;
721 } lower; 647 } lower;
722 union { 648 union {
723 __le32 data; 649 __le32 data;
724 struct { 650 struct {
725 u8 status; /* Descriptor status */ 651 u8 status; /* Descriptor status */
726 u8 css; /* Checksum start */ 652 u8 css; /* Checksum start */
727 __le16 special; 653 __le16 special;
728 } fields; 654 } fields;
729 } upper; 655 } upper;
730}; 656};
731 657
732/* Transmit Descriptor bit definitions */ 658/* Transmit Descriptor bit definitions */
733#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ 659#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
734#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ 660#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
735#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ 661#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
736#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ 662#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
737#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ 663#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
738#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ 664#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
739#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ 665#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
740#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ 666#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
741#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ 667#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
742#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ 668#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
743#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ 669#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
744#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ 670#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
745#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ 671#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
746#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ 672#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
747#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ 673#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
748#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ 674#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
749#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ 675#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
750#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ 676#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
751#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ 677#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
752#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ 678#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
753 679
754/* Offload Context Descriptor */ 680/* Offload Context Descriptor */
755struct e1000_context_desc { 681struct e1000_context_desc {
756 union { 682 union {
757 __le32 ip_config; 683 __le32 ip_config;
758 struct { 684 struct {
759 u8 ipcss; /* IP checksum start */ 685 u8 ipcss; /* IP checksum start */
760 u8 ipcso; /* IP checksum offset */ 686 u8 ipcso; /* IP checksum offset */
761 __le16 ipcse; /* IP checksum end */ 687 __le16 ipcse; /* IP checksum end */
762 } ip_fields; 688 } ip_fields;
763 } lower_setup; 689 } lower_setup;
764 union { 690 union {
765 __le32 tcp_config; 691 __le32 tcp_config;
766 struct { 692 struct {
767 u8 tucss; /* TCP checksum start */ 693 u8 tucss; /* TCP checksum start */
768 u8 tucso; /* TCP checksum offset */ 694 u8 tucso; /* TCP checksum offset */
769 __le16 tucse; /* TCP checksum end */ 695 __le16 tucse; /* TCP checksum end */
770 } tcp_fields; 696 } tcp_fields;
771 } upper_setup; 697 } upper_setup;
772 __le32 cmd_and_length; /* */ 698 __le32 cmd_and_length; /* */
773 union { 699 union {
774 __le32 data; 700 __le32 data;
775 struct { 701 struct {
776 u8 status; /* Descriptor status */ 702 u8 status; /* Descriptor status */
777 u8 hdr_len; /* Header length */ 703 u8 hdr_len; /* Header length */
778 __le16 mss; /* Maximum segment size */ 704 __le16 mss; /* Maximum segment size */
779 } fields; 705 } fields;
780 } tcp_seg_setup; 706 } tcp_seg_setup;
781}; 707};
782 708
783/* Offload data descriptor */ 709/* Offload data descriptor */
784struct e1000_data_desc { 710struct e1000_data_desc {
785 __le64 buffer_addr; /* Address of the descriptor's buffer address */ 711 __le64 buffer_addr; /* Address of the descriptor's buffer address */
786 union { 712 union {
787 __le32 data; 713 __le32 data;
788 struct { 714 struct {
789 __le16 length; /* Data buffer length */ 715 __le16 length; /* Data buffer length */
790 u8 typ_len_ext; /* */ 716 u8 typ_len_ext; /* */
791 u8 cmd; /* */ 717 u8 cmd; /* */
792 } flags; 718 } flags;
793 } lower; 719 } lower;
794 union { 720 union {
795 __le32 data; 721 __le32 data;
796 struct { 722 struct {
797 u8 status; /* Descriptor status */ 723 u8 status; /* Descriptor status */
798 u8 popts; /* Packet Options */ 724 u8 popts; /* Packet Options */
799 __le16 special; /* */ 725 __le16 special; /* */
800 } fields; 726 } fields;
801 } upper; 727 } upper;
802}; 728};
803 729
804/* Filters */ 730/* Filters */
805#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ 731#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
806#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ 732#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
807#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 733#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
808
809#define E1000_NUM_UNICAST_ICH8LAN 7
810#define E1000_MC_TBL_SIZE_ICH8LAN 32
811
812 734
813/* Receive Address Register */ 735/* Receive Address Register */
814struct e1000_rar { 736struct e1000_rar {
815 volatile __le32 low; /* receive address low */ 737 volatile __le32 low; /* receive address low */
816 volatile __le32 high; /* receive address high */ 738 volatile __le32 high; /* receive address high */
817}; 739};
818 740
819/* Number of entries in the Multicast Table Array (MTA). */ 741/* Number of entries in the Multicast Table Array (MTA). */
820#define E1000_NUM_MTA_REGISTERS 128 742#define E1000_NUM_MTA_REGISTERS 128
821#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
822 743
823/* IPv4 Address Table Entry */ 744/* IPv4 Address Table Entry */
824struct e1000_ipv4_at_entry { 745struct e1000_ipv4_at_entry {
825 volatile u32 ipv4_addr; /* IP Address (RW) */ 746 volatile u32 ipv4_addr; /* IP Address (RW) */
826 volatile u32 reserved; 747 volatile u32 reserved;
827}; 748};
828 749
829/* Four wakeup IP addresses are supported */ 750/* Four wakeup IP addresses are supported */
830#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 751#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
831#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 752#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
832#define E1000_IP4AT_SIZE_ICH8LAN 3
833#define E1000_IP6AT_SIZE 1 753#define E1000_IP6AT_SIZE 1
834 754
835/* IPv6 Address Table Entry */ 755/* IPv6 Address Table Entry */
836struct e1000_ipv6_at_entry { 756struct e1000_ipv6_at_entry {
837 volatile u8 ipv6_addr[16]; 757 volatile u8 ipv6_addr[16];
838}; 758};
839 759
840/* Flexible Filter Length Table Entry */ 760/* Flexible Filter Length Table Entry */
841struct e1000_fflt_entry { 761struct e1000_fflt_entry {
842 volatile u32 length; /* Flexible Filter Length (RW) */ 762 volatile u32 length; /* Flexible Filter Length (RW) */
843 volatile u32 reserved; 763 volatile u32 reserved;
844}; 764};
845 765
846/* Flexible Filter Mask Table Entry */ 766/* Flexible Filter Mask Table Entry */
847struct e1000_ffmt_entry { 767struct e1000_ffmt_entry {
848 volatile u32 mask; /* Flexible Filter Mask (RW) */ 768 volatile u32 mask; /* Flexible Filter Mask (RW) */
849 volatile u32 reserved; 769 volatile u32 reserved;
850}; 770};
851 771
852/* Flexible Filter Value Table Entry */ 772/* Flexible Filter Value Table Entry */
853struct e1000_ffvt_entry { 773struct e1000_ffvt_entry {
854 volatile u32 value; /* Flexible Filter Value (RW) */ 774 volatile u32 value; /* Flexible Filter Value (RW) */
855 volatile u32 reserved; 775 volatile u32 reserved;
856}; 776};
857 777
858/* Four Flexible Filters are supported */ 778/* Four Flexible Filters are supported */
@@ -879,211 +799,211 @@ struct e1000_ffvt_entry {
879 * R/clr - register is read only and is cleared when read 799 * R/clr - register is read only and is cleared when read
880 * A - register array 800 * A - register array
881 */ 801 */
882#define E1000_CTRL 0x00000 /* Device Control - RW */ 802#define E1000_CTRL 0x00000 /* Device Control - RW */
883#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ 803#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
884#define E1000_STATUS 0x00008 /* Device Status - RO */ 804#define E1000_STATUS 0x00008 /* Device Status - RO */
885#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ 805#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
886#define E1000_EERD 0x00014 /* EEPROM Read - RW */ 806#define E1000_EERD 0x00014 /* EEPROM Read - RW */
887#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 807#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
888#define E1000_FLA 0x0001C /* Flash Access - RW */ 808#define E1000_FLA 0x0001C /* Flash Access - RW */
889#define E1000_MDIC 0x00020 /* MDI Control - RW */ 809#define E1000_MDIC 0x00020 /* MDI Control - RW */
890#define E1000_SCTL 0x00024 /* SerDes Control - RW */ 810#define E1000_SCTL 0x00024 /* SerDes Control - RW */
891#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ 811#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
892#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 812#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
893#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ 813#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
894#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 814#define E1000_FCT 0x00030 /* Flow Control Type - RW */
895#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ 815#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
896#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ 816#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
897#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ 817#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
898#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 818#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
899#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 819#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
900#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 820#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
901#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 821#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
902#define E1000_RCTL 0x00100 /* RX Control - RW */ 822#define E1000_RCTL 0x00100 /* RX Control - RW */
903#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ 823#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
904#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ 824#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
905#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ 825#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
906#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ 826#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
907#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ 827#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
908#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ 828#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
909#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ 829#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
910#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ 830#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
911#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ 831#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
912#define E1000_TCTL 0x00400 /* TX Control - RW */ 832#define E1000_TCTL 0x00400 /* TX Control - RW */
913#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ 833#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
914#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ 834#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
915#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ 835#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
916#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 836#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
917#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 837#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
918#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ 838#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
919#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ 839#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
920#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ 840#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
921#define FEXTNVM_SW_CONFIG 0x0001 841#define FEXTNVM_SW_CONFIG 0x0001
922#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 842#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
923#define E1000_PBS 0x01008 /* Packet Buffer Size */ 843#define E1000_PBS 0x01008 /* Packet Buffer Size */
924#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 844#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
925#define E1000_FLASH_UPDATES 1000 845#define E1000_FLASH_UPDATES 1000
926#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ 846#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
927#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ 847#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
928#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ 848#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
929#define E1000_FLSWCTL 0x01030 /* FLASH control register */ 849#define E1000_FLSWCTL 0x01030 /* FLASH control register */
930#define E1000_FLSWDATA 0x01034 /* FLASH data register */ 850#define E1000_FLSWDATA 0x01034 /* FLASH data register */
931#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ 851#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
932#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ 852#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
933#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ 853#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
934#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ 854#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
935#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ 855#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
936#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ 856#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
937#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ 857#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
938#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ 858#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
939#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ 859#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
940#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ 860#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
941#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ 861#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
942#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ 862#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
943#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ 863#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
944#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ 864#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
945#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ 865#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
946#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ 866#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
947#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ 867#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
948#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ 868#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
949#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ 869#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
950#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ 870#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
951#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 871#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
952#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 872#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
953#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ 873#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
954#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ 874#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
955#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ 875#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
956#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 876#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
957#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ 877#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
958#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ 878#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
959#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ 879#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
960#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ 880#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
961#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ 881#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
962#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ 882#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
963#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ 883#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
964#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ 884#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
965#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ 885#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */
966#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ 886#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
967#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 887#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
968#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 888#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
969#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 889#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
970#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 890#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
971#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 891#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
972#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 892#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
973#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 893#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
974#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 894#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
975#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 895#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
976#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 896#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
977#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 897#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
978#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 898#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
979#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 899#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
980#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 900#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
981#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ 901#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
982#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ 902#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
983#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ 903#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
984#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ 904#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
985#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ 905#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
986#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ 906#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
987#define E1000_COLC 0x04028 /* Collision Count - R/clr */ 907#define E1000_COLC 0x04028 /* Collision Count - R/clr */
988#define E1000_DC 0x04030 /* Defer Count - R/clr */ 908#define E1000_DC 0x04030 /* Defer Count - R/clr */
989#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ 909#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
990#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ 910#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
991#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ 911#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
992#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ 912#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
993#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ 913#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
994#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ 914#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
995#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ 915#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
996#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ 916#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
997#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ 917#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
998#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ 918#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
999#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ 919#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
1000#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ 920#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
1001#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ 921#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
1002#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ 922#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
1003#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ 923#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
1004#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ 924#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
1005#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ 925#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
1006#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ 926#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
1007#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ 927#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
1008#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ 928#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
1009#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ 929#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
1010#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ 930#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
1011#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ 931#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
1012#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ 932#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
1013#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ 933#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
1014#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ 934#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
1015#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ 935#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
1016#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ 936#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
1017#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ 937#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
1018#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ 938#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
1019#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ 939#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
1020#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ 940#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
1021#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ 941#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
1022#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ 942#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
1023#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ 943#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
1024#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ 944#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
1025#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ 945#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
1026#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ 946#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
1027#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ 947#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
1028#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ 948#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
1029#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ 949#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
1030#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ 950#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
1031#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ 951#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
1032#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ 952#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
1033#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ 953#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
1034#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ 954#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
1035#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ 955#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
1036#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ 956#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
1037#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ 957#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
1038#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ 958#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
1039#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ 959#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
1040#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ 960#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
1041#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ 961#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
1042#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ 962#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
1043#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ 963#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
1044#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ 964#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
1045#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ 965#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
1046#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ 966#define E1000_RFCTL 0x05008 /* Receive Filter Control */
1047#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 967#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
1048#define E1000_RA 0x05400 /* Receive Address - RW Array */ 968#define E1000_RA 0x05400 /* Receive Address - RW Array */
1049#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 969#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
1050#define E1000_WUC 0x05800 /* Wakeup Control - RW */ 970#define E1000_WUC 0x05800 /* Wakeup Control - RW */
1051#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ 971#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
1052#define E1000_WUS 0x05810 /* Wakeup Status - RO */ 972#define E1000_WUS 0x05810 /* Wakeup Status - RO */
1053#define E1000_MANC 0x05820 /* Management Control - RW */ 973#define E1000_MANC 0x05820 /* Management Control - RW */
1054#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ 974#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
1055#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ 975#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
1056#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ 976#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
1057#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ 977#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
1058#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ 978#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
1059#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ 979#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
1060#define E1000_HOST_IF 0x08800 /* Host Interface */ 980#define E1000_HOST_IF 0x08800 /* Host Interface */
1061#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ 981#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
1062#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ 982#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
1063 983
1064#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ 984#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
1065#define E1000_MDPHYA 0x0003C /* PHY address - RW */ 985#define E1000_MDPHYA 0x0003C /* PHY address - RW */
1066#define E1000_MANC2H 0x05860 /* Managment Control To Host - RW */ 986#define E1000_MANC2H 0x05860 /* Managment Control To Host - RW */
1067#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ 987#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
1068 988
1069#define E1000_GCR 0x05B00 /* PCI-Ex Control */ 989#define E1000_GCR 0x05B00 /* PCI-Ex Control */
1070#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ 990#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
1071#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ 991#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
1072#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ 992#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
1073#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ 993#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
1074#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ 994#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
1075#define E1000_SWSM 0x05B50 /* SW Semaphore */ 995#define E1000_SWSM 0x05B50 /* SW Semaphore */
1076#define E1000_FWSM 0x05B54 /* FW Semaphore */ 996#define E1000_FWSM 0x05B54 /* FW Semaphore */
1077#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ 997#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
1078#define E1000_HICR 0x08F00 /* Host Inteface Control */ 998#define E1000_HICR 0x08F00 /* Host Interface Control */
1079 999
1080/* RSS registers */ 1000/* RSS registers */
1081#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ 1001#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
1082#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ 1002#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
1083#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ 1003#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
1084#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ 1004#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
1085#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ 1005#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
1086#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ 1006#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
1087/* Register Set (82542) 1007/* Register Set (82542)
1088 * 1008 *
1089 * Some of the 82542 registers are located at different offsets than they are 1009 * Some of the 82542 registers are located at different offsets than they are
@@ -1123,19 +1043,19 @@ struct e1000_ffvt_entry {
1123#define E1000_82542_RDLEN0 E1000_82542_RDLEN 1043#define E1000_82542_RDLEN0 E1000_82542_RDLEN
1124#define E1000_82542_RDH0 E1000_82542_RDH 1044#define E1000_82542_RDH0 E1000_82542_RDH
1125#define E1000_82542_RDT0 E1000_82542_RDT 1045#define E1000_82542_RDT0 E1000_82542_RDT
1126#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication 1046#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
1127 * RX Control - RW */ 1047 * RX Control - RW */
1128#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) 1048#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
1129#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ 1049#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
1130#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ 1050#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */
1131#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ 1051#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
1132#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ 1052#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
1133#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ 1053#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
1134#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ 1054#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
1135#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ 1055#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
1136#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ 1056#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
1137#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ 1057#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
1138#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ 1058#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
1139#define E1000_82542_RDTR1 0x00130 1059#define E1000_82542_RDTR1 0x00130
1140#define E1000_82542_RDBAL1 0x00138 1060#define E1000_82542_RDBAL1 0x00138
1141#define E1000_82542_RDBAH1 0x0013C 1061#define E1000_82542_RDBAH1 0x0013C
@@ -1302,288 +1222,281 @@ struct e1000_ffvt_entry {
1302#define E1000_82542_RSSIR E1000_RSSIR 1222#define E1000_82542_RSSIR E1000_RSSIR
1303#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA 1223#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
1304#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC 1224#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
1305#define E1000_82542_MANC2H E1000_MANC2H
1306 1225
1307/* Statistics counters collected by the MAC */ 1226/* Statistics counters collected by the MAC */
1308struct e1000_hw_stats { 1227struct e1000_hw_stats {
1309 u64 crcerrs; 1228 u64 crcerrs;
1310 u64 algnerrc; 1229 u64 algnerrc;
1311 u64 symerrs; 1230 u64 symerrs;
1312 u64 rxerrc; 1231 u64 rxerrc;
1313 u64 txerrc; 1232 u64 txerrc;
1314 u64 mpc; 1233 u64 mpc;
1315 u64 scc; 1234 u64 scc;
1316 u64 ecol; 1235 u64 ecol;
1317 u64 mcc; 1236 u64 mcc;
1318 u64 latecol; 1237 u64 latecol;
1319 u64 colc; 1238 u64 colc;
1320 u64 dc; 1239 u64 dc;
1321 u64 tncrs; 1240 u64 tncrs;
1322 u64 sec; 1241 u64 sec;
1323 u64 cexterr; 1242 u64 cexterr;
1324 u64 rlec; 1243 u64 rlec;
1325 u64 xonrxc; 1244 u64 xonrxc;
1326 u64 xontxc; 1245 u64 xontxc;
1327 u64 xoffrxc; 1246 u64 xoffrxc;
1328 u64 xofftxc; 1247 u64 xofftxc;
1329 u64 fcruc; 1248 u64 fcruc;
1330 u64 prc64; 1249 u64 prc64;
1331 u64 prc127; 1250 u64 prc127;
1332 u64 prc255; 1251 u64 prc255;
1333 u64 prc511; 1252 u64 prc511;
1334 u64 prc1023; 1253 u64 prc1023;
1335 u64 prc1522; 1254 u64 prc1522;
1336 u64 gprc; 1255 u64 gprc;
1337 u64 bprc; 1256 u64 bprc;
1338 u64 mprc; 1257 u64 mprc;
1339 u64 gptc; 1258 u64 gptc;
1340 u64 gorcl; 1259 u64 gorcl;
1341 u64 gorch; 1260 u64 gorch;
1342 u64 gotcl; 1261 u64 gotcl;
1343 u64 gotch; 1262 u64 gotch;
1344 u64 rnbc; 1263 u64 rnbc;
1345 u64 ruc; 1264 u64 ruc;
1346 u64 rfc; 1265 u64 rfc;
1347 u64 roc; 1266 u64 roc;
1348 u64 rlerrc; 1267 u64 rlerrc;
1349 u64 rjc; 1268 u64 rjc;
1350 u64 mgprc; 1269 u64 mgprc;
1351 u64 mgpdc; 1270 u64 mgpdc;
1352 u64 mgptc; 1271 u64 mgptc;
1353 u64 torl; 1272 u64 torl;
1354 u64 torh; 1273 u64 torh;
1355 u64 totl; 1274 u64 totl;
1356 u64 toth; 1275 u64 toth;
1357 u64 tpr; 1276 u64 tpr;
1358 u64 tpt; 1277 u64 tpt;
1359 u64 ptc64; 1278 u64 ptc64;
1360 u64 ptc127; 1279 u64 ptc127;
1361 u64 ptc255; 1280 u64 ptc255;
1362 u64 ptc511; 1281 u64 ptc511;
1363 u64 ptc1023; 1282 u64 ptc1023;
1364 u64 ptc1522; 1283 u64 ptc1522;
1365 u64 mptc; 1284 u64 mptc;
1366 u64 bptc; 1285 u64 bptc;
1367 u64 tsctc; 1286 u64 tsctc;
1368 u64 tsctfc; 1287 u64 tsctfc;
1369 u64 iac; 1288 u64 iac;
1370 u64 icrxptc; 1289 u64 icrxptc;
1371 u64 icrxatc; 1290 u64 icrxatc;
1372 u64 ictxptc; 1291 u64 ictxptc;
1373 u64 ictxatc; 1292 u64 ictxatc;
1374 u64 ictxqec; 1293 u64 ictxqec;
1375 u64 ictxqmtc; 1294 u64 ictxqmtc;
1376 u64 icrxdmtc; 1295 u64 icrxdmtc;
1377 u64 icrxoc; 1296 u64 icrxoc;
1378}; 1297};
1379 1298
1380/* Structure containing variables used by the shared code (e1000_hw.c) */ 1299/* Structure containing variables used by the shared code (e1000_hw.c) */
1381struct e1000_hw { 1300struct e1000_hw {
1382 u8 __iomem *hw_addr; 1301 u8 __iomem *hw_addr;
1383 u8 __iomem *flash_address; 1302 u8 __iomem *flash_address;
1384 e1000_mac_type mac_type; 1303 e1000_mac_type mac_type;
1385 e1000_phy_type phy_type; 1304 e1000_phy_type phy_type;
1386 u32 phy_init_script; 1305 u32 phy_init_script;
1387 e1000_media_type media_type; 1306 e1000_media_type media_type;
1388 void *back; 1307 void *back;
1389 struct e1000_shadow_ram *eeprom_shadow_ram; 1308 struct e1000_shadow_ram *eeprom_shadow_ram;
1390 u32 flash_bank_size; 1309 u32 flash_bank_size;
1391 u32 flash_base_addr; 1310 u32 flash_base_addr;
1392 e1000_fc_type fc; 1311 e1000_fc_type fc;
1393 e1000_bus_speed bus_speed; 1312 e1000_bus_speed bus_speed;
1394 e1000_bus_width bus_width; 1313 e1000_bus_width bus_width;
1395 e1000_bus_type bus_type; 1314 e1000_bus_type bus_type;
1396 struct e1000_eeprom_info eeprom; 1315 struct e1000_eeprom_info eeprom;
1397 e1000_ms_type master_slave; 1316 e1000_ms_type master_slave;
1398 e1000_ms_type original_master_slave; 1317 e1000_ms_type original_master_slave;
1399 e1000_ffe_config ffe_config_state; 1318 e1000_ffe_config ffe_config_state;
1400 u32 asf_firmware_present; 1319 u32 asf_firmware_present;
1401 u32 eeprom_semaphore_present; 1320 u32 eeprom_semaphore_present;
1402 u32 swfw_sync_present; 1321 unsigned long io_base;
1403 u32 swfwhw_semaphore_present; 1322 u32 phy_id;
1404 unsigned long io_base; 1323 u32 phy_revision;
1405 u32 phy_id; 1324 u32 phy_addr;
1406 u32 phy_revision; 1325 u32 original_fc;
1407 u32 phy_addr; 1326 u32 txcw;
1408 u32 original_fc; 1327 u32 autoneg_failed;
1409 u32 txcw; 1328 u32 max_frame_size;
1410 u32 autoneg_failed; 1329 u32 min_frame_size;
1411 u32 max_frame_size; 1330 u32 mc_filter_type;
1412 u32 min_frame_size; 1331 u32 num_mc_addrs;
1413 u32 mc_filter_type; 1332 u32 collision_delta;
1414 u32 num_mc_addrs; 1333 u32 tx_packet_delta;
1415 u32 collision_delta; 1334 u32 ledctl_default;
1416 u32 tx_packet_delta; 1335 u32 ledctl_mode1;
1417 u32 ledctl_default; 1336 u32 ledctl_mode2;
1418 u32 ledctl_mode1; 1337 bool tx_pkt_filtering;
1419 u32 ledctl_mode2;
1420 bool tx_pkt_filtering;
1421 struct e1000_host_mng_dhcp_cookie mng_cookie; 1338 struct e1000_host_mng_dhcp_cookie mng_cookie;
1422 u16 phy_spd_default; 1339 u16 phy_spd_default;
1423 u16 autoneg_advertised; 1340 u16 autoneg_advertised;
1424 u16 pci_cmd_word; 1341 u16 pci_cmd_word;
1425 u16 fc_high_water; 1342 u16 fc_high_water;
1426 u16 fc_low_water; 1343 u16 fc_low_water;
1427 u16 fc_pause_time; 1344 u16 fc_pause_time;
1428 u16 current_ifs_val; 1345 u16 current_ifs_val;
1429 u16 ifs_min_val; 1346 u16 ifs_min_val;
1430 u16 ifs_max_val; 1347 u16 ifs_max_val;
1431 u16 ifs_step_size; 1348 u16 ifs_step_size;
1432 u16 ifs_ratio; 1349 u16 ifs_ratio;
1433 u16 device_id; 1350 u16 device_id;
1434 u16 vendor_id; 1351 u16 vendor_id;
1435 u16 subsystem_id; 1352 u16 subsystem_id;
1436 u16 subsystem_vendor_id; 1353 u16 subsystem_vendor_id;
1437 u8 revision_id; 1354 u8 revision_id;
1438 u8 autoneg; 1355 u8 autoneg;
1439 u8 mdix; 1356 u8 mdix;
1440 u8 forced_speed_duplex; 1357 u8 forced_speed_duplex;
1441 u8 wait_autoneg_complete; 1358 u8 wait_autoneg_complete;
1442 u8 dma_fairness; 1359 u8 dma_fairness;
1443 u8 mac_addr[NODE_ADDRESS_SIZE]; 1360 u8 mac_addr[NODE_ADDRESS_SIZE];
1444 u8 perm_mac_addr[NODE_ADDRESS_SIZE]; 1361 u8 perm_mac_addr[NODE_ADDRESS_SIZE];
1445 bool disable_polarity_correction; 1362 bool disable_polarity_correction;
1446 bool speed_downgraded; 1363 bool speed_downgraded;
1447 e1000_smart_speed smart_speed; 1364 e1000_smart_speed smart_speed;
1448 e1000_dsp_config dsp_config_state; 1365 e1000_dsp_config dsp_config_state;
1449 bool get_link_status; 1366 bool get_link_status;
1450 bool serdes_link_down; 1367 bool serdes_has_link;
1451 bool tbi_compatibility_en; 1368 bool tbi_compatibility_en;
1452 bool tbi_compatibility_on; 1369 bool tbi_compatibility_on;
1453 bool laa_is_present; 1370 bool laa_is_present;
1454 bool phy_reset_disable; 1371 bool phy_reset_disable;
1455 bool initialize_hw_bits_disable; 1372 bool initialize_hw_bits_disable;
1456 bool fc_send_xon; 1373 bool fc_send_xon;
1457 bool fc_strict_ieee; 1374 bool fc_strict_ieee;
1458 bool report_tx_early; 1375 bool report_tx_early;
1459 bool adaptive_ifs; 1376 bool adaptive_ifs;
1460 bool ifs_params_forced; 1377 bool ifs_params_forced;
1461 bool in_ifs_mode; 1378 bool in_ifs_mode;
1462 bool mng_reg_access_disabled; 1379 bool mng_reg_access_disabled;
1463 bool leave_av_bit_off; 1380 bool leave_av_bit_off;
1464 bool kmrn_lock_loss_workaround_disabled; 1381 bool bad_tx_carr_stats_fd;
1465 bool bad_tx_carr_stats_fd; 1382 bool has_smbus;
1466 bool has_manc2h;
1467 bool rx_needs_kicking;
1468 bool has_smbus;
1469}; 1383};
1470 1384
1471 1385#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
1472#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ 1386#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
1473#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ 1387#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
1474#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ 1388#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
1475#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ 1389#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
1476#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ 1390#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
1477#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ 1391#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
1478#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ 1392#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
1479#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
1480/* Register Bit Masks */ 1393/* Register Bit Masks */
1481/* Device Control */ 1394/* Device Control */
1482#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 1395#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
1483#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ 1396#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
1484#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ 1397#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
1485#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ 1398#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
1486#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ 1399#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
1487#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ 1400#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
1488#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ 1401#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
1489#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ 1402#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
1490#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ 1403#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
1491#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ 1404#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
1492#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ 1405#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
1493#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ 1406#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
1494#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ 1407#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
1495#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ 1408#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
1496#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ 1409#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
1497#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 1410#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
1498#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1411#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
1499#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ 1412#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
1500#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ 1413#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
1501#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ 1414#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
1502#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ 1415#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
1503#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1416#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
1504#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1417#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
1505#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ 1418#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
1506#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ 1419#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
1507#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 1420#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
1508#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ 1421#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
1509#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ 1422#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
1510#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ 1423#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
1511#define E1000_CTRL_RST 0x04000000 /* Global reset */ 1424#define E1000_CTRL_RST 0x04000000 /* Global reset */
1512#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ 1425#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
1513#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ 1426#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
1514#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ 1427#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
1515#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ 1428#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
1516#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ 1429#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
1517#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ 1430#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
1518 1431
1519/* Device Status */ 1432/* Device Status */
1520#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ 1433#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
1521#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ 1434#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
1522#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ 1435#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
1523#define E1000_STATUS_FUNC_SHIFT 2 1436#define E1000_STATUS_FUNC_SHIFT 2
1524#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ 1437#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
1525#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 1438#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
1526#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ 1439#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
1527#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ 1440#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
1528#define E1000_STATUS_SPEED_MASK 0x000000C0 1441#define E1000_STATUS_SPEED_MASK 0x000000C0
1529#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ 1442#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
1530#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 1443#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
1531#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 1444#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
1532#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion 1445#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
1533 by EEPROM/Flash */ 1446 by EEPROM/Flash */
1534#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ 1447#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
1535#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ 1448#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
1536#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ 1449#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
1537#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ 1450#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
1538#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ 1451#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
1539#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ 1452#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
1540#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ 1453#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
1541#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ 1454#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
1542#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ 1455#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
1543#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ 1456#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
1544#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ 1457#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
1545#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ 1458#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
1546#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ 1459#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */
1547#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ 1460#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
1548#define E1000_STATUS_FUSE_8 0x04000000 1461#define E1000_STATUS_FUSE_8 0x04000000
1549#define E1000_STATUS_FUSE_9 0x08000000 1462#define E1000_STATUS_FUSE_9 0x08000000
1550#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ 1463#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
1551#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ 1464#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
1552 1465
1553/* Constants used to intrepret the masked PCI-X bus speed. */ 1466/* Constants used to interpret the masked PCI-X bus speed. */
1554#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ 1467#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
1555#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ 1468#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
1556#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ 1469#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
1557 1470
1558/* EEPROM/Flash Control */ 1471/* EEPROM/Flash Control */
1559#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ 1472#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
1560#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ 1473#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
1561#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ 1474#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
1562#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ 1475#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
1563#define E1000_EECD_FWE_MASK 0x00000030 1476#define E1000_EECD_FWE_MASK 0x00000030
1564#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ 1477#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
1565#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ 1478#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
1566#define E1000_EECD_FWE_SHIFT 4 1479#define E1000_EECD_FWE_SHIFT 4
1567#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ 1480#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
1568#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ 1481#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
1569#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ 1482#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
1570#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ 1483#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
1571#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type 1484#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
1572 * (0-small, 1-large) */ 1485 * (0-small, 1-large) */
1573#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ 1486#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
1574#ifndef E1000_EEPROM_GRANT_ATTEMPTS 1487#ifndef E1000_EEPROM_GRANT_ATTEMPTS
1575#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1488#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
1576#endif 1489#endif
1577#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ 1490#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
1578#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ 1491#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */
1579#define E1000_EECD_SIZE_EX_SHIFT 11 1492#define E1000_EECD_SIZE_EX_SHIFT 11
1580#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ 1493#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
1581#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ 1494#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
1582#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ 1495#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
1583#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ 1496#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
1584#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ 1497#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
1585#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ 1498#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
1586#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ 1499#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
1587#define E1000_EECD_SECVAL_SHIFT 22 1500#define E1000_EECD_SECVAL_SHIFT 22
1588#define E1000_STM_OPCODE 0xDB00 1501#define E1000_STM_OPCODE 0xDB00
1589#define E1000_HICR_FW_RESET 0xC0 1502#define E1000_HICR_FW_RESET 0xC0
@@ -1593,12 +1506,12 @@ struct e1000_hw {
1593#define E1000_ICH_NVM_SIG_MASK 0xC0 1506#define E1000_ICH_NVM_SIG_MASK 0xC0
1594 1507
1595/* EEPROM Read */ 1508/* EEPROM Read */
1596#define E1000_EERD_START 0x00000001 /* Start Read */ 1509#define E1000_EERD_START 0x00000001 /* Start Read */
1597#define E1000_EERD_DONE 0x00000010 /* Read Done */ 1510#define E1000_EERD_DONE 0x00000010 /* Read Done */
1598#define E1000_EERD_ADDR_SHIFT 8 1511#define E1000_EERD_ADDR_SHIFT 8
1599#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ 1512#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */
1600#define E1000_EERD_DATA_SHIFT 16 1513#define E1000_EERD_DATA_SHIFT 16
1601#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ 1514#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
1602 1515
1603/* SPI EEPROM Status Register */ 1516/* SPI EEPROM Status Register */
1604#define EEPROM_STATUS_RDY_SPI 0x01 1517#define EEPROM_STATUS_RDY_SPI 0x01
@@ -1608,25 +1521,25 @@ struct e1000_hw {
1608#define EEPROM_STATUS_WPEN_SPI 0x80 1521#define EEPROM_STATUS_WPEN_SPI 0x80
1609 1522
1610/* Extended Device Control */ 1523/* Extended Device Control */
1611#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ 1524#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
1612#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ 1525#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
1613#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN 1526#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
1614#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ 1527#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
1615#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ 1528#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
1616#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ 1529#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */
1617#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ 1530#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */
1618#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA 1531#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
1619#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ 1532#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */
1620#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ 1533#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
1621#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ 1534#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
1622#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ 1535#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
1623#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ 1536#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
1624#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ 1537#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
1625#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ 1538#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
1626#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 1539#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
1627#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ 1540#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
1628#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ 1541#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
1629#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 1542#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
1630#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 1543#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
1631#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 1544#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
1632#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 1545#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
@@ -1638,11 +1551,11 @@ struct e1000_hw {
1638#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1551#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
1639#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1552#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
1640#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1553#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
1641#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ 1554#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
1642#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 1555#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
1643#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 1556#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
1644#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ 1557#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */
1645#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ 1558#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */
1646#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 1559#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
1647 1560
1648/* MDI Control */ 1561/* MDI Control */
@@ -1742,167 +1655,167 @@ struct e1000_hw {
1742#define E1000_LEDCTL_MODE_LED_OFF 0xF 1655#define E1000_LEDCTL_MODE_LED_OFF 0xF
1743 1656
1744/* Receive Address */ 1657/* Receive Address */
1745#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ 1658#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
1746 1659
1747/* Interrupt Cause Read */ 1660/* Interrupt Cause Read */
1748#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ 1661#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
1749#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ 1662#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
1750#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ 1663#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
1751#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ 1664#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
1752#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 1665#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
1753#define E1000_ICR_RXO 0x00000040 /* rx overrun */ 1666#define E1000_ICR_RXO 0x00000040 /* rx overrun */
1754#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 1667#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
1755#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ 1668#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
1756#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ 1669#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
1757#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ 1670#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
1758#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ 1671#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
1759#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ 1672#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
1760#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ 1673#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
1761#define E1000_ICR_TXD_LOW 0x00008000 1674#define E1000_ICR_TXD_LOW 0x00008000
1762#define E1000_ICR_SRPD 0x00010000 1675#define E1000_ICR_SRPD 0x00010000
1763#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ 1676#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
1764#define E1000_ICR_MNG 0x00040000 /* Manageability event */ 1677#define E1000_ICR_MNG 0x00040000 /* Manageability event */
1765#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ 1678#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
1766#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 1679#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
1767#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ 1680#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
1768#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ 1681#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
1769#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ 1682#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
1770#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ 1683#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
1771#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ 1684#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
1772#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ 1685#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
1773#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ 1686#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
1774#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ 1687#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
1775#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ 1688#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
1776#define E1000_ICR_EPRST 0x00100000 /* ME handware reset occurs */ 1689#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
1777 1690
1778/* Interrupt Cause Set */ 1691/* Interrupt Cause Set */
1779#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1692#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
1780#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ 1693#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
1781#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 1694#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
1782#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 1695#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
1783#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 1696#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
1784#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ 1697#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
1785#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 1698#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
1786#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ 1699#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
1787#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ 1700#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
1788#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ 1701#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
1789#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ 1702#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
1790#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ 1703#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
1791#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1704#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1792#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW 1705#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
1793#define E1000_ICS_SRPD E1000_ICR_SRPD 1706#define E1000_ICS_SRPD E1000_ICR_SRPD
1794#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1707#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
1795#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ 1708#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
1796#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1709#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
1797#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ 1710#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1798#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ 1711#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1799#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ 1712#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1800#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1713#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1801#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1714#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1802#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1715#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1803#define E1000_ICS_DSW E1000_ICR_DSW 1716#define E1000_ICS_DSW E1000_ICR_DSW
1804#define E1000_ICS_PHYINT E1000_ICR_PHYINT 1717#define E1000_ICS_PHYINT E1000_ICR_PHYINT
1805#define E1000_ICS_EPRST E1000_ICR_EPRST 1718#define E1000_ICS_EPRST E1000_ICR_EPRST
1806 1719
1807/* Interrupt Mask Set */ 1720/* Interrupt Mask Set */
1808#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1721#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
1809#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ 1722#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
1810#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 1723#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
1811#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 1724#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
1812#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 1725#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
1813#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ 1726#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
1814#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 1727#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
1815#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ 1728#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
1816#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ 1729#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
1817#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ 1730#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
1818#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ 1731#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
1819#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ 1732#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
1820#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1733#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1821#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW 1734#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
1822#define E1000_IMS_SRPD E1000_ICR_SRPD 1735#define E1000_IMS_SRPD E1000_ICR_SRPD
1823#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1736#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
1824#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ 1737#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
1825#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1738#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
1826#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ 1739#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1827#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ 1740#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1828#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ 1741#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1829#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1742#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1830#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1743#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1831#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1744#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1832#define E1000_IMS_DSW E1000_ICR_DSW 1745#define E1000_IMS_DSW E1000_ICR_DSW
1833#define E1000_IMS_PHYINT E1000_ICR_PHYINT 1746#define E1000_IMS_PHYINT E1000_ICR_PHYINT
1834#define E1000_IMS_EPRST E1000_ICR_EPRST 1747#define E1000_IMS_EPRST E1000_ICR_EPRST
1835 1748
1836/* Interrupt Mask Clear */ 1749/* Interrupt Mask Clear */
1837#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1750#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
1838#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ 1751#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
1839#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ 1752#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
1840#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 1753#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
1841#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 1754#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
1842#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ 1755#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
1843#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 1756#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
1844#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ 1757#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
1845#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ 1758#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
1846#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ 1759#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
1847#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ 1760#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
1848#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ 1761#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
1849#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1762#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1850#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW 1763#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
1851#define E1000_IMC_SRPD E1000_ICR_SRPD 1764#define E1000_IMC_SRPD E1000_ICR_SRPD
1852#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ 1765#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
1853#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ 1766#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
1854#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1767#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
1855#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ 1768#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
1856#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ 1769#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
1857#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ 1770#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
1858#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ 1771#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
1859#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ 1772#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
1860#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ 1773#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
1861#define E1000_IMC_DSW E1000_ICR_DSW 1774#define E1000_IMC_DSW E1000_ICR_DSW
1862#define E1000_IMC_PHYINT E1000_ICR_PHYINT 1775#define E1000_IMC_PHYINT E1000_ICR_PHYINT
1863#define E1000_IMC_EPRST E1000_ICR_EPRST 1776#define E1000_IMC_EPRST E1000_ICR_EPRST
1864 1777
1865/* Receive Control */ 1778/* Receive Control */
1866#define E1000_RCTL_RST 0x00000001 /* Software reset */ 1779#define E1000_RCTL_RST 0x00000001 /* Software reset */
1867#define E1000_RCTL_EN 0x00000002 /* enable */ 1780#define E1000_RCTL_EN 0x00000002 /* enable */
1868#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ 1781#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
1869#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ 1782#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
1870#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ 1783#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
1871#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ 1784#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
1872#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ 1785#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
1873#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 1786#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
1874#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ 1787#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
1875#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 1788#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
1876#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ 1789#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
1877#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ 1790#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
1878#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 1791#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
1879#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ 1792#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
1880#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ 1793#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
1881#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ 1794#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
1882#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ 1795#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
1883#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ 1796#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
1884#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ 1797#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
1885#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ 1798#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
1886#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ 1799#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
1887#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ 1800#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
1888/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ 1801/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
1889#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ 1802#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
1890#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ 1803#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
1891#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ 1804#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
1892#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ 1805#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
1893/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ 1806/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
1894#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ 1807#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
1895#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ 1808#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
1896#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ 1809#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
1897#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ 1810#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
1898#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ 1811#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
1899#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ 1812#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
1900#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ 1813#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
1901#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 1814#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
1902#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 1815#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
1903#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 1816#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
1904#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ 1817#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
1905#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ 1818#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
1906 1819
1907/* Use byte values for the following shift parameters 1820/* Use byte values for the following shift parameters
1908 * Usage: 1821 * Usage:
@@ -1925,10 +1838,10 @@ struct e1000_hw {
1925#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 1838#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
1926#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 1839#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
1927 1840
1928#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ 1841#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
1929#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ 1842#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
1930#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ 1843#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
1931#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ 1844#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
1932 1845
1933/* SW_W_SYNC definitions */ 1846/* SW_W_SYNC definitions */
1934#define E1000_SWFW_EEP_SM 0x0001 1847#define E1000_SWFW_EEP_SM 0x0001
@@ -1937,17 +1850,17 @@ struct e1000_hw {
1937#define E1000_SWFW_MAC_CSR_SM 0x0008 1850#define E1000_SWFW_MAC_CSR_SM 0x0008
1938 1851
1939/* Receive Descriptor */ 1852/* Receive Descriptor */
1940#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ 1853#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
1941#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ 1854#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
1942#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ 1855#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
1943#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ 1856#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
1944#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ 1857#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
1945 1858
1946/* Flow Control */ 1859/* Flow Control */
1947#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ 1860#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
1948#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ 1861#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
1949#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ 1862#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
1950#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 1863#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
1951 1864
1952/* Header split receive */ 1865/* Header split receive */
1953#define E1000_RFCTL_ISCSI_DIS 0x00000001 1866#define E1000_RFCTL_ISCSI_DIS 0x00000001
@@ -1967,66 +1880,64 @@ struct e1000_hw {
1967#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 1880#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
1968 1881
1969/* Receive Descriptor Control */ 1882/* Receive Descriptor Control */
1970#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ 1883#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
1971#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ 1884#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
1972#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ 1885#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
1973#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ 1886#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
1974 1887
1975/* Transmit Descriptor Control */ 1888/* Transmit Descriptor Control */
1976#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ 1889#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
1977#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ 1890#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
1978#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ 1891#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
1979#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ 1892#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
1980#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ 1893#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
1981#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1894#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
1982#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1895#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
1983 still to be processed. */ 1896 still to be processed. */
1984/* Transmit Configuration Word */ 1897/* Transmit Configuration Word */
1985#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ 1898#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
1986#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ 1899#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
1987#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ 1900#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
1988#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ 1901#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
1989#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ 1902#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
1990#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ 1903#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
1991#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ 1904#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
1992#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ 1905#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
1993#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ 1906#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
1994#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ 1907#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
1995 1908
1996/* Receive Configuration Word */ 1909/* Receive Configuration Word */
1997#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ 1910#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
1998#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ 1911#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
1999#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ 1912#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
2000#define E1000_RXCW_CC 0x10000000 /* Receive config change */ 1913#define E1000_RXCW_CC 0x10000000 /* Receive config change */
2001#define E1000_RXCW_C 0x20000000 /* Receive config */ 1914#define E1000_RXCW_C 0x20000000 /* Receive config */
2002#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ 1915#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
2003#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ 1916#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
2004 1917
2005/* Transmit Control */ 1918/* Transmit Control */
2006#define E1000_TCTL_RST 0x00000001 /* software reset */ 1919#define E1000_TCTL_RST 0x00000001 /* software reset */
2007#define E1000_TCTL_EN 0x00000002 /* enable tx */ 1920#define E1000_TCTL_EN 0x00000002 /* enable tx */
2008#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ 1921#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
2009#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ 1922#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
2010#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ 1923#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
2011#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ 1924#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
2012#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ 1925#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
2013#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ 1926#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
2014#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 1927#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
2015#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ 1928#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
2016#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ 1929#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
2017/* Extended Transmit Control */ 1930/* Extended Transmit Control */
2018#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ 1931#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
2019#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ 1932#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
2020
2021#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000
2022 1933
2023/* Receive Checksum Control */ 1934/* Receive Checksum Control */
2024#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ 1935#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
2025#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ 1936#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
2026#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 1937#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
2027#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ 1938#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
2028#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ 1939#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
2029#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ 1940#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
2030 1941
2031/* Multiple Receive Queue Control */ 1942/* Multiple Receive Queue Control */
2032#define E1000_MRQC_ENABLE_MASK 0x00000003 1943#define E1000_MRQC_ENABLE_MASK 0x00000003
@@ -2042,141 +1953,141 @@ struct e1000_hw {
2042 1953
2043/* Definitions for power management and wakeup registers */ 1954/* Definitions for power management and wakeup registers */
2044/* Wake Up Control */ 1955/* Wake Up Control */
2045#define E1000_WUC_APME 0x00000001 /* APM Enable */ 1956#define E1000_WUC_APME 0x00000001 /* APM Enable */
2046#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ 1957#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
2047#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ 1958#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
2048#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ 1959#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
2049#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ 1960#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
2050 1961
2051/* Wake Up Filter Control */ 1962/* Wake Up Filter Control */
2052#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 1963#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
2053#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ 1964#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
2054#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ 1965#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
2055#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ 1966#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
2056#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ 1967#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
2057#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 1968#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
2058#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ 1969#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
2059#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ 1970#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
2060#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ 1971#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
2061#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ 1972#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
2062#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ 1973#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
2063#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ 1974#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
2064#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ 1975#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
2065#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ 1976#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
2066#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ 1977#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
2067#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ 1978#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
2068 1979
2069/* Wake Up Status */ 1980/* Wake Up Status */
2070#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ 1981#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
2071#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ 1982#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
2072#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ 1983#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
2073#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ 1984#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
2074#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ 1985#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
2075#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ 1986#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
2076#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ 1987#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
2077#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ 1988#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
2078#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ 1989#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
2079#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ 1990#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
2080#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ 1991#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
2081#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ 1992#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
2082#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ 1993#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
2083 1994
2084/* Management Control */ 1995/* Management Control */
2085#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ 1996#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
2086#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ 1997#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
2087#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ 1998#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
2088#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ 1999#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
2089#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ 2000#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
2090#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ 2001#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
2091#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ 2002#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
2092#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ 2003#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
2093#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 2004#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
2094#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery 2005#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
2095 * Filtering */ 2006 * Filtering */
2096#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ 2007#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
2097#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ 2008#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
2098#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 2009#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
2099#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ 2010#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
2100#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ 2011#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
2101#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 2012#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
2102#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 2013#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
2103 * filtering */ 2014 * filtering */
2104#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 2015#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
2105 * memory */ 2016 * memory */
2106#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address 2017#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
2107 * filtering */ 2018 * filtering */
2108#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ 2019#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
2109#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ 2020#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
2110#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ 2021#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
2111#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ 2022#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
2112#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ 2023#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
2113#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ 2024#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
2114#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ 2025#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
2115#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ 2026#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
2116 2027
2117#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ 2028#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
2118#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ 2029#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
2119 2030
2120/* SW Semaphore Register */ 2031/* SW Semaphore Register */
2121#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 2032#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
2122#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 2033#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
2123#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ 2034#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
2124#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ 2035#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
2125 2036
2126/* FW Semaphore Register */ 2037/* FW Semaphore Register */
2127#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ 2038#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
2128#define E1000_FWSM_MODE_SHIFT 1 2039#define E1000_FWSM_MODE_SHIFT 1
2129#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ 2040#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
2130 2041
2131#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ 2042#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
2132#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ 2043#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
2133#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ 2044#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
2134#define E1000_FWSM_SKUEL_SHIFT 29 2045#define E1000_FWSM_SKUEL_SHIFT 29
2135#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ 2046#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
2136#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ 2047#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
2137#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ 2048#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
2138#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ 2049#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */
2139 2050
2140/* FFLT Debug Register */ 2051/* FFLT Debug Register */
2141#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ 2052#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
2142 2053
2143typedef enum { 2054typedef enum {
2144 e1000_mng_mode_none = 0, 2055 e1000_mng_mode_none = 0,
2145 e1000_mng_mode_asf, 2056 e1000_mng_mode_asf,
2146 e1000_mng_mode_pt, 2057 e1000_mng_mode_pt,
2147 e1000_mng_mode_ipmi, 2058 e1000_mng_mode_ipmi,
2148 e1000_mng_mode_host_interface_only 2059 e1000_mng_mode_host_interface_only
2149} e1000_mng_mode; 2060} e1000_mng_mode;
2150 2061
2151/* Host Inteface Control Register */ 2062/* Host Interface Control Register */
2152#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ 2063#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
2153#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done 2064#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done
2154 * to put command in RAM */ 2065 * to put command in RAM */
2155#define E1000_HICR_SV 0x00000004 /* Status Validity */ 2066#define E1000_HICR_SV 0x00000004 /* Status Validity */
2156#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */ 2067#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
2157 2068
2158/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ 2069/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
2159#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ 2070#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
2160#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ 2071#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
2161#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ 2072#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
2162#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ 2073#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
2163 2074
2164struct e1000_host_command_header { 2075struct e1000_host_command_header {
2165 u8 command_id; 2076 u8 command_id;
2166 u8 command_length; 2077 u8 command_length;
2167 u8 command_options; /* I/F bits for command, status for return */ 2078 u8 command_options; /* I/F bits for command, status for return */
2168 u8 checksum; 2079 u8 checksum;
2169}; 2080};
2170struct e1000_host_command_info { 2081struct e1000_host_command_info {
2171 struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 2082 struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
2172 u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ 2083 u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */
2173}; 2084};
2174 2085
2175/* Host SMB register #0 */ 2086/* Host SMB register #0 */
2176#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ 2087#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
2177#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ 2088#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
2178#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ 2089#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
2179#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ 2090#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
2180 2091
2181/* Host SMB register #1 */ 2092/* Host SMB register #1 */
2182#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN 2093#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
@@ -2185,10 +2096,10 @@ struct e1000_host_command_info {
2185#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT 2096#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
2186 2097
2187/* FW Status Register */ 2098/* FW Status Register */
2188#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ 2099#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
2189 2100
2190/* Wake Up Packet Length */ 2101/* Wake Up Packet Length */
2191#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ 2102#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
2192 2103
2193#define E1000_MDALIGN 4096 2104#define E1000_MDALIGN 4096
2194 2105
@@ -2242,24 +2153,24 @@ struct e1000_host_command_info {
2242#define PCI_EX_LINK_WIDTH_SHIFT 4 2153#define PCI_EX_LINK_WIDTH_SHIFT 4
2243 2154
2244/* EEPROM Commands - Microwire */ 2155/* EEPROM Commands - Microwire */
2245#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ 2156#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
2246#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */ 2157#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
2247#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ 2158#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
2248#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ 2159#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
2249#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erast/write disable */ 2160#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
2250 2161
2251/* EEPROM Commands - SPI */ 2162/* EEPROM Commands - SPI */
2252#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ 2163#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
2253#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ 2164#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
2254#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ 2165#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
2255#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ 2166#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
2256#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ 2167#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
2257#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ 2168#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
2258#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ 2169#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
2259#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ 2170#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
2260#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ 2171#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
2261#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ 2172#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
2262#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ 2173#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
2263 2174
2264/* EEPROM Size definitions */ 2175/* EEPROM Size definitions */
2265#define EEPROM_WORD_SIZE_SHIFT 6 2176#define EEPROM_WORD_SIZE_SHIFT 6
@@ -2270,7 +2181,7 @@ struct e1000_host_command_info {
2270#define EEPROM_COMPAT 0x0003 2181#define EEPROM_COMPAT 0x0003
2271#define EEPROM_ID_LED_SETTINGS 0x0004 2182#define EEPROM_ID_LED_SETTINGS 0x0004
2272#define EEPROM_VERSION 0x0005 2183#define EEPROM_VERSION 0x0005
2273#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ 2184#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
2274#define EEPROM_PHY_CLASS_WORD 0x0007 2185#define EEPROM_PHY_CLASS_WORD 0x0007
2275#define EEPROM_INIT_CONTROL1_REG 0x000A 2186#define EEPROM_INIT_CONTROL1_REG 0x000A
2276#define EEPROM_INIT_CONTROL2_REG 0x000F 2187#define EEPROM_INIT_CONTROL2_REG 0x000F
@@ -2283,22 +2194,16 @@ struct e1000_host_command_info {
2283#define EEPROM_FLASH_VERSION 0x0032 2194#define EEPROM_FLASH_VERSION 0x0032
2284#define EEPROM_CHECKSUM_REG 0x003F 2195#define EEPROM_CHECKSUM_REG 0x003F
2285 2196
2286#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ 2197#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
2287#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ 2198#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
2288 2199
2289/* Word definitions for ID LED Settings */ 2200/* Word definitions for ID LED Settings */
2290#define ID_LED_RESERVED_0000 0x0000 2201#define ID_LED_RESERVED_0000 0x0000
2291#define ID_LED_RESERVED_FFFF 0xFFFF 2202#define ID_LED_RESERVED_FFFF 0xFFFF
2292#define ID_LED_RESERVED_82573 0xF746
2293#define ID_LED_DEFAULT_82573 0x1811
2294#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ 2203#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
2295 (ID_LED_OFF1_OFF2 << 8) | \ 2204 (ID_LED_OFF1_OFF2 << 8) | \
2296 (ID_LED_DEF1_DEF2 << 4) | \ 2205 (ID_LED_DEF1_DEF2 << 4) | \
2297 (ID_LED_DEF1_DEF2)) 2206 (ID_LED_DEF1_DEF2))
2298#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
2299 (ID_LED_DEF1_OFF2 << 8) | \
2300 (ID_LED_DEF1_ON2 << 4) | \
2301 (ID_LED_DEF1_DEF2))
2302#define ID_LED_DEF1_DEF2 0x1 2207#define ID_LED_DEF1_DEF2 0x1
2303#define ID_LED_DEF1_ON2 0x2 2208#define ID_LED_DEF1_ON2 0x2
2304#define ID_LED_DEF1_OFF2 0x3 2209#define ID_LED_DEF1_OFF2 0x3
@@ -2313,7 +2218,6 @@ struct e1000_host_command_info {
2313#define IGP_ACTIVITY_LED_ENABLE 0x0300 2218#define IGP_ACTIVITY_LED_ENABLE 0x0300
2314#define IGP_LED3_MODE 0x07000000 2219#define IGP_LED3_MODE 0x07000000
2315 2220
2316
2317/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ 2221/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
2318#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F 2222#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F
2319 2223
@@ -2384,11 +2288,8 @@ struct e1000_host_command_info {
2384 2288
2385#define DEFAULT_82542_TIPG_IPGR2 10 2289#define DEFAULT_82542_TIPG_IPGR2 10
2386#define DEFAULT_82543_TIPG_IPGR2 6 2290#define DEFAULT_82543_TIPG_IPGR2 6
2387#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
2388#define E1000_TIPG_IPGR2_SHIFT 20 2291#define E1000_TIPG_IPGR2_SHIFT 20
2389 2292
2390#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
2391#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008
2392#define E1000_TXDMAC_DPP 0x00000001 2293#define E1000_TXDMAC_DPP 0x00000001
2393 2294
2394/* Adaptive IFS defines */ 2295/* Adaptive IFS defines */
@@ -2421,9 +2322,9 @@ struct e1000_host_command_info {
2421#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 2322#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
2422 2323
2423/* PBA constants */ 2324/* PBA constants */
2424#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ 2325#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
2425#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ 2326#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
2426#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 2327#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
2427#define E1000_PBA_20K 0x0014 2328#define E1000_PBA_20K 0x0014
2428#define E1000_PBA_22K 0x0016 2329#define E1000_PBA_22K 0x0016
2429#define E1000_PBA_24K 0x0018 2330#define E1000_PBA_24K 0x0018
@@ -2432,7 +2333,7 @@ struct e1000_host_command_info {
2432#define E1000_PBA_34K 0x0022 2333#define E1000_PBA_34K 0x0022
2433#define E1000_PBA_38K 0x0026 2334#define E1000_PBA_38K 0x0026
2434#define E1000_PBA_40K 0x0028 2335#define E1000_PBA_40K 0x0028
2435#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ 2336#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
2436 2337
2437#define E1000_PBS_16K E1000_PBA_16K 2338#define E1000_PBS_16K E1000_PBA_16K
2438 2339
@@ -2442,9 +2343,9 @@ struct e1000_host_command_info {
2442#define FLOW_CONTROL_TYPE 0x8808 2343#define FLOW_CONTROL_TYPE 0x8808
2443 2344
2444/* The historical defaults for the flow control values are given below. */ 2345/* The historical defaults for the flow control values are given below. */
2445#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ 2346#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
2446#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ 2347#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
2447#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ 2348#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
2448 2349
2449/* PCIX Config space */ 2350/* PCIX Config space */
2450#define PCIX_COMMAND_REGISTER 0xE6 2351#define PCIX_COMMAND_REGISTER 0xE6
@@ -2458,7 +2359,6 @@ struct e1000_host_command_info {
2458#define PCIX_STATUS_HI_MMRBC_4K 0x3 2359#define PCIX_STATUS_HI_MMRBC_4K 0x3
2459#define PCIX_STATUS_HI_MMRBC_2K 0x2 2360#define PCIX_STATUS_HI_MMRBC_2K 0x2
2460 2361
2461
2462/* Number of bits required to shift right the "pause" bits from the 2362/* Number of bits required to shift right the "pause" bits from the
2463 * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. 2363 * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
2464 */ 2364 */
@@ -2479,14 +2379,11 @@ struct e1000_host_command_info {
2479 */ 2379 */
2480#define ILOS_SHIFT 3 2380#define ILOS_SHIFT 3
2481 2381
2482
2483#define RECEIVE_BUFFER_ALIGN_SIZE (256) 2382#define RECEIVE_BUFFER_ALIGN_SIZE (256)
2484 2383
2485/* Number of milliseconds we wait for auto-negotiation to complete */ 2384/* Number of milliseconds we wait for auto-negotiation to complete */
2486#define LINK_UP_TIMEOUT 500 2385#define LINK_UP_TIMEOUT 500
2487 2386
2488/* Number of 100 microseconds we wait for PCI Express master disable */
2489#define MASTER_DISABLE_TIMEOUT 800
2490/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ 2387/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
2491#define AUTO_READ_DONE_TIMEOUT 10 2388#define AUTO_READ_DONE_TIMEOUT 10
2492/* Number of milliseconds we wait for PHY configuration done after MAC reset */ 2389/* Number of milliseconds we wait for PHY configuration done after MAC reset */
@@ -2534,7 +2431,6 @@ struct e1000_host_command_info {
2534 (((length) > (adapter)->min_frame_size) && \ 2431 (((length) > (adapter)->min_frame_size) && \
2535 ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) 2432 ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
2536 2433
2537
2538/* Structures, enums, and macros for the PHY */ 2434/* Structures, enums, and macros for the PHY */
2539 2435
2540/* Bit definitions for the Management Data IO (MDIO) and Management Data 2436/* Bit definitions for the Management Data IO (MDIO) and Management Data
@@ -2551,49 +2447,49 @@ struct e1000_host_command_info {
2551 2447
2552/* PHY 1000 MII Register/Bit Definitions */ 2448/* PHY 1000 MII Register/Bit Definitions */
2553/* PHY Registers defined by IEEE */ 2449/* PHY Registers defined by IEEE */
2554#define PHY_CTRL 0x00 /* Control Register */ 2450#define PHY_CTRL 0x00 /* Control Register */
2555#define PHY_STATUS 0x01 /* Status Regiser */ 2451#define PHY_STATUS 0x01 /* Status Register */
2556#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ 2452#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
2557#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ 2453#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
2558#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ 2454#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
2559#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ 2455#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
2560#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ 2456#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
2561#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ 2457#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
2562#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ 2458#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
2563#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ 2459#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
2564#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ 2460#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
2565#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ 2461#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
2566 2462
2567#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ 2463#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
2568#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ 2464#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
2569 2465
2570/* M88E1000 Specific Registers */ 2466/* M88E1000 Specific Registers */
2571#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 2467#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
2572#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ 2468#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
2573#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ 2469#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
2574#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ 2470#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
2575#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ 2471#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
2576#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ 2472#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
2577 2473
2578#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ 2474#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
2579#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ 2475#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
2580#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ 2476#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
2581#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ 2477#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
2582#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ 2478#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
2583 2479
2584#define IGP01E1000_IEEE_REGS_PAGE 0x0000 2480#define IGP01E1000_IEEE_REGS_PAGE 0x0000
2585#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 2481#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
2586#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 2482#define IGP01E1000_IEEE_FORCE_GIGA 0x0140
2587 2483
2588/* IGP01E1000 Specific Registers */ 2484/* IGP01E1000 Specific Registers */
2589#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ 2485#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
2590#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ 2486#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
2591#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ 2487#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */
2592#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ 2488#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
2593#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ 2489#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
2594#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ 2490#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
2595#define IGP02E1000_PHY_POWER_MGMT 0x19 2491#define IGP02E1000_PHY_POWER_MGMT 0x19
2596#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ 2492#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
2597 2493
2598/* IGP01E1000 AGC Registers - stores the cable length values*/ 2494/* IGP01E1000 AGC Registers - stores the cable length values*/
2599#define IGP01E1000_PHY_AGC_A 0x1172 2495#define IGP01E1000_PHY_AGC_A 0x1172
@@ -2636,192 +2532,119 @@ struct e1000_host_command_info {
2636 2532
2637#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 2533#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
2638 2534
2639/* Bits...
2640 * 15-5: page
2641 * 4-0: register offset
2642 */
2643#define GG82563_PAGE_SHIFT 5
2644#define GG82563_REG(page, reg) \
2645 (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
2646#define GG82563_MIN_ALT_REG 30
2647
2648/* GG82563 Specific Registers */
2649#define GG82563_PHY_SPEC_CTRL \
2650 GG82563_REG(0, 16) /* PHY Specific Control */
2651#define GG82563_PHY_SPEC_STATUS \
2652 GG82563_REG(0, 17) /* PHY Specific Status */
2653#define GG82563_PHY_INT_ENABLE \
2654 GG82563_REG(0, 18) /* Interrupt Enable */
2655#define GG82563_PHY_SPEC_STATUS_2 \
2656 GG82563_REG(0, 19) /* PHY Specific Status 2 */
2657#define GG82563_PHY_RX_ERR_CNTR \
2658 GG82563_REG(0, 21) /* Receive Error Counter */
2659#define GG82563_PHY_PAGE_SELECT \
2660 GG82563_REG(0, 22) /* Page Select */
2661#define GG82563_PHY_SPEC_CTRL_2 \
2662 GG82563_REG(0, 26) /* PHY Specific Control 2 */
2663#define GG82563_PHY_PAGE_SELECT_ALT \
2664 GG82563_REG(0, 29) /* Alternate Page Select */
2665#define GG82563_PHY_TEST_CLK_CTRL \
2666 GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
2667
2668#define GG82563_PHY_MAC_SPEC_CTRL \
2669 GG82563_REG(2, 21) /* MAC Specific Control Register */
2670#define GG82563_PHY_MAC_SPEC_CTRL_2 \
2671 GG82563_REG(2, 26) /* MAC Specific Control 2 */
2672
2673#define GG82563_PHY_DSP_DISTANCE \
2674 GG82563_REG(5, 26) /* DSP Distance */
2675
2676/* Page 193 - Port Control Registers */
2677#define GG82563_PHY_KMRN_MODE_CTRL \
2678 GG82563_REG(193, 16) /* Kumeran Mode Control */
2679#define GG82563_PHY_PORT_RESET \
2680 GG82563_REG(193, 17) /* Port Reset */
2681#define GG82563_PHY_REVISION_ID \
2682 GG82563_REG(193, 18) /* Revision ID */
2683#define GG82563_PHY_DEVICE_ID \
2684 GG82563_REG(193, 19) /* Device ID */
2685#define GG82563_PHY_PWR_MGMT_CTRL \
2686 GG82563_REG(193, 20) /* Power Management Control */
2687#define GG82563_PHY_RATE_ADAPT_CTRL \
2688 GG82563_REG(193, 25) /* Rate Adaptation Control */
2689
2690/* Page 194 - KMRN Registers */
2691#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
2692 GG82563_REG(194, 16) /* FIFO's Control/Status */
2693#define GG82563_PHY_KMRN_CTRL \
2694 GG82563_REG(194, 17) /* Control */
2695#define GG82563_PHY_INBAND_CTRL \
2696 GG82563_REG(194, 18) /* Inband Control */
2697#define GG82563_PHY_KMRN_DIAGNOSTIC \
2698 GG82563_REG(194, 19) /* Diagnostic */
2699#define GG82563_PHY_ACK_TIMEOUTS \
2700 GG82563_REG(194, 20) /* Acknowledge Timeouts */
2701#define GG82563_PHY_ADV_ABILITY \
2702 GG82563_REG(194, 21) /* Advertised Ability */
2703#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
2704 GG82563_REG(194, 23) /* Link Partner Advertised Ability */
2705#define GG82563_PHY_ADV_NEXT_PAGE \
2706 GG82563_REG(194, 24) /* Advertised Next Page */
2707#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
2708 GG82563_REG(194, 25) /* Link Partner Advertised Next page */
2709#define GG82563_PHY_KMRN_MISC \
2710 GG82563_REG(194, 26) /* Misc. */
2711
2712/* PHY Control Register */ 2535/* PHY Control Register */
2713#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ 2536#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
2714#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ 2537#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
2715#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ 2538#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
2716#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ 2539#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
2717#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ 2540#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
2718#define MII_CR_POWER_DOWN 0x0800 /* Power down */ 2541#define MII_CR_POWER_DOWN 0x0800 /* Power down */
2719#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ 2542#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
2720#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ 2543#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
2721#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ 2544#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
2722#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ 2545#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
2723 2546
2724/* PHY Status Register */ 2547/* PHY Status Register */
2725#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ 2548#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
2726#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ 2549#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
2727#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ 2550#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
2728#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ 2551#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
2729#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ 2552#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
2730#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ 2553#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
2731#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ 2554#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
2732#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ 2555#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
2733#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ 2556#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
2734#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ 2557#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
2735#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ 2558#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
2736#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ 2559#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
2737#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ 2560#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
2738#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ 2561#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
2739#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ 2562#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
2740 2563
2741/* Autoneg Advertisement Register */ 2564/* Autoneg Advertisement Register */
2742#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ 2565#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
2743#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ 2566#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
2744#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ 2567#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
2745#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ 2568#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
2746#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ 2569#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
2747#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ 2570#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
2748#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ 2571#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
2749#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ 2572#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
2750#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ 2573#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
2751#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ 2574#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
2752 2575
2753/* Link Partner Ability Register (Base Page) */ 2576/* Link Partner Ability Register (Base Page) */
2754#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ 2577#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
2755#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ 2578#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
2756#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ 2579#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
2757#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ 2580#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
2758#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ 2581#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
2759#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ 2582#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
2760#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ 2583#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
2761#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ 2584#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
2762#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ 2585#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
2763#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ 2586#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
2764#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ 2587#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
2765 2588
2766/* Autoneg Expansion Register */ 2589/* Autoneg Expansion Register */
2767#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ 2590#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
2768#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ 2591#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */
2769#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ 2592#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */
2770#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ 2593#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */
2771#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ 2594#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */
2772 2595
2773/* Next Page TX Register */ 2596/* Next Page TX Register */
2774#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ 2597#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
2775#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges 2598#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
2776 * of different NP 2599 * of different NP
2777 */ 2600 */
2778#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg 2601#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
2779 * 0 = cannot comply with msg 2602 * 0 = cannot comply with msg
2780 */ 2603 */
2781#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ 2604#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
2782#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow 2605#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
2783 * 0 = sending last NP 2606 * 0 = sending last NP
2784 */ 2607 */
2785 2608
2786/* Link Partner Next Page Register */ 2609/* Link Partner Next Page Register */
2787#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ 2610#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
2788#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges 2611#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
2789 * of different NP 2612 * of different NP
2790 */ 2613 */
2791#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg 2614#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
2792 * 0 = cannot comply with msg 2615 * 0 = cannot comply with msg
2793 */ 2616 */
2794#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ 2617#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
2795#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ 2618#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
2796#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow 2619#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
2797 * 0 = sending last NP 2620 * 0 = sending last NP
2798 */ 2621 */
2799 2622
2800/* 1000BASE-T Control Register */ 2623/* 1000BASE-T Control Register */
2801#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ 2624#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
2802#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ 2625#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
2803#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ 2626#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
2804#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ 2627#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
2805 /* 0=DTE device */ 2628 /* 0=DTE device */
2806#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ 2629#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
2807 /* 0=Configure PHY as Slave */ 2630 /* 0=Configure PHY as Slave */
2808#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ 2631#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
2809 /* 0=Automatic Master/Slave config */ 2632 /* 0=Automatic Master/Slave config */
2810#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ 2633#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
2811#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ 2634#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
2812#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ 2635#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
2813#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ 2636#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
2814#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ 2637#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
2815 2638
2816/* 1000BASE-T Status Register */ 2639/* 1000BASE-T Status Register */
2817#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ 2640#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
2818#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ 2641#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
2819#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ 2642#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
2820#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ 2643#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
2821#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ 2644#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
2822#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ 2645#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
2823#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ 2646#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
2824#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ 2647#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
2825#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 2648#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
2826#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 2649#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
2827#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 2650#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
@@ -2829,64 +2652,64 @@ struct e1000_host_command_info {
2829#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 2652#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
2830 2653
2831/* Extended Status Register */ 2654/* Extended Status Register */
2832#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ 2655#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
2833#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ 2656#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
2834#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ 2657#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
2835#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ 2658#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
2836 2659
2837#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ 2660#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */
2838#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ 2661#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */
2839 2662
2840#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ 2663#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */
2841 /* (0=enable, 1=disable) */ 2664 /* (0=enable, 1=disable) */
2842 2665
2843/* M88E1000 PHY Specific Control Register */ 2666/* M88E1000 PHY Specific Control Register */
2844#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ 2667#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
2845#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ 2668#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
2846#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ 2669#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
2847#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 2670#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
2848 * 0=CLK125 toggling 2671 * 0=CLK125 toggling
2849 */ 2672 */
2850#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ 2673#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
2851 /* Manual MDI configuration */ 2674 /* Manual MDI configuration */
2852#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ 2675#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
2853#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 2676#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
2854 * 100BASE-TX/10BASE-T: 2677 * 100BASE-TX/10BASE-T:
2855 * MDI Mode 2678 * MDI Mode
2856 */ 2679 */
2857#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled 2680#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
2858 * all speeds. 2681 * all speeds.
2859 */ 2682 */
2860#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 2683#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
2861 /* 1=Enable Extended 10BASE-T distance 2684 /* 1=Enable Extended 10BASE-T distance
2862 * (Lower 10BASE-T RX Threshold) 2685 * (Lower 10BASE-T RX Threshold)
2863 * 0=Normal 10BASE-T RX Threshold */ 2686 * 0=Normal 10BASE-T RX Threshold */
2864#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 2687#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
2865 /* 1=5-Bit interface in 100BASE-TX 2688 /* 1=5-Bit interface in 100BASE-TX
2866 * 0=MII interface in 100BASE-TX */ 2689 * 0=MII interface in 100BASE-TX */
2867#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ 2690#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
2868#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ 2691#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
2869#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ 2692#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
2870 2693
2871#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 2694#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1
2872#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 2695#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5
2873#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 2696#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
2874 2697
2875/* M88E1000 PHY Specific Status Register */ 2698/* M88E1000 PHY Specific Status Register */
2876#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ 2699#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
2877#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ 2700#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
2878#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ 2701#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
2879#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ 2702#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
2880#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; 2703#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
2881 * 3=110-140M;4=>140M */ 2704 * 3=110-140M;4=>140M */
2882#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ 2705#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
2883#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ 2706#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
2884#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ 2707#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
2885#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ 2708#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
2886#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ 2709#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
2887#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ 2710#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
2888#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ 2711#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
2889#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ 2712#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
2890 2713
2891#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 2714#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
2892#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 2715#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5
@@ -2894,12 +2717,12 @@ struct e1000_host_command_info {
2894#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 2717#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
2895 2718
2896/* M88E1000 Extended PHY Specific Control Register */ 2719/* M88E1000 Extended PHY Specific Control Register */
2897#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ 2720#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
2898#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 2721#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled.
2899 * Will assert lost lock and bring 2722 * Will assert lost lock and bring
2900 * link down if idle not seen 2723 * link down if idle not seen
2901 * within 1ms in 1000BASE-T 2724 * within 1ms in 1000BASE-T
2902 */ 2725 */
2903/* Number of times we will attempt to autonegotiate before downshifting if we 2726/* Number of times we will attempt to autonegotiate before downshifting if we
2904 * are the master */ 2727 * are the master */
2905#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 2728#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
@@ -2914,9 +2737,9 @@ struct e1000_host_command_info {
2914#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 2737#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
2915#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 2738#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
2916#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 2739#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
2917#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ 2740#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
2918#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ 2741#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
2919#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ 2742#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
2920 2743
2921/* M88EC018 Rev 2 specific DownShift settings */ 2744/* M88EC018 Rev 2 specific DownShift settings */
2922#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 2745#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
@@ -2938,18 +2761,18 @@ struct e1000_host_command_info {
2938#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 2761#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000
2939 2762
2940/* IGP01E1000 Specific Port Status Register - R/O */ 2763/* IGP01E1000 Specific Port Status Register - R/O */
2941#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ 2764#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */
2942#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 2765#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
2943#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C 2766#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C
2944#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 2767#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200
2945#define IGP01E1000_PSSR_LINK_UP 0x0400 2768#define IGP01E1000_PSSR_LINK_UP 0x0400
2946#define IGP01E1000_PSSR_MDIX 0x0800 2769#define IGP01E1000_PSSR_MDIX 0x0800
2947#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ 2770#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */
2948#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 2771#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000
2949#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 2772#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000
2950#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 2773#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
2951#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ 2774#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */
2952#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ 2775#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */
2953 2776
2954/* IGP01E1000 Specific Port Control Register - R/W */ 2777/* IGP01E1000 Specific Port Control Register - R/W */
2955#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 2778#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010
@@ -2957,16 +2780,16 @@ struct e1000_host_command_info {
2957#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 2780#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400
2958#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 2781#define IGP01E1000_PSCR_FLIP_CHIP 0x0800
2959#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 2782#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
2960#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ 2783#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */
2961 2784
2962/* IGP01E1000 Specific Port Link Health Register */ 2785/* IGP01E1000 Specific Port Link Health Register */
2963#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 2786#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
2964#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 2787#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000
2965#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 2788#define IGP01E1000_PLHR_MASTER_FAULT 0x2000
2966#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 2789#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000
2967#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ 2790#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */
2968#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ 2791#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
2969#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ 2792#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */
2970#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 2793#define IGP01E1000_PLHR_DATA_ERR_0 0x0100
2971#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 2794#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040
2972#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 2795#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010
@@ -2981,9 +2804,9 @@ struct e1000_host_command_info {
2981#define IGP01E1000_MSE_CHANNEL_B 0x0F00 2804#define IGP01E1000_MSE_CHANNEL_B 0x0F00
2982#define IGP01E1000_MSE_CHANNEL_A 0xF000 2805#define IGP01E1000_MSE_CHANNEL_A 0xF000
2983 2806
2984#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ 2807#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
2985#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ 2808#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
2986#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ 2809#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
2987 2810
2988/* IGP01E1000 DSP reset macros */ 2811/* IGP01E1000 DSP reset macros */
2989#define DSP_RESET_ENABLE 0x0 2812#define DSP_RESET_ENABLE 0x0
@@ -2992,8 +2815,8 @@ struct e1000_host_command_info {
2992 2815
2993/* IGP01E1000 & IGP02E1000 AGC Registers */ 2816/* IGP01E1000 & IGP02E1000 AGC Registers */
2994 2817
2995#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ 2818#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
2996#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ 2819#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
2997 2820
2998/* IGP02E1000 AGC Register Length 9-bit mask */ 2821/* IGP02E1000 AGC Register Length 9-bit mask */
2999#define IGP02E1000_AGC_LENGTH_MASK 0x7F 2822#define IGP02E1000_AGC_LENGTH_MASK 0x7F
@@ -3011,9 +2834,9 @@ struct e1000_host_command_info {
3011#define IGP01E1000_PHY_POLARITY_MASK 0x0078 2834#define IGP01E1000_PHY_POLARITY_MASK 0x0078
3012 2835
3013/* IGP01E1000 GMII FIFO Register */ 2836/* IGP01E1000 GMII FIFO Register */
3014#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed 2837#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed
3015 * on Link-Up */ 2838 * on Link-Up */
3016#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ 2839#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */
3017 2840
3018/* IGP01E1000 Analog Register */ 2841/* IGP01E1000 Analog Register */
3019#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 2842#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
@@ -3032,114 +2855,6 @@ struct e1000_host_command_info {
3032#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 2855#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
3033#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 2856#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
3034 2857
3035/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
3036#define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */
3037#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */
3038#define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */
3039#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */
3040#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
3041#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */
3042#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */
3043#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */
3044#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */
3045#define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300
3046#define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */
3047#define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */
3048#define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */
3049#define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */
3050#define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */
3051#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000
3052#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12
3053
3054/* PHY Specific Status Register (Page 0, Register 17) */
3055#define GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */
3056#define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */
3057#define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */
3058#define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */
3059#define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */
3060#define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */
3061#define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */
3062#define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */
3063#define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */
3064#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
3065#define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */
3066#define GG82563_PSSR_DUPLEX 0x2000 /* 1-Full-Duplex */
3067#define GG82563_PSSR_SPEED_MASK 0xC000
3068#define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */
3069#define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */
3070#define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */
3071
3072/* PHY Specific Status Register 2 (Page 0, Register 19) */
3073#define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */
3074#define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */
3075#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
3076#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */
3077#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */
3078#define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */
3079#define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */
3080#define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */
3081#define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */
3082#define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */
3083#define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */
3084#define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */
3085#define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */
3086
3087/* PHY Specific Control Register 2 (Page 0, Register 26) */
3088#define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */
3089#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C
3090#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */
3091#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */
3092#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */
3093#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */
3094#define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */
3095#define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000
3096#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */
3097#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */
3098
3099/* MAC Specific Control Register (Page 2, Register 21) */
3100/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
3101#define GG82563_MSCR_TX_CLK_MASK 0x0007
3102#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004
3103#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005
3104#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006
3105#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007
3106
3107#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
3108
3109/* DSP Distance Register (Page 5, Register 26) */
3110#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
3111 1 = 50-80M;
3112 2 = 80-110M;
3113 3 = 110-140M;
3114 4 = >140M */
3115
3116/* Kumeran Mode Control Register (Page 193, Register 16) */
3117#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
3118#define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up */
3119#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080
3120#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400
3121#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */
3122#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
3123
3124/* Power Management Control Register (Page 193, Register 20) */
3125#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enalbe SERDES Electrical Idle */
3126#define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */
3127#define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */
3128#define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */
3129#define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
3130#define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
3131#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
3132#define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */
3133#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300
3134#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */
3135#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */
3136#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */
3137#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */
3138
3139/* In-Band Control Register (Page 194, Register 18) */
3140#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */
3141
3142
3143/* Bit definitions for valid PHY IDs. */ 2858/* Bit definitions for valid PHY IDs. */
3144/* I = Integrated 2859/* I = Integrated
3145 * E = External 2860 * E = External
@@ -3154,8 +2869,6 @@ struct e1000_host_command_info {
3154#define M88E1011_I_REV_4 0x04 2869#define M88E1011_I_REV_4 0x04
3155#define M88E1111_I_PHY_ID 0x01410CC0 2870#define M88E1111_I_PHY_ID 0x01410CC0
3156#define L1LXT971A_PHY_ID 0x001378E0 2871#define L1LXT971A_PHY_ID 0x001378E0
3157#define GG82563_E_PHY_ID 0x01410CA0
3158
3159 2872
3160/* Bits... 2873/* Bits...
3161 * 15-5: page 2874 * 15-5: page
@@ -3166,41 +2879,41 @@ struct e1000_host_command_info {
3166 (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) 2879 (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
3167 2880
3168#define IGP3_PHY_PORT_CTRL \ 2881#define IGP3_PHY_PORT_CTRL \
3169 PHY_REG(769, 17) /* Port General Configuration */ 2882 PHY_REG(769, 17) /* Port General Configuration */
3170#define IGP3_PHY_RATE_ADAPT_CTRL \ 2883#define IGP3_PHY_RATE_ADAPT_CTRL \
3171 PHY_REG(769, 25) /* Rate Adapter Control Register */ 2884 PHY_REG(769, 25) /* Rate Adapter Control Register */
3172 2885
3173#define IGP3_KMRN_FIFO_CTRL_STATS \ 2886#define IGP3_KMRN_FIFO_CTRL_STATS \
3174 PHY_REG(770, 16) /* KMRN FIFO's control/status register */ 2887 PHY_REG(770, 16) /* KMRN FIFO's control/status register */
3175#define IGP3_KMRN_POWER_MNG_CTRL \ 2888#define IGP3_KMRN_POWER_MNG_CTRL \
3176 PHY_REG(770, 17) /* KMRN Power Management Control Register */ 2889 PHY_REG(770, 17) /* KMRN Power Management Control Register */
3177#define IGP3_KMRN_INBAND_CTRL \ 2890#define IGP3_KMRN_INBAND_CTRL \
3178 PHY_REG(770, 18) /* KMRN Inband Control Register */ 2891 PHY_REG(770, 18) /* KMRN Inband Control Register */
3179#define IGP3_KMRN_DIAG \ 2892#define IGP3_KMRN_DIAG \
3180 PHY_REG(770, 19) /* KMRN Diagnostic register */ 2893 PHY_REG(770, 19) /* KMRN Diagnostic register */
3181#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ 2894#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
3182#define IGP3_KMRN_ACK_TIMEOUT \ 2895#define IGP3_KMRN_ACK_TIMEOUT \
3183 PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ 2896 PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
3184 2897
3185#define IGP3_VR_CTRL \ 2898#define IGP3_VR_CTRL \
3186 PHY_REG(776, 18) /* Voltage regulator control register */ 2899 PHY_REG(776, 18) /* Voltage regulator control register */
3187#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ 2900#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
3188#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ 2901#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
3189 2902
3190#define IGP3_CAPABILITY \ 2903#define IGP3_CAPABILITY \
3191 PHY_REG(776, 19) /* IGP3 Capability Register */ 2904 PHY_REG(776, 19) /* IGP3 Capability Register */
3192 2905
3193/* Capabilities for SKU Control */ 2906/* Capabilities for SKU Control */
3194#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ 2907#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
3195#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ 2908#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
3196#define IGP3_CAP_ASF 0x0004 /* Support ASF */ 2909#define IGP3_CAP_ASF 0x0004 /* Support ASF */
3197#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ 2910#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
3198#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ 2911#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
3199#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ 2912#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
3200#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ 2913#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
3201#define IGP3_CAP_RSS 0x0080 /* Support RSS */ 2914#define IGP3_CAP_RSS 0x0080 /* Support RSS */
3202#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ 2915#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
3203#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ 2916#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
3204 2917
3205#define IGP3_PPC_JORDAN_EN 0x0001 2918#define IGP3_PPC_JORDAN_EN 0x0001
3206#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 2919#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
@@ -3210,69 +2923,69 @@ struct e1000_host_command_info {
3210#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 2923#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
3211#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 2924#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
3212 2925
3213#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */ 2926#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
3214#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ 2927#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
3215 2928
3216#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) 2929#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
3217#define IGP3_KMRN_EC_DIS_INBAND 0x0080 2930#define IGP3_KMRN_EC_DIS_INBAND 0x0080
3218 2931
3219#define IGP03E1000_E_PHY_ID 0x02A80390 2932#define IGP03E1000_E_PHY_ID 0x02A80390
3220#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ 2933#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
3221#define IFE_PLUS_E_PHY_ID 0x02A80320 2934#define IFE_PLUS_E_PHY_ID 0x02A80320
3222#define IFE_C_E_PHY_ID 0x02A80310 2935#define IFE_C_E_PHY_ID 0x02A80310
3223 2936
3224#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ 2937#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
3225#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ 2938#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
3226#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ 2939#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
3227#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnet Counter */ 2940#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
3228#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ 2941#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
3229#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ 2942#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
3230#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ 2943#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
3231#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ 2944#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
3232#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ 2945#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
3233#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ 2946#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
3234#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ 2947#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
3235#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ 2948#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
3236#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ 2949#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
3237 2950
3238#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Defaut 1 = Disable auto reduced power down */ 2951#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
3239#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ 2952#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
3240#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ 2953#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
3241#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ 2954#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
3242#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ 2955#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
3243#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ 2956#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
3244#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ 2957#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
3245#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 2958#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
3246 2959
3247#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dyanmic Power Down disabled */ 2960#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
3248#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ 2961#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
3249#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ 2962#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
3250#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ 2963#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
3251#define IFE_PSC_FORCE_POLARITY_SHIFT 5 2964#define IFE_PSC_FORCE_POLARITY_SHIFT 5
3252#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 2965#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
3253 2966
3254#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ 2967#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
3255#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ 2968#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */
3256#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ 2969#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
3257#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ 2970#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
3258#define IFE_PMC_MDIX_MODE_SHIFT 6 2971#define IFE_PMC_MDIX_MODE_SHIFT 6
3259#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ 2972#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
3260 2973
3261#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ 2974#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
3262#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ 2975#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
3263#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ 2976#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
3264#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ 2977#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
3265#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ 2978#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
3266#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ 2979#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */
3267#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ 2980#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
3268#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ 2981#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
3269#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ 2982#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
3270#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ 2983#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
3271#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ 2984#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
3272 2985
3273#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ 2986#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
3274#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ 2987#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
3275#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ 2988#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
3276#define ICH_FLASH_SEG_SIZE_256 256 2989#define ICH_FLASH_SEG_SIZE_256 256
3277#define ICH_FLASH_SEG_SIZE_4K 4096 2990#define ICH_FLASH_SEG_SIZE_4K 4096
3278#define ICH_FLASH_SEG_SIZE_64K 65536 2991#define ICH_FLASH_SEG_SIZE_64K 65536
@@ -3305,74 +3018,6 @@ struct e1000_host_command_info {
3305#define ICH_GFPREG_BASE_MASK 0x1FFF 3018#define ICH_GFPREG_BASE_MASK 0x1FFF
3306#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF 3019#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
3307 3020
3308/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
3309/* Offset 04h HSFSTS */
3310union ich8_hws_flash_status {
3311 struct ich8_hsfsts {
3312#ifdef __BIG_ENDIAN
3313 u16 reserved2 :6;
3314 u16 fldesvalid :1;
3315 u16 flockdn :1;
3316 u16 flcdone :1;
3317 u16 flcerr :1;
3318 u16 dael :1;
3319 u16 berasesz :2;
3320 u16 flcinprog :1;
3321 u16 reserved1 :2;
3322#else
3323 u16 flcdone :1; /* bit 0 Flash Cycle Done */
3324 u16 flcerr :1; /* bit 1 Flash Cycle Error */
3325 u16 dael :1; /* bit 2 Direct Access error Log */
3326 u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */
3327 u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */
3328 u16 reserved1 :2; /* bit 13:6 Reserved */
3329 u16 reserved2 :6; /* bit 13:6 Reserved */
3330 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
3331 u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */
3332#endif
3333 } hsf_status;
3334 u16 regval;
3335};
3336
3337/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
3338/* Offset 06h FLCTL */
3339union ich8_hws_flash_ctrl {
3340 struct ich8_hsflctl {
3341#ifdef __BIG_ENDIAN
3342 u16 fldbcount :2;
3343 u16 flockdn :6;
3344 u16 flcgo :1;
3345 u16 flcycle :2;
3346 u16 reserved :5;
3347#else
3348 u16 flcgo :1; /* 0 Flash Cycle Go */
3349 u16 flcycle :2; /* 2:1 Flash Cycle */
3350 u16 reserved :5; /* 7:3 Reserved */
3351 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
3352 u16 flockdn :6; /* 15:10 Reserved */
3353#endif
3354 } hsf_ctrl;
3355 u16 regval;
3356};
3357
3358/* ICH8 Flash Region Access Permissions */
3359union ich8_hws_flash_regacc {
3360 struct ich8_flracc {
3361#ifdef __BIG_ENDIAN
3362 u32 gmwag :8;
3363 u32 gmrag :8;
3364 u32 grwa :8;
3365 u32 grra :8;
3366#else
3367 u32 grra :8; /* 0:7 GbE region Read Access */
3368 u32 grwa :8; /* 8:15 GbE region Write Access */
3369 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
3370 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
3371#endif
3372 } hsf_flregacc;
3373 u16 regval;
3374};
3375
3376/* Miscellaneous PHY bit definitions. */ 3021/* Miscellaneous PHY bit definitions. */
3377#define PHY_PREAMBLE 0xFFFFFFFF 3022#define PHY_PREAMBLE 0xFFFFFFFF
3378#define PHY_SOF 0x01 3023#define PHY_SOF 0x01
@@ -3384,10 +3029,10 @@ union ich8_hws_flash_regacc {
3384#define MII_CR_SPEED_100 0x2000 3029#define MII_CR_SPEED_100 0x2000
3385#define MII_CR_SPEED_10 0x0000 3030#define MII_CR_SPEED_10 0x0000
3386#define E1000_PHY_ADDRESS 0x01 3031#define E1000_PHY_ADDRESS 0x01
3387#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 3032#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
3388#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ 3033#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
3389#define PHY_REVISION_MASK 0xFFFFFFF0 3034#define PHY_REVISION_MASK 0xFFFFFFF0
3390#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ 3035#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
3391#define REG4_SPEED_MASK 0x01E0 3036#define REG4_SPEED_MASK 0x01E0
3392#define REG9_SPEED_MASK 0x0300 3037#define REG9_SPEED_MASK 0x0300
3393#define ADVERTISE_10_HALF 0x0001 3038#define ADVERTISE_10_HALF 0x0001
@@ -3396,8 +3041,8 @@ union ich8_hws_flash_regacc {
3396#define ADVERTISE_100_FULL 0x0008 3041#define ADVERTISE_100_FULL 0x0008
3397#define ADVERTISE_1000_HALF 0x0010 3042#define ADVERTISE_1000_HALF 0x0010
3398#define ADVERTISE_1000_FULL 0x0020 3043#define ADVERTISE_1000_FULL 0x0020
3399#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ 3044#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
3400#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds*/ 3045#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
3401#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds*/ 3046#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
3402 3047
3403#endif /* _E1000_HW_H_ */ 3048#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index c66dd4f9437c..bcd192ca47b0 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.21-k3-NAPI" 34#define DRV_VERSION "7.3.21-k5-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -131,7 +131,6 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
131static int e1000_change_mtu(struct net_device *netdev, int new_mtu); 131static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
132static int e1000_set_mac(struct net_device *netdev, void *p); 132static int e1000_set_mac(struct net_device *netdev, void *p);
133static irqreturn_t e1000_intr(int irq, void *data); 133static irqreturn_t e1000_intr(int irq, void *data);
134static irqreturn_t e1000_intr_msi(int irq, void *data);
135static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 134static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
136 struct e1000_tx_ring *tx_ring); 135 struct e1000_tx_ring *tx_ring);
137static int e1000_clean(struct napi_struct *napi, int budget); 136static int e1000_clean(struct napi_struct *napi, int budget);
@@ -258,25 +257,14 @@ module_exit(e1000_exit_module);
258 257
259static int e1000_request_irq(struct e1000_adapter *adapter) 258static int e1000_request_irq(struct e1000_adapter *adapter)
260{ 259{
261 struct e1000_hw *hw = &adapter->hw;
262 struct net_device *netdev = adapter->netdev; 260 struct net_device *netdev = adapter->netdev;
263 irq_handler_t handler = e1000_intr; 261 irq_handler_t handler = e1000_intr;
264 int irq_flags = IRQF_SHARED; 262 int irq_flags = IRQF_SHARED;
265 int err; 263 int err;
266 264
267 if (hw->mac_type >= e1000_82571) {
268 adapter->have_msi = !pci_enable_msi(adapter->pdev);
269 if (adapter->have_msi) {
270 handler = e1000_intr_msi;
271 irq_flags = 0;
272 }
273 }
274
275 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 265 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
276 netdev); 266 netdev);
277 if (err) { 267 if (err) {
278 if (adapter->have_msi)
279 pci_disable_msi(adapter->pdev);
280 DPRINTK(PROBE, ERR, 268 DPRINTK(PROBE, ERR,
281 "Unable to allocate interrupt Error: %d\n", err); 269 "Unable to allocate interrupt Error: %d\n", err);
282 } 270 }
@@ -289,9 +277,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
289 struct net_device *netdev = adapter->netdev; 277 struct net_device *netdev = adapter->netdev;
290 278
291 free_irq(adapter->pdev->irq, netdev); 279 free_irq(adapter->pdev->irq, netdev);
292
293 if (adapter->have_msi)
294 pci_disable_msi(adapter->pdev);
295} 280}
296 281
297/** 282/**
@@ -345,76 +330,6 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
345 } 330 }
346} 331}
347 332
348/**
349 * e1000_release_hw_control - release control of the h/w to f/w
350 * @adapter: address of board private structure
351 *
352 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
353 * For ASF and Pass Through versions of f/w this means that the
354 * driver is no longer loaded. For AMT version (only with 82573) i
355 * of the f/w this means that the network i/f is closed.
356 *
357 **/
358
359static void e1000_release_hw_control(struct e1000_adapter *adapter)
360{
361 u32 ctrl_ext;
362 u32 swsm;
363 struct e1000_hw *hw = &adapter->hw;
364
365 /* Let firmware taken over control of h/w */
366 switch (hw->mac_type) {
367 case e1000_82573:
368 swsm = er32(SWSM);
369 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
370 break;
371 case e1000_82571:
372 case e1000_82572:
373 case e1000_80003es2lan:
374 case e1000_ich8lan:
375 ctrl_ext = er32(CTRL_EXT);
376 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
377 break;
378 default:
379 break;
380 }
381}
382
383/**
384 * e1000_get_hw_control - get control of the h/w from f/w
385 * @adapter: address of board private structure
386 *
387 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
388 * For ASF and Pass Through versions of f/w this means that
389 * the driver is loaded. For AMT version (only with 82573)
390 * of the f/w this means that the network i/f is open.
391 *
392 **/
393
394static void e1000_get_hw_control(struct e1000_adapter *adapter)
395{
396 u32 ctrl_ext;
397 u32 swsm;
398 struct e1000_hw *hw = &adapter->hw;
399
400 /* Let firmware know the driver has taken over */
401 switch (hw->mac_type) {
402 case e1000_82573:
403 swsm = er32(SWSM);
404 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
405 break;
406 case e1000_82571:
407 case e1000_82572:
408 case e1000_80003es2lan:
409 case e1000_ich8lan:
410 ctrl_ext = er32(CTRL_EXT);
411 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
412 break;
413 default:
414 break;
415 }
416}
417
418static void e1000_init_manageability(struct e1000_adapter *adapter) 333static void e1000_init_manageability(struct e1000_adapter *adapter)
419{ 334{
420 struct e1000_hw *hw = &adapter->hw; 335 struct e1000_hw *hw = &adapter->hw;
@@ -425,20 +340,6 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
425 /* disable hardware interception of ARP */ 340 /* disable hardware interception of ARP */
426 manc &= ~(E1000_MANC_ARP_EN); 341 manc &= ~(E1000_MANC_ARP_EN);
427 342
428 /* enable receiving management packets to the host */
429 /* this will probably generate destination unreachable messages
430 * from the host OS, but the packets will be handled on SMBUS */
431 if (hw->has_manc2h) {
432 u32 manc2h = er32(MANC2H);
433
434 manc |= E1000_MANC_EN_MNG2HOST;
435#define E1000_MNG2HOST_PORT_623 (1 << 5)
436#define E1000_MNG2HOST_PORT_664 (1 << 6)
437 manc2h |= E1000_MNG2HOST_PORT_623;
438 manc2h |= E1000_MNG2HOST_PORT_664;
439 ew32(MANC2H, manc2h);
440 }
441
442 ew32(MANC, manc); 343 ew32(MANC, manc);
443 } 344 }
444} 345}
@@ -453,12 +354,6 @@ static void e1000_release_manageability(struct e1000_adapter *adapter)
453 /* re-enable hardware interception of ARP */ 354 /* re-enable hardware interception of ARP */
454 manc |= E1000_MANC_ARP_EN; 355 manc |= E1000_MANC_ARP_EN;
455 356
456 if (hw->has_manc2h)
457 manc &= ~E1000_MANC_EN_MNG2HOST;
458
459 /* don't explicitly have to mess with MANC2H since
460 * MANC has an enable disable that gates MANC2H */
461
462 ew32(MANC, manc); 357 ew32(MANC, manc);
463 } 358 }
464} 359}
@@ -563,15 +458,6 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
563 if (er32(MANC) & E1000_MANC_SMBUS_EN) 458 if (er32(MANC) & E1000_MANC_SMBUS_EN)
564 goto out; 459 goto out;
565 break; 460 break;
566 case e1000_82571:
567 case e1000_82572:
568 case e1000_82573:
569 case e1000_80003es2lan:
570 case e1000_ich8lan:
571 if (e1000_check_mng_mode(hw) ||
572 e1000_check_phy_reset_block(hw))
573 goto out;
574 break;
575 default: 461 default:
576 goto out; 462 goto out;
577 } 463 }
@@ -599,8 +485,7 @@ void e1000_down(struct e1000_adapter *adapter)
599 ew32(RCTL, rctl & ~E1000_RCTL_EN); 485 ew32(RCTL, rctl & ~E1000_RCTL_EN);
600 /* flush and sleep below */ 486 /* flush and sleep below */
601 487
602 /* can be netif_tx_disable when NETIF_F_LLTX is removed */ 488 netif_tx_disable(netdev);
603 netif_stop_queue(netdev);
604 489
605 /* disable transmits in the hardware */ 490 /* disable transmits in the hardware */
606 tctl = er32(TCTL); 491 tctl = er32(TCTL);
@@ -671,16 +556,6 @@ void e1000_reset(struct e1000_adapter *adapter)
671 legacy_pba_adjust = true; 556 legacy_pba_adjust = true;
672 pba = E1000_PBA_30K; 557 pba = E1000_PBA_30K;
673 break; 558 break;
674 case e1000_82571:
675 case e1000_82572:
676 case e1000_80003es2lan:
677 pba = E1000_PBA_38K;
678 break;
679 case e1000_82573:
680 pba = E1000_PBA_20K;
681 break;
682 case e1000_ich8lan:
683 pba = E1000_PBA_8K;
684 case e1000_undefined: 559 case e1000_undefined:
685 case e1000_num_macs: 560 case e1000_num_macs:
686 break; 561 break;
@@ -744,16 +619,8 @@ void e1000_reset(struct e1000_adapter *adapter)
744 619
745 /* if short on rx space, rx wins and must trump tx 620 /* if short on rx space, rx wins and must trump tx
746 * adjustment or use Early Receive if available */ 621 * adjustment or use Early Receive if available */
747 if (pba < min_rx_space) { 622 if (pba < min_rx_space)
748 switch (hw->mac_type) { 623 pba = min_rx_space;
749 case e1000_82573:
750 /* ERT enabled in e1000_configure_rx */
751 break;
752 default:
753 pba = min_rx_space;
754 break;
755 }
756 }
757 } 624 }
758 } 625 }
759 626
@@ -789,7 +656,6 @@ void e1000_reset(struct e1000_adapter *adapter)
789 656
790 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ 657 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
791 if (hw->mac_type >= e1000_82544 && 658 if (hw->mac_type >= e1000_82544 &&
792 hw->mac_type <= e1000_82547_rev_2 &&
793 hw->autoneg == 1 && 659 hw->autoneg == 1 &&
794 hw->autoneg_advertised == ADVERTISE_1000_FULL) { 660 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
795 u32 ctrl = er32(CTRL); 661 u32 ctrl = er32(CTRL);
@@ -806,20 +672,6 @@ void e1000_reset(struct e1000_adapter *adapter)
806 e1000_reset_adaptive(hw); 672 e1000_reset_adaptive(hw);
807 e1000_phy_get_info(hw, &adapter->phy_info); 673 e1000_phy_get_info(hw, &adapter->phy_info);
808 674
809 if (!adapter->smart_power_down &&
810 (hw->mac_type == e1000_82571 ||
811 hw->mac_type == e1000_82572)) {
812 u16 phy_data = 0;
813 /* speed up time to link by disabling smart power down, ignore
814 * the return value of this function because there is nothing
815 * different we would do if it failed */
816 e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
817 &phy_data);
818 phy_data &= ~IGP02E1000_PM_SPD;
819 e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
820 phy_data);
821 }
822
823 e1000_release_manageability(adapter); 675 e1000_release_manageability(adapter);
824} 676}
825 677
@@ -1046,17 +898,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1046 goto err_sw_init; 898 goto err_sw_init;
1047 899
1048 err = -EIO; 900 err = -EIO;
1049 /* Flash BAR mapping must happen after e1000_sw_init
1050 * because it depends on mac_type */
1051 if ((hw->mac_type == e1000_ich8lan) &&
1052 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
1053 hw->flash_address = pci_ioremap_bar(pdev, 1);
1054 if (!hw->flash_address)
1055 goto err_flashmap;
1056 }
1057
1058 if (e1000_check_phy_reset_block(hw))
1059 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
1060 901
1061 if (hw->mac_type >= e1000_82543) { 902 if (hw->mac_type >= e1000_82543) {
1062 netdev->features = NETIF_F_SG | 903 netdev->features = NETIF_F_SG |
@@ -1064,21 +905,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1064 NETIF_F_HW_VLAN_TX | 905 NETIF_F_HW_VLAN_TX |
1065 NETIF_F_HW_VLAN_RX | 906 NETIF_F_HW_VLAN_RX |
1066 NETIF_F_HW_VLAN_FILTER; 907 NETIF_F_HW_VLAN_FILTER;
1067 if (hw->mac_type == e1000_ich8lan)
1068 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
1069 } 908 }
1070 909
1071 if ((hw->mac_type >= e1000_82544) && 910 if ((hw->mac_type >= e1000_82544) &&
1072 (hw->mac_type != e1000_82547)) 911 (hw->mac_type != e1000_82547))
1073 netdev->features |= NETIF_F_TSO; 912 netdev->features |= NETIF_F_TSO;
1074 913
1075 if (hw->mac_type > e1000_82547_rev_2)
1076 netdev->features |= NETIF_F_TSO6;
1077 if (pci_using_dac) 914 if (pci_using_dac)
1078 netdev->features |= NETIF_F_HIGHDMA; 915 netdev->features |= NETIF_F_HIGHDMA;
1079 916
1080 netdev->vlan_features |= NETIF_F_TSO; 917 netdev->vlan_features |= NETIF_F_TSO;
1081 netdev->vlan_features |= NETIF_F_TSO6;
1082 netdev->vlan_features |= NETIF_F_HW_CSUM; 918 netdev->vlan_features |= NETIF_F_HW_CSUM;
1083 netdev->vlan_features |= NETIF_F_SG; 919 netdev->vlan_features |= NETIF_F_SG;
1084 920
@@ -1153,15 +989,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1153 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); 989 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1154 eeprom_apme_mask = E1000_EEPROM_82544_APM; 990 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1155 break; 991 break;
1156 case e1000_ich8lan:
1157 e1000_read_eeprom(hw,
1158 EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
1159 eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
1160 break;
1161 case e1000_82546: 992 case e1000_82546:
1162 case e1000_82546_rev_3: 993 case e1000_82546_rev_3:
1163 case e1000_82571:
1164 case e1000_80003es2lan:
1165 if (er32(STATUS) & E1000_STATUS_FUNC_1){ 994 if (er32(STATUS) & E1000_STATUS_FUNC_1){
1166 e1000_read_eeprom(hw, 995 e1000_read_eeprom(hw,
1167 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 996 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -1185,17 +1014,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1185 break; 1014 break;
1186 case E1000_DEV_ID_82546EB_FIBER: 1015 case E1000_DEV_ID_82546EB_FIBER:
1187 case E1000_DEV_ID_82546GB_FIBER: 1016 case E1000_DEV_ID_82546GB_FIBER:
1188 case E1000_DEV_ID_82571EB_FIBER:
1189 /* Wake events only supported on port A for dual fiber 1017 /* Wake events only supported on port A for dual fiber
1190 * regardless of eeprom setting */ 1018 * regardless of eeprom setting */
1191 if (er32(STATUS) & E1000_STATUS_FUNC_1) 1019 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1192 adapter->eeprom_wol = 0; 1020 adapter->eeprom_wol = 0;
1193 break; 1021 break;
1194 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1022 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1195 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1196 case E1000_DEV_ID_82571EB_QUAD_FIBER:
1197 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1198 case E1000_DEV_ID_82571PT_QUAD_COPPER:
1199 /* if quad port adapter, disable WoL on all but port A */ 1023 /* if quad port adapter, disable WoL on all but port A */
1200 if (global_quad_port_a != 0) 1024 if (global_quad_port_a != 0)
1201 adapter->eeprom_wol = 0; 1025 adapter->eeprom_wol = 0;
@@ -1213,39 +1037,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1213 1037
1214 /* print bus type/speed/width info */ 1038 /* print bus type/speed/width info */
1215 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", 1039 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
1216 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : 1040 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1217 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), 1041 ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1218 ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1219 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1220 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : 1042 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
1221 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" : 1043 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
1222 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), 1044 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
1223 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : 1045 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
1224 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
1225 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
1226 "32-bit"));
1227 1046
1228 printk("%pM\n", netdev->dev_addr); 1047 printk("%pM\n", netdev->dev_addr);
1229 1048
1230 if (hw->bus_type == e1000_bus_type_pci_express) {
1231 DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
1232 "longer be supported by this driver in the future.\n",
1233 pdev->vendor, pdev->device);
1234 DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
1235 "driver instead.\n");
1236 }
1237
1238 /* reset the hardware with the new settings */ 1049 /* reset the hardware with the new settings */
1239 e1000_reset(adapter); 1050 e1000_reset(adapter);
1240 1051
1241 /* If the controller is 82573 and f/w is AMT, do not set
1242 * DRV_LOAD until the interface is up. For all other cases,
1243 * let the f/w know that the h/w is now under the control
1244 * of the driver. */
1245 if (hw->mac_type != e1000_82573 ||
1246 !e1000_check_mng_mode(hw))
1247 e1000_get_hw_control(adapter);
1248
1249 strcpy(netdev->name, "eth%d"); 1052 strcpy(netdev->name, "eth%d");
1250 err = register_netdev(netdev); 1053 err = register_netdev(netdev);
1251 if (err) 1054 if (err)
@@ -1260,14 +1063,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1260 return 0; 1063 return 0;
1261 1064
1262err_register: 1065err_register:
1263 e1000_release_hw_control(adapter);
1264err_eeprom: 1066err_eeprom:
1265 if (!e1000_check_phy_reset_block(hw)) 1067 e1000_phy_hw_reset(hw);
1266 e1000_phy_hw_reset(hw);
1267 1068
1268 if (hw->flash_address) 1069 if (hw->flash_address)
1269 iounmap(hw->flash_address); 1070 iounmap(hw->flash_address);
1270err_flashmap:
1271 kfree(adapter->tx_ring); 1071 kfree(adapter->tx_ring);
1272 kfree(adapter->rx_ring); 1072 kfree(adapter->rx_ring);
1273err_sw_init: 1073err_sw_init:
@@ -1298,18 +1098,18 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
1298 struct e1000_adapter *adapter = netdev_priv(netdev); 1098 struct e1000_adapter *adapter = netdev_priv(netdev);
1299 struct e1000_hw *hw = &adapter->hw; 1099 struct e1000_hw *hw = &adapter->hw;
1300 1100
1101 set_bit(__E1000_DOWN, &adapter->flags);
1102 del_timer_sync(&adapter->tx_fifo_stall_timer);
1103 del_timer_sync(&adapter->watchdog_timer);
1104 del_timer_sync(&adapter->phy_info_timer);
1105
1301 cancel_work_sync(&adapter->reset_task); 1106 cancel_work_sync(&adapter->reset_task);
1302 1107
1303 e1000_release_manageability(adapter); 1108 e1000_release_manageability(adapter);
1304 1109
1305 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1306 * would have already happened in close and is redundant. */
1307 e1000_release_hw_control(adapter);
1308
1309 unregister_netdev(netdev); 1110 unregister_netdev(netdev);
1310 1111
1311 if (!e1000_check_phy_reset_block(hw)) 1112 e1000_phy_hw_reset(hw);
1312 e1000_phy_hw_reset(hw);
1313 1113
1314 kfree(adapter->tx_ring); 1114 kfree(adapter->tx_ring);
1315 kfree(adapter->rx_ring); 1115 kfree(adapter->rx_ring);
@@ -1472,12 +1272,6 @@ static int e1000_open(struct net_device *netdev)
1472 e1000_update_mng_vlan(adapter); 1272 e1000_update_mng_vlan(adapter);
1473 } 1273 }
1474 1274
1475 /* If AMT is enabled, let the firmware know that the network
1476 * interface is now open */
1477 if (hw->mac_type == e1000_82573 &&
1478 e1000_check_mng_mode(hw))
1479 e1000_get_hw_control(adapter);
1480
1481 /* before we allocate an interrupt, we must be ready to handle it. 1275 /* before we allocate an interrupt, we must be ready to handle it.
1482 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1276 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1483 * as soon as we call pci_request_irq, so we have to setup our 1277 * as soon as we call pci_request_irq, so we have to setup our
@@ -1503,7 +1297,6 @@ static int e1000_open(struct net_device *netdev)
1503 return E1000_SUCCESS; 1297 return E1000_SUCCESS;
1504 1298
1505err_req_irq: 1299err_req_irq:
1506 e1000_release_hw_control(adapter);
1507 e1000_power_down_phy(adapter); 1300 e1000_power_down_phy(adapter);
1508 e1000_free_all_rx_resources(adapter); 1301 e1000_free_all_rx_resources(adapter);
1509err_setup_rx: 1302err_setup_rx:
@@ -1548,12 +1341,6 @@ static int e1000_close(struct net_device *netdev)
1548 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1341 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1549 } 1342 }
1550 1343
1551 /* If AMT is enabled, let the firmware know that the network
1552 * interface is now closed */
1553 if (hw->mac_type == e1000_82573 &&
1554 e1000_check_mng_mode(hw))
1555 e1000_release_hw_control(adapter);
1556
1557 return 0; 1344 return 0;
1558} 1345}
1559 1346
@@ -1692,7 +1479,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1692{ 1479{
1693 u64 tdba; 1480 u64 tdba;
1694 struct e1000_hw *hw = &adapter->hw; 1481 struct e1000_hw *hw = &adapter->hw;
1695 u32 tdlen, tctl, tipg, tarc; 1482 u32 tdlen, tctl, tipg;
1696 u32 ipgr1, ipgr2; 1483 u32 ipgr1, ipgr2;
1697 1484
1698 /* Setup the HW Tx Head and Tail descriptor pointers */ 1485 /* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1714,8 +1501,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1714 } 1501 }
1715 1502
1716 /* Set the default values for the Tx Inter Packet Gap timer */ 1503 /* Set the default values for the Tx Inter Packet Gap timer */
1717 if (hw->mac_type <= e1000_82547_rev_2 && 1504 if ((hw->media_type == e1000_media_type_fiber ||
1718 (hw->media_type == e1000_media_type_fiber ||
1719 hw->media_type == e1000_media_type_internal_serdes)) 1505 hw->media_type == e1000_media_type_internal_serdes))
1720 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1506 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1721 else 1507 else
@@ -1728,10 +1514,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1728 ipgr1 = DEFAULT_82542_TIPG_IPGR1; 1514 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1729 ipgr2 = DEFAULT_82542_TIPG_IPGR2; 1515 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1730 break; 1516 break;
1731 case e1000_80003es2lan:
1732 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1733 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1734 break;
1735 default: 1517 default:
1736 ipgr1 = DEFAULT_82543_TIPG_IPGR1; 1518 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1737 ipgr2 = DEFAULT_82543_TIPG_IPGR2; 1519 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
@@ -1754,21 +1536,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1754 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 1536 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1755 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1537 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1756 1538
1757 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1758 tarc = er32(TARC0);
1759 /* set the speed mode bit, we'll clear it if we're not at
1760 * gigabit link later */
1761 tarc |= (1 << 21);
1762 ew32(TARC0, tarc);
1763 } else if (hw->mac_type == e1000_80003es2lan) {
1764 tarc = er32(TARC0);
1765 tarc |= 1;
1766 ew32(TARC0, tarc);
1767 tarc = er32(TARC1);
1768 tarc |= 1;
1769 ew32(TARC1, tarc);
1770 }
1771
1772 e1000_config_collision_dist(hw); 1539 e1000_config_collision_dist(hw);
1773 1540
1774 /* Setup Transmit Descriptor Settings for eop descriptor */ 1541 /* Setup Transmit Descriptor Settings for eop descriptor */
@@ -1804,7 +1571,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1804static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 1571static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1805 struct e1000_rx_ring *rxdr) 1572 struct e1000_rx_ring *rxdr)
1806{ 1573{
1807 struct e1000_hw *hw = &adapter->hw;
1808 struct pci_dev *pdev = adapter->pdev; 1574 struct pci_dev *pdev = adapter->pdev;
1809 int size, desc_len; 1575 int size, desc_len;
1810 1576
@@ -1817,10 +1583,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1817 } 1583 }
1818 memset(rxdr->buffer_info, 0, size); 1584 memset(rxdr->buffer_info, 0, size);
1819 1585
1820 if (hw->mac_type <= e1000_82547_rev_2) 1586 desc_len = sizeof(struct e1000_rx_desc);
1821 desc_len = sizeof(struct e1000_rx_desc);
1822 else
1823 desc_len = sizeof(union e1000_rx_desc_packet_split);
1824 1587
1825 /* Round up to nearest 4K */ 1588 /* Round up to nearest 4K */
1826 1589
@@ -1977,7 +1740,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1977{ 1740{
1978 u64 rdba; 1741 u64 rdba;
1979 struct e1000_hw *hw = &adapter->hw; 1742 struct e1000_hw *hw = &adapter->hw;
1980 u32 rdlen, rctl, rxcsum, ctrl_ext; 1743 u32 rdlen, rctl, rxcsum;
1981 1744
1982 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1745 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1983 rdlen = adapter->rx_ring[0].count * 1746 rdlen = adapter->rx_ring[0].count *
@@ -2004,17 +1767,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2004 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1767 ew32(ITR, 1000000000 / (adapter->itr * 256));
2005 } 1768 }
2006 1769
2007 if (hw->mac_type >= e1000_82571) {
2008 ctrl_ext = er32(CTRL_EXT);
2009 /* Reset delay timers after every interrupt */
2010 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2011 /* Auto-Mask interrupts upon ICR access */
2012 ctrl_ext |= E1000_CTRL_EXT_IAME;
2013 ew32(IAM, 0xffffffff);
2014 ew32(CTRL_EXT, ctrl_ext);
2015 E1000_WRITE_FLUSH();
2016 }
2017
2018 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1770 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2019 * the Base and Length of the Rx Descriptor Ring */ 1771 * the Base and Length of the Rx Descriptor Ring */
2020 switch (adapter->num_rx_queues) { 1772 switch (adapter->num_rx_queues) {
@@ -2329,22 +2081,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2329 2081
2330 e1000_rar_set(hw, hw->mac_addr, 0); 2082 e1000_rar_set(hw, hw->mac_addr, 0);
2331 2083
2332 /* With 82571 controllers, LAA may be overwritten (with the default)
2333 * due to controller reset from the other port. */
2334 if (hw->mac_type == e1000_82571) {
2335 /* activate the work around */
2336 hw->laa_is_present = 1;
2337
2338 /* Hold a copy of the LAA in RAR[14] This is done so that
2339 * between the time RAR[0] gets clobbered and the time it
2340 * gets fixed (in e1000_watchdog), the actual LAA is in one
2341 * of the RARs and no incoming packets directed to this port
2342 * are dropped. Eventaully the LAA will be in RAR[0] and
2343 * RAR[14] */
2344 e1000_rar_set(hw, hw->mac_addr,
2345 E1000_RAR_ENTRIES - 1);
2346 }
2347
2348 if (hw->mac_type == e1000_82542_rev2_0) 2084 if (hw->mac_type == e1000_82542_rev2_0)
2349 e1000_leave_82542_rst(adapter); 2085 e1000_leave_82542_rst(adapter);
2350 2086
@@ -2371,9 +2107,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2371 u32 rctl; 2107 u32 rctl;
2372 u32 hash_value; 2108 u32 hash_value;
2373 int i, rar_entries = E1000_RAR_ENTRIES; 2109 int i, rar_entries = E1000_RAR_ENTRIES;
2374 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? 2110 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2375 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2376 E1000_NUM_MTA_REGISTERS;
2377 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2111 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2378 2112
2379 if (!mcarray) { 2113 if (!mcarray) {
@@ -2381,13 +2115,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2381 return; 2115 return;
2382 } 2116 }
2383 2117
2384 if (hw->mac_type == e1000_ich8lan)
2385 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2386
2387 /* reserve RAR[14] for LAA over-write work-around */
2388 if (hw->mac_type == e1000_82571)
2389 rar_entries--;
2390
2391 /* Check for Promiscuous and All Multicast modes */ 2118 /* Check for Promiscuous and All Multicast modes */
2392 2119
2393 rctl = er32(RCTL); 2120 rctl = er32(RCTL);
@@ -2396,15 +2123,13 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2396 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2123 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2397 rctl &= ~E1000_RCTL_VFE; 2124 rctl &= ~E1000_RCTL_VFE;
2398 } else { 2125 } else {
2399 if (netdev->flags & IFF_ALLMULTI) { 2126 if (netdev->flags & IFF_ALLMULTI)
2400 rctl |= E1000_RCTL_MPE; 2127 rctl |= E1000_RCTL_MPE;
2401 } else { 2128 else
2402 rctl &= ~E1000_RCTL_MPE; 2129 rctl &= ~E1000_RCTL_MPE;
2403 } 2130 /* Enable VLAN filter if there is a VLAN */
2404 if (adapter->hw.mac_type != e1000_ich8lan) 2131 if (adapter->vlgrp)
2405 /* Enable VLAN filter if there is a VLAN */ 2132 rctl |= E1000_RCTL_VFE;
2406 if (adapter->vlgrp)
2407 rctl |= E1000_RCTL_VFE;
2408 } 2133 }
2409 2134
2410 if (netdev->uc.count > rar_entries - 1) { 2135 if (netdev->uc.count > rar_entries - 1) {
@@ -2427,7 +2152,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2427 * 2152 *
2428 * RAR 0 is used for the station MAC adddress 2153 * RAR 0 is used for the station MAC adddress
2429 * if there are not 14 addresses, go ahead and clear the filters 2154 * if there are not 14 addresses, go ahead and clear the filters
2430 * -- with 82571 controllers only 0-13 entries are filled here
2431 */ 2155 */
2432 i = 1; 2156 i = 1;
2433 if (use_uc) 2157 if (use_uc)
@@ -2521,12 +2245,46 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
2521 adapter->tx_fifo_head = 0; 2245 adapter->tx_fifo_head = 0;
2522 atomic_set(&adapter->tx_fifo_stall, 0); 2246 atomic_set(&adapter->tx_fifo_stall, 0);
2523 netif_wake_queue(netdev); 2247 netif_wake_queue(netdev);
2524 } else { 2248 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2525 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 2249 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2526 } 2250 }
2527 } 2251 }
2528} 2252}
2529 2253
2254static bool e1000_has_link(struct e1000_adapter *adapter)
2255{
2256 struct e1000_hw *hw = &adapter->hw;
2257 bool link_active = false;
2258
2259 /* get_link_status is set on LSC (link status) interrupt or
2260 * rx sequence error interrupt. get_link_status will stay
2261 * false until the e1000_check_for_link establishes link
2262 * for copper adapters ONLY
2263 */
2264 switch (hw->media_type) {
2265 case e1000_media_type_copper:
2266 if (hw->get_link_status) {
2267 e1000_check_for_link(hw);
2268 link_active = !hw->get_link_status;
2269 } else {
2270 link_active = true;
2271 }
2272 break;
2273 case e1000_media_type_fiber:
2274 e1000_check_for_link(hw);
2275 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2276 break;
2277 case e1000_media_type_internal_serdes:
2278 e1000_check_for_link(hw);
2279 link_active = hw->serdes_has_link;
2280 break;
2281 default:
2282 break;
2283 }
2284
2285 return link_active;
2286}
2287
2530/** 2288/**
2531 * e1000_watchdog - Timer Call-back 2289 * e1000_watchdog - Timer Call-back
2532 * @data: pointer to adapter cast into an unsigned long 2290 * @data: pointer to adapter cast into an unsigned long
@@ -2538,33 +2296,16 @@ static void e1000_watchdog(unsigned long data)
2538 struct net_device *netdev = adapter->netdev; 2296 struct net_device *netdev = adapter->netdev;
2539 struct e1000_tx_ring *txdr = adapter->tx_ring; 2297 struct e1000_tx_ring *txdr = adapter->tx_ring;
2540 u32 link, tctl; 2298 u32 link, tctl;
2541 s32 ret_val;
2542
2543 ret_val = e1000_check_for_link(hw);
2544 if ((ret_val == E1000_ERR_PHY) &&
2545 (hw->phy_type == e1000_phy_igp_3) &&
2546 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2547 /* See e1000_kumeran_lock_loss_workaround() */
2548 DPRINTK(LINK, INFO,
2549 "Gigabit has been disabled, downgrading speed\n");
2550 }
2551 2299
2552 if (hw->mac_type == e1000_82573) { 2300 link = e1000_has_link(adapter);
2553 e1000_enable_tx_pkt_filtering(hw); 2301 if ((netif_carrier_ok(netdev)) && link)
2554 if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id) 2302 goto link_up;
2555 e1000_update_mng_vlan(adapter);
2556 }
2557
2558 if ((hw->media_type == e1000_media_type_internal_serdes) &&
2559 !(er32(TXCW) & E1000_TXCW_ANE))
2560 link = !hw->serdes_link_down;
2561 else
2562 link = er32(STATUS) & E1000_STATUS_LU;
2563 2303
2564 if (link) { 2304 if (link) {
2565 if (!netif_carrier_ok(netdev)) { 2305 if (!netif_carrier_ok(netdev)) {
2566 u32 ctrl; 2306 u32 ctrl;
2567 bool txb2b = true; 2307 bool txb2b = true;
2308 /* update snapshot of PHY registers on LSC */
2568 e1000_get_speed_and_duplex(hw, 2309 e1000_get_speed_and_duplex(hw,
2569 &adapter->link_speed, 2310 &adapter->link_speed,
2570 &adapter->link_duplex); 2311 &adapter->link_duplex);
@@ -2589,7 +2330,7 @@ static void e1000_watchdog(unsigned long data)
2589 case SPEED_10: 2330 case SPEED_10:
2590 txb2b = false; 2331 txb2b = false;
2591 netdev->tx_queue_len = 10; 2332 netdev->tx_queue_len = 10;
2592 adapter->tx_timeout_factor = 8; 2333 adapter->tx_timeout_factor = 16;
2593 break; 2334 break;
2594 case SPEED_100: 2335 case SPEED_100:
2595 txb2b = false; 2336 txb2b = false;
@@ -2598,52 +2339,16 @@ static void e1000_watchdog(unsigned long data)
2598 break; 2339 break;
2599 } 2340 }
2600 2341
2601 if ((hw->mac_type == e1000_82571 || 2342 /* enable transmits in the hardware */
2602 hw->mac_type == e1000_82572) &&
2603 !txb2b) {
2604 u32 tarc0;
2605 tarc0 = er32(TARC0);
2606 tarc0 &= ~(1 << 21);
2607 ew32(TARC0, tarc0);
2608 }
2609
2610 /* disable TSO for pcie and 10/100 speeds, to avoid
2611 * some hardware issues */
2612 if (!adapter->tso_force &&
2613 hw->bus_type == e1000_bus_type_pci_express){
2614 switch (adapter->link_speed) {
2615 case SPEED_10:
2616 case SPEED_100:
2617 DPRINTK(PROBE,INFO,
2618 "10/100 speed: disabling TSO\n");
2619 netdev->features &= ~NETIF_F_TSO;
2620 netdev->features &= ~NETIF_F_TSO6;
2621 break;
2622 case SPEED_1000:
2623 netdev->features |= NETIF_F_TSO;
2624 netdev->features |= NETIF_F_TSO6;
2625 break;
2626 default:
2627 /* oops */
2628 break;
2629 }
2630 }
2631
2632 /* enable transmits in the hardware, need to do this
2633 * after setting TARC0 */
2634 tctl = er32(TCTL); 2343 tctl = er32(TCTL);
2635 tctl |= E1000_TCTL_EN; 2344 tctl |= E1000_TCTL_EN;
2636 ew32(TCTL, tctl); 2345 ew32(TCTL, tctl);
2637 2346
2638 netif_carrier_on(netdev); 2347 netif_carrier_on(netdev);
2639 mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); 2348 if (!test_bit(__E1000_DOWN, &adapter->flags))
2349 mod_timer(&adapter->phy_info_timer,
2350 round_jiffies(jiffies + 2 * HZ));
2640 adapter->smartspeed = 0; 2351 adapter->smartspeed = 0;
2641 } else {
2642 /* make sure the receive unit is started */
2643 if (hw->rx_needs_kicking) {
2644 u32 rctl = er32(RCTL);
2645 ew32(RCTL, rctl | E1000_RCTL_EN);
2646 }
2647 } 2352 }
2648 } else { 2353 } else {
2649 if (netif_carrier_ok(netdev)) { 2354 if (netif_carrier_ok(netdev)) {
@@ -2652,21 +2357,16 @@ static void e1000_watchdog(unsigned long data)
2652 printk(KERN_INFO "e1000: %s NIC Link is Down\n", 2357 printk(KERN_INFO "e1000: %s NIC Link is Down\n",
2653 netdev->name); 2358 netdev->name);
2654 netif_carrier_off(netdev); 2359 netif_carrier_off(netdev);
2655 mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); 2360
2656 2361 if (!test_bit(__E1000_DOWN, &adapter->flags))
2657 /* 80003ES2LAN workaround-- 2362 mod_timer(&adapter->phy_info_timer,
2658 * For packet buffer work-around on link down event; 2363 round_jiffies(jiffies + 2 * HZ));
2659 * disable receives in the ISR and
2660 * reset device here in the watchdog
2661 */
2662 if (hw->mac_type == e1000_80003es2lan)
2663 /* reset device */
2664 schedule_work(&adapter->reset_task);
2665 } 2364 }
2666 2365
2667 e1000_smartspeed(adapter); 2366 e1000_smartspeed(adapter);
2668 } 2367 }
2669 2368
2369link_up:
2670 e1000_update_stats(adapter); 2370 e1000_update_stats(adapter);
2671 2371
2672 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2372 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
@@ -2700,13 +2400,10 @@ static void e1000_watchdog(unsigned long data)
2700 /* Force detection of hung controller every watchdog period */ 2400 /* Force detection of hung controller every watchdog period */
2701 adapter->detect_tx_hung = true; 2401 adapter->detect_tx_hung = true;
2702 2402
2703 /* With 82571 controllers, LAA may be overwritten due to controller
2704 * reset from the other port. Set the appropriate LAA in RAR[0] */
2705 if (hw->mac_type == e1000_82571 && hw->laa_is_present)
2706 e1000_rar_set(hw, hw->mac_addr, 0);
2707
2708 /* Reset the timer */ 2403 /* Reset the timer */
2709 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); 2404 if (!test_bit(__E1000_DOWN, &adapter->flags))
2405 mod_timer(&adapter->watchdog_timer,
2406 round_jiffies(jiffies + 2 * HZ));
2710} 2407}
2711 2408
2712enum latency_range { 2409enum latency_range {
@@ -2718,6 +2415,11 @@ enum latency_range {
2718 2415
2719/** 2416/**
2720 * e1000_update_itr - update the dynamic ITR value based on statistics 2417 * e1000_update_itr - update the dynamic ITR value based on statistics
2418 * @adapter: pointer to adapter
2419 * @itr_setting: current adapter->itr
2420 * @packets: the number of packets during this measurement interval
2421 * @bytes: the number of bytes during this measurement interval
2422 *
2721 * Stores a new ITR value based on packets and byte 2423 * Stores a new ITR value based on packets and byte
2722 * counts during the last interrupt. The advantage of per interrupt 2424 * counts during the last interrupt. The advantage of per interrupt
2723 * computation is faster updates and more accurate ITR for the current 2425 * computation is faster updates and more accurate ITR for the current
@@ -2727,10 +2429,6 @@ enum latency_range {
2727 * while increasing bulk throughput. 2429 * while increasing bulk throughput.
2728 * this functionality is controlled by the InterruptThrottleRate module 2430 * this functionality is controlled by the InterruptThrottleRate module
2729 * parameter (see e1000_param.c) 2431 * parameter (see e1000_param.c)
2730 * @adapter: pointer to adapter
2731 * @itr_setting: current adapter->itr
2732 * @packets: the number of packets during this measurement interval
2733 * @bytes: the number of bytes during this measurement interval
2734 **/ 2432 **/
2735static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2433static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2736 u16 itr_setting, int packets, int bytes) 2434 u16 itr_setting, int packets, int bytes)
@@ -3035,8 +2733,9 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3035 size -= 4; 2733 size -= 4;
3036 2734
3037 buffer_info->length = size; 2735 buffer_info->length = size;
3038 buffer_info->dma = skb_shinfo(skb)->dma_head + offset; 2736 /* set time_stamp *before* dma to help avoid a possible race */
3039 buffer_info->time_stamp = jiffies; 2737 buffer_info->time_stamp = jiffies;
2738 buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
3040 buffer_info->next_to_watch = i; 2739 buffer_info->next_to_watch = i;
3041 2740
3042 len -= size; 2741 len -= size;
@@ -3071,13 +2770,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3071 * Avoid terminating buffers within evenly-aligned 2770 * Avoid terminating buffers within evenly-aligned
3072 * dwords. */ 2771 * dwords. */
3073 if (unlikely(adapter->pcix_82544 && 2772 if (unlikely(adapter->pcix_82544 &&
3074 !((unsigned long)(frag->page+offset+size-1) & 4) && 2773 !((unsigned long)(page_to_phys(frag->page) + offset
3075 size > 4)) 2774 + size - 1) & 4) &&
2775 size > 4))
3076 size -= 4; 2776 size -= 4;
3077 2777
3078 buffer_info->length = size; 2778 buffer_info->length = size;
3079 buffer_info->dma = map[f] + offset;
3080 buffer_info->time_stamp = jiffies; 2779 buffer_info->time_stamp = jiffies;
2780 buffer_info->dma = map[f] + offset;
3081 buffer_info->next_to_watch = i; 2781 buffer_info->next_to_watch = i;
3082 2782
3083 len -= size; 2783 len -= size;
@@ -3186,41 +2886,6 @@ no_fifo_stall_required:
3186 return 0; 2886 return 0;
3187} 2887}
3188 2888
3189#define MINIMUM_DHCP_PACKET_SIZE 282
3190static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3191 struct sk_buff *skb)
3192{
3193 struct e1000_hw *hw = &adapter->hw;
3194 u16 length, offset;
3195 if (vlan_tx_tag_present(skb)) {
3196 if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
3197 ( hw->mng_cookie.status &
3198 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
3199 return 0;
3200 }
3201 if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
3202 struct ethhdr *eth = (struct ethhdr *)skb->data;
3203 if ((htons(ETH_P_IP) == eth->h_proto)) {
3204 const struct iphdr *ip =
3205 (struct iphdr *)((u8 *)skb->data+14);
3206 if (IPPROTO_UDP == ip->protocol) {
3207 struct udphdr *udp =
3208 (struct udphdr *)((u8 *)ip +
3209 (ip->ihl << 2));
3210 if (ntohs(udp->dest) == 67) {
3211 offset = (u8 *)udp + 8 - skb->data;
3212 length = skb->len - offset;
3213
3214 return e1000_mng_write_dhcp_info(hw,
3215 (u8 *)udp + 8,
3216 length);
3217 }
3218 }
3219 }
3220 }
3221 return 0;
3222}
3223
3224static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 2889static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3225{ 2890{
3226 struct e1000_adapter *adapter = netdev_priv(netdev); 2891 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3279,11 +2944,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3279 return NETDEV_TX_OK; 2944 return NETDEV_TX_OK;
3280 } 2945 }
3281 2946
3282 /* 82571 and newer doesn't need the workaround that limited descriptor
3283 * length to 4kB */
3284 if (hw->mac_type >= e1000_82571)
3285 max_per_txd = 8192;
3286
3287 mss = skb_shinfo(skb)->gso_size; 2947 mss = skb_shinfo(skb)->gso_size;
3288 /* The controller does a simple calculation to 2948 /* The controller does a simple calculation to
3289 * make sure there is enough room in the FIFO before 2949 * make sure there is enough room in the FIFO before
@@ -3296,9 +2956,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3296 max_per_txd = min(mss << 2, max_per_txd); 2956 max_per_txd = min(mss << 2, max_per_txd);
3297 max_txd_pwr = fls(max_per_txd) - 1; 2957 max_txd_pwr = fls(max_per_txd) - 1;
3298 2958
3299 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3300 * points to just header, pull a few bytes of payload from
3301 * frags into skb->data */
3302 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2959 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3303 if (skb->data_len && hdr_len == len) { 2960 if (skb->data_len && hdr_len == len) {
3304 switch (hw->mac_type) { 2961 switch (hw->mac_type) {
@@ -3313,10 +2970,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3313 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 2970 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3314 break; 2971 break;
3315 /* fall through */ 2972 /* fall through */
3316 case e1000_82571:
3317 case e1000_82572:
3318 case e1000_82573:
3319 case e1000_ich8lan:
3320 pull_size = min((unsigned int)4, skb->data_len); 2973 pull_size = min((unsigned int)4, skb->data_len);
3321 if (!__pskb_pull_tail(skb, pull_size)) { 2974 if (!__pskb_pull_tail(skb, pull_size)) {
3322 DPRINTK(DRV, ERR, 2975 DPRINTK(DRV, ERR,
@@ -3361,11 +3014,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3361 if (adapter->pcix_82544) 3014 if (adapter->pcix_82544)
3362 count += nr_frags; 3015 count += nr_frags;
3363 3016
3364
3365 if (hw->tx_pkt_filtering &&
3366 (hw->mac_type == e1000_82573))
3367 e1000_transfer_dhcp_info(adapter, skb);
3368
3369 /* need: count + 2 desc gap to keep tail from touching 3017 /* need: count + 2 desc gap to keep tail from touching
3370 * head, otherwise try next time */ 3018 * head, otherwise try next time */
3371 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3019 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
@@ -3374,7 +3022,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3374 if (unlikely(hw->mac_type == e1000_82547)) { 3022 if (unlikely(hw->mac_type == e1000_82547)) {
3375 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 3023 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3376 netif_stop_queue(netdev); 3024 netif_stop_queue(netdev);
3377 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 3025 if (!test_bit(__E1000_DOWN, &adapter->flags))
3026 mod_timer(&adapter->tx_fifo_stall_timer,
3027 jiffies + 1);
3378 return NETDEV_TX_BUSY; 3028 return NETDEV_TX_BUSY;
3379 } 3029 }
3380 } 3030 }
@@ -3393,14 +3043,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3393 } 3043 }
3394 3044
3395 if (likely(tso)) { 3045 if (likely(tso)) {
3396 tx_ring->last_tx_tso = 1; 3046 if (likely(hw->mac_type != e1000_82544))
3047 tx_ring->last_tx_tso = 1;
3397 tx_flags |= E1000_TX_FLAGS_TSO; 3048 tx_flags |= E1000_TX_FLAGS_TSO;
3398 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 3049 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3399 tx_flags |= E1000_TX_FLAGS_CSUM; 3050 tx_flags |= E1000_TX_FLAGS_CSUM;
3400 3051
3401 /* Old method was to assume IPv4 packet by default if TSO was enabled.
3402 * 82571 hardware supports TSO capabilities for IPv6 as well...
3403 * no longer assume, we must. */
3404 if (likely(skb->protocol == htons(ETH_P_IP))) 3052 if (likely(skb->protocol == htons(ETH_P_IP)))
3405 tx_flags |= E1000_TX_FLAGS_IPV4; 3053 tx_flags |= E1000_TX_FLAGS_IPV4;
3406 3054
@@ -3472,7 +3120,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3472 struct e1000_adapter *adapter = netdev_priv(netdev); 3120 struct e1000_adapter *adapter = netdev_priv(netdev);
3473 struct e1000_hw *hw = &adapter->hw; 3121 struct e1000_hw *hw = &adapter->hw;
3474 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3122 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3475 u16 eeprom_data = 0;
3476 3123
3477 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3124 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3478 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3125 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3483,44 +3130,23 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3483 /* Adapter-specific max frame size limits. */ 3130 /* Adapter-specific max frame size limits. */
3484 switch (hw->mac_type) { 3131 switch (hw->mac_type) {
3485 case e1000_undefined ... e1000_82542_rev2_1: 3132 case e1000_undefined ... e1000_82542_rev2_1:
3486 case e1000_ich8lan:
3487 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3133 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3488 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); 3134 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
3489 return -EINVAL; 3135 return -EINVAL;
3490 } 3136 }
3491 break; 3137 break;
3492 case e1000_82573:
3493 /* Jumbo Frames not supported if:
3494 * - this is not an 82573L device
3495 * - ASPM is enabled in any way (0x1A bits 3:2) */
3496 e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
3497 &eeprom_data);
3498 if ((hw->device_id != E1000_DEV_ID_82573L) ||
3499 (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
3500 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3501 DPRINTK(PROBE, ERR,
3502 "Jumbo Frames not supported.\n");
3503 return -EINVAL;
3504 }
3505 break;
3506 }
3507 /* ERT will be enabled later to enable wire speed receives */
3508
3509 /* fall through to get support */
3510 case e1000_82571:
3511 case e1000_82572:
3512 case e1000_80003es2lan:
3513#define MAX_STD_JUMBO_FRAME_SIZE 9234
3514 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3515 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
3516 return -EINVAL;
3517 }
3518 break;
3519 default: 3138 default:
3520 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3139 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3521 break; 3140 break;
3522 } 3141 }
3523 3142
3143 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3144 msleep(1);
3145 /* e1000_down has a dependency on max_frame_size */
3146 hw->max_frame_size = max_frame;
3147 if (netif_running(netdev))
3148 e1000_down(adapter);
3149
3524 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3150 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3525 * means we reserve 2 more, this pushes us to allocate from the next 3151 * means we reserve 2 more, this pushes us to allocate from the next
3526 * larger slab size. 3152 * larger slab size.
@@ -3549,11 +3175,16 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3549 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3175 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3550 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3176 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3551 3177
3178 printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
3179 netdev->name, netdev->mtu, new_mtu);
3552 netdev->mtu = new_mtu; 3180 netdev->mtu = new_mtu;
3553 hw->max_frame_size = max_frame;
3554 3181
3555 if (netif_running(netdev)) 3182 if (netif_running(netdev))
3556 e1000_reinit_locked(adapter); 3183 e1000_up(adapter);
3184 else
3185 e1000_reset(adapter);
3186
3187 clear_bit(__E1000_RESETTING, &adapter->flags);
3557 3188
3558 return 0; 3189 return 0;
3559} 3190}
@@ -3596,14 +3227,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3596 adapter->stats.mprc += er32(MPRC); 3227 adapter->stats.mprc += er32(MPRC);
3597 adapter->stats.roc += er32(ROC); 3228 adapter->stats.roc += er32(ROC);
3598 3229
3599 if (hw->mac_type != e1000_ich8lan) { 3230 adapter->stats.prc64 += er32(PRC64);
3600 adapter->stats.prc64 += er32(PRC64); 3231 adapter->stats.prc127 += er32(PRC127);
3601 adapter->stats.prc127 += er32(PRC127); 3232 adapter->stats.prc255 += er32(PRC255);
3602 adapter->stats.prc255 += er32(PRC255); 3233 adapter->stats.prc511 += er32(PRC511);
3603 adapter->stats.prc511 += er32(PRC511); 3234 adapter->stats.prc1023 += er32(PRC1023);
3604 adapter->stats.prc1023 += er32(PRC1023); 3235 adapter->stats.prc1522 += er32(PRC1522);
3605 adapter->stats.prc1522 += er32(PRC1522);
3606 }
3607 3236
3608 adapter->stats.symerrs += er32(SYMERRS); 3237 adapter->stats.symerrs += er32(SYMERRS);
3609 adapter->stats.mpc += er32(MPC); 3238 adapter->stats.mpc += er32(MPC);
@@ -3632,14 +3261,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3632 adapter->stats.toth += er32(TOTH); 3261 adapter->stats.toth += er32(TOTH);
3633 adapter->stats.tpr += er32(TPR); 3262 adapter->stats.tpr += er32(TPR);
3634 3263
3635 if (hw->mac_type != e1000_ich8lan) { 3264 adapter->stats.ptc64 += er32(PTC64);
3636 adapter->stats.ptc64 += er32(PTC64); 3265 adapter->stats.ptc127 += er32(PTC127);
3637 adapter->stats.ptc127 += er32(PTC127); 3266 adapter->stats.ptc255 += er32(PTC255);
3638 adapter->stats.ptc255 += er32(PTC255); 3267 adapter->stats.ptc511 += er32(PTC511);
3639 adapter->stats.ptc511 += er32(PTC511); 3268 adapter->stats.ptc1023 += er32(PTC1023);
3640 adapter->stats.ptc1023 += er32(PTC1023); 3269 adapter->stats.ptc1522 += er32(PTC1522);
3641 adapter->stats.ptc1522 += er32(PTC1522);
3642 }
3643 3270
3644 adapter->stats.mptc += er32(MPTC); 3271 adapter->stats.mptc += er32(MPTC);
3645 adapter->stats.bptc += er32(BPTC); 3272 adapter->stats.bptc += er32(BPTC);
@@ -3659,20 +3286,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3659 adapter->stats.tsctc += er32(TSCTC); 3286 adapter->stats.tsctc += er32(TSCTC);
3660 adapter->stats.tsctfc += er32(TSCTFC); 3287 adapter->stats.tsctfc += er32(TSCTFC);
3661 } 3288 }
3662 if (hw->mac_type > e1000_82547_rev_2) {
3663 adapter->stats.iac += er32(IAC);
3664 adapter->stats.icrxoc += er32(ICRXOC);
3665
3666 if (hw->mac_type != e1000_ich8lan) {
3667 adapter->stats.icrxptc += er32(ICRXPTC);
3668 adapter->stats.icrxatc += er32(ICRXATC);
3669 adapter->stats.ictxptc += er32(ICTXPTC);
3670 adapter->stats.ictxatc += er32(ICTXATC);
3671 adapter->stats.ictxqec += er32(ICTXQEC);
3672 adapter->stats.ictxqmtc += er32(ICTXQMTC);
3673 adapter->stats.icrxdmtc += er32(ICRXDMTC);
3674 }
3675 }
3676 3289
3677 /* Fill out the OS statistics structure */ 3290 /* Fill out the OS statistics structure */
3678 adapter->net_stats.multicast = adapter->stats.mprc; 3291 adapter->net_stats.multicast = adapter->stats.mprc;
@@ -3731,49 +3344,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3731} 3344}
3732 3345
3733/** 3346/**
3734 * e1000_intr_msi - Interrupt Handler
3735 * @irq: interrupt number
3736 * @data: pointer to a network interface device structure
3737 **/
3738
3739static irqreturn_t e1000_intr_msi(int irq, void *data)
3740{
3741 struct net_device *netdev = data;
3742 struct e1000_adapter *adapter = netdev_priv(netdev);
3743 struct e1000_hw *hw = &adapter->hw;
3744 u32 icr = er32(ICR);
3745
3746 /* in NAPI mode read ICR disables interrupts using IAM */
3747
3748 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3749 hw->get_link_status = 1;
3750 /* 80003ES2LAN workaround-- For packet buffer work-around on
3751 * link down event; disable receives here in the ISR and reset
3752 * adapter in watchdog */
3753 if (netif_carrier_ok(netdev) &&
3754 (hw->mac_type == e1000_80003es2lan)) {
3755 /* disable receives */
3756 u32 rctl = er32(RCTL);
3757 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3758 }
3759 /* guard against interrupt when we're going down */
3760 if (!test_bit(__E1000_DOWN, &adapter->flags))
3761 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3762 }
3763
3764 if (likely(napi_schedule_prep(&adapter->napi))) {
3765 adapter->total_tx_bytes = 0;
3766 adapter->total_tx_packets = 0;
3767 adapter->total_rx_bytes = 0;
3768 adapter->total_rx_packets = 0;
3769 __napi_schedule(&adapter->napi);
3770 } else
3771 e1000_irq_enable(adapter);
3772
3773 return IRQ_HANDLED;
3774}
3775
3776/**
3777 * e1000_intr - Interrupt Handler 3347 * e1000_intr - Interrupt Handler
3778 * @irq: interrupt number 3348 * @irq: interrupt number
3779 * @data: pointer to a network interface device structure 3349 * @data: pointer to a network interface device structure
@@ -3784,43 +3354,22 @@ static irqreturn_t e1000_intr(int irq, void *data)
3784 struct net_device *netdev = data; 3354 struct net_device *netdev = data;
3785 struct e1000_adapter *adapter = netdev_priv(netdev); 3355 struct e1000_adapter *adapter = netdev_priv(netdev);
3786 struct e1000_hw *hw = &adapter->hw; 3356 struct e1000_hw *hw = &adapter->hw;
3787 u32 rctl, icr = er32(ICR); 3357 u32 icr = er32(ICR);
3788 3358
3789 if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) 3359 if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
3790 return IRQ_NONE; /* Not our interrupt */ 3360 return IRQ_NONE; /* Not our interrupt */
3791 3361
3792 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3793 * not set, then the adapter didn't send an interrupt */
3794 if (unlikely(hw->mac_type >= e1000_82571 &&
3795 !(icr & E1000_ICR_INT_ASSERTED)))
3796 return IRQ_NONE;
3797
3798 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3799 * need for the IMC write */
3800
3801 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3362 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3802 hw->get_link_status = 1; 3363 hw->get_link_status = 1;
3803 /* 80003ES2LAN workaround--
3804 * For packet buffer work-around on link down event;
3805 * disable receives here in the ISR and
3806 * reset adapter in watchdog
3807 */
3808 if (netif_carrier_ok(netdev) &&
3809 (hw->mac_type == e1000_80003es2lan)) {
3810 /* disable receives */
3811 rctl = er32(RCTL);
3812 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3813 }
3814 /* guard against interrupt when we're going down */ 3364 /* guard against interrupt when we're going down */
3815 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3365 if (!test_bit(__E1000_DOWN, &adapter->flags))
3816 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3366 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3817 } 3367 }
3818 3368
3819 if (unlikely(hw->mac_type < e1000_82571)) { 3369 /* disable interrupts, without the synchronize_irq bit */
3820 /* disable interrupts, without the synchronize_irq bit */ 3370 ew32(IMC, ~0);
3821 ew32(IMC, ~0); 3371 E1000_WRITE_FLUSH();
3822 E1000_WRITE_FLUSH(); 3372
3823 }
3824 if (likely(napi_schedule_prep(&adapter->napi))) { 3373 if (likely(napi_schedule_prep(&adapter->napi))) {
3825 adapter->total_tx_bytes = 0; 3374 adapter->total_tx_bytes = 0;
3826 adapter->total_tx_packets = 0; 3375 adapter->total_tx_packets = 0;
@@ -3844,17 +3393,13 @@ static irqreturn_t e1000_intr(int irq, void *data)
3844static int e1000_clean(struct napi_struct *napi, int budget) 3393static int e1000_clean(struct napi_struct *napi, int budget)
3845{ 3394{
3846 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 3395 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3847 struct net_device *poll_dev = adapter->netdev; 3396 int tx_clean_complete = 0, work_done = 0;
3848 int tx_cleaned = 0, work_done = 0;
3849
3850 adapter = netdev_priv(poll_dev);
3851 3397
3852 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3398 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3853 3399
3854 adapter->clean_rx(adapter, &adapter->rx_ring[0], 3400 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3855 &work_done, budget);
3856 3401
3857 if (!tx_cleaned) 3402 if (!tx_clean_complete)
3858 work_done = budget; 3403 work_done = budget;
3859 3404
3860 /* If budget not fully consumed, exit the polling mode */ 3405 /* If budget not fully consumed, exit the polling mode */
@@ -3925,7 +3470,9 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3925 * sees the new next_to_clean. 3470 * sees the new next_to_clean.
3926 */ 3471 */
3927 smp_mb(); 3472 smp_mb();
3928 if (netif_queue_stopped(netdev)) { 3473
3474 if (netif_queue_stopped(netdev) &&
3475 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3929 netif_wake_queue(netdev); 3476 netif_wake_queue(netdev);
3930 ++adapter->restart_queue; 3477 ++adapter->restart_queue;
3931 } 3478 }
@@ -3935,8 +3482,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3935 /* Detect a transmit hang in hardware, this serializes the 3482 /* Detect a transmit hang in hardware, this serializes the
3936 * check with the clearing of time_stamp and movement of i */ 3483 * check with the clearing of time_stamp and movement of i */
3937 adapter->detect_tx_hung = false; 3484 adapter->detect_tx_hung = false;
3938 if (tx_ring->buffer_info[i].time_stamp && 3485 if (tx_ring->buffer_info[eop].time_stamp &&
3939 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + 3486 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3940 (adapter->tx_timeout_factor * HZ)) 3487 (adapter->tx_timeout_factor * HZ))
3941 && !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3488 && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3942 3489
@@ -3958,7 +3505,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3958 readl(hw->hw_addr + tx_ring->tdt), 3505 readl(hw->hw_addr + tx_ring->tdt),
3959 tx_ring->next_to_use, 3506 tx_ring->next_to_use,
3960 tx_ring->next_to_clean, 3507 tx_ring->next_to_clean,
3961 tx_ring->buffer_info[i].time_stamp, 3508 tx_ring->buffer_info[eop].time_stamp,
3962 eop, 3509 eop,
3963 jiffies, 3510 jiffies,
3964 eop_desc->upper.fields.status); 3511 eop_desc->upper.fields.status);
@@ -3999,25 +3546,13 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3999 return; 3546 return;
4000 } 3547 }
4001 /* TCP/UDP Checksum has not been calculated */ 3548 /* TCP/UDP Checksum has not been calculated */
4002 if (hw->mac_type <= e1000_82547_rev_2) { 3549 if (!(status & E1000_RXD_STAT_TCPCS))
4003 if (!(status & E1000_RXD_STAT_TCPCS)) 3550 return;
4004 return; 3551
4005 } else {
4006 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
4007 return;
4008 }
4009 /* It must be a TCP or UDP packet with a valid checksum */ 3552 /* It must be a TCP or UDP packet with a valid checksum */
4010 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3553 if (likely(status & E1000_RXD_STAT_TCPCS)) {
4011 /* TCP checksum is good */ 3554 /* TCP checksum is good */
4012 skb->ip_summed = CHECKSUM_UNNECESSARY; 3555 skb->ip_summed = CHECKSUM_UNNECESSARY;
4013 } else if (hw->mac_type > e1000_82547_rev_2) {
4014 /* IP fragment with UDP payload */
4015 /* Hardware complements the payload checksum, so we undo it
4016 * and then put the value in host order for further stack use.
4017 */
4018 __sum16 sum = (__force __sum16)htons(csum);
4019 skb->csum = csum_unfold(~sum);
4020 skb->ip_summed = CHECKSUM_COMPLETE;
4021 } 3556 }
4022 adapter->hw_csum_good++; 3557 adapter->hw_csum_good++;
4023} 3558}
@@ -4814,20 +4349,6 @@ void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4814 pcix_set_mmrbc(adapter->pdev, mmrbc); 4349 pcix_set_mmrbc(adapter->pdev, mmrbc);
4815} 4350}
4816 4351
4817s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
4818{
4819 struct e1000_adapter *adapter = hw->back;
4820 u16 cap_offset;
4821
4822 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
4823 if (!cap_offset)
4824 return -E1000_ERR_CONFIG;
4825
4826 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
4827
4828 return E1000_SUCCESS;
4829}
4830
4831void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4352void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4832{ 4353{
4833 outl(value, port); 4354 outl(value, port);
@@ -4850,33 +4371,27 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
4850 ctrl |= E1000_CTRL_VME; 4371 ctrl |= E1000_CTRL_VME;
4851 ew32(CTRL, ctrl); 4372 ew32(CTRL, ctrl);
4852 4373
4853 if (adapter->hw.mac_type != e1000_ich8lan) { 4374 /* enable VLAN receive filtering */
4854 /* enable VLAN receive filtering */ 4375 rctl = er32(RCTL);
4855 rctl = er32(RCTL); 4376 rctl &= ~E1000_RCTL_CFIEN;
4856 rctl &= ~E1000_RCTL_CFIEN; 4377 if (!(netdev->flags & IFF_PROMISC))
4857 if (!(netdev->flags & IFF_PROMISC)) 4378 rctl |= E1000_RCTL_VFE;
4858 rctl |= E1000_RCTL_VFE; 4379 ew32(RCTL, rctl);
4859 ew32(RCTL, rctl); 4380 e1000_update_mng_vlan(adapter);
4860 e1000_update_mng_vlan(adapter);
4861 }
4862 } else { 4381 } else {
4863 /* disable VLAN tag insert/strip */ 4382 /* disable VLAN tag insert/strip */
4864 ctrl = er32(CTRL); 4383 ctrl = er32(CTRL);
4865 ctrl &= ~E1000_CTRL_VME; 4384 ctrl &= ~E1000_CTRL_VME;
4866 ew32(CTRL, ctrl); 4385 ew32(CTRL, ctrl);
4867 4386
4868 if (adapter->hw.mac_type != e1000_ich8lan) { 4387 /* disable VLAN receive filtering */
4869 /* disable VLAN receive filtering */ 4388 rctl = er32(RCTL);
4870 rctl = er32(RCTL); 4389 rctl &= ~E1000_RCTL_VFE;
4871 rctl &= ~E1000_RCTL_VFE; 4390 ew32(RCTL, rctl);
4872 ew32(RCTL, rctl);
4873 4391
4874 if (adapter->mng_vlan_id != 4392 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
4875 (u16)E1000_MNG_VLAN_NONE) { 4393 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4876 e1000_vlan_rx_kill_vid(netdev, 4394 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4877 adapter->mng_vlan_id);
4878 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4879 }
4880 } 4395 }
4881 } 4396 }
4882 4397
@@ -4913,14 +4428,6 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4913 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4428 if (!test_bit(__E1000_DOWN, &adapter->flags))
4914 e1000_irq_enable(adapter); 4429 e1000_irq_enable(adapter);
4915 4430
4916 if ((hw->mng_cookie.status &
4917 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4918 (vid == adapter->mng_vlan_id)) {
4919 /* release control to f/w */
4920 e1000_release_hw_control(adapter);
4921 return;
4922 }
4923
4924 /* remove VID from filter table */ 4431 /* remove VID from filter table */
4925 index = (vid >> 5) & 0x7F; 4432 index = (vid >> 5) & 0x7F;
4926 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4433 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
@@ -5031,16 +4538,13 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5031 } 4538 }
5032 4539
5033 if (hw->media_type == e1000_media_type_fiber || 4540 if (hw->media_type == e1000_media_type_fiber ||
5034 hw->media_type == e1000_media_type_internal_serdes) { 4541 hw->media_type == e1000_media_type_internal_serdes) {
5035 /* keep the laser running in D3 */ 4542 /* keep the laser running in D3 */
5036 ctrl_ext = er32(CTRL_EXT); 4543 ctrl_ext = er32(CTRL_EXT);
5037 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 4544 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5038 ew32(CTRL_EXT, ctrl_ext); 4545 ew32(CTRL_EXT, ctrl_ext);
5039 } 4546 }
5040 4547
5041 /* Allow time for pending master requests to run */
5042 e1000_disable_pciex_master(hw);
5043
5044 ew32(WUC, E1000_WUC_PME_EN); 4548 ew32(WUC, E1000_WUC_PME_EN);
5045 ew32(WUFC, wufc); 4549 ew32(WUFC, wufc);
5046 } else { 4550 } else {
@@ -5056,16 +4560,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5056 if (adapter->en_mng_pt) 4560 if (adapter->en_mng_pt)
5057 *enable_wake = true; 4561 *enable_wake = true;
5058 4562
5059 if (hw->phy_type == e1000_phy_igp_3)
5060 e1000_phy_powerdown_workaround(hw);
5061
5062 if (netif_running(netdev)) 4563 if (netif_running(netdev))
5063 e1000_free_irq(adapter); 4564 e1000_free_irq(adapter);
5064 4565
5065 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5066 * would have already happened in close and is redundant. */
5067 e1000_release_hw_control(adapter);
5068
5069 pci_disable_device(pdev); 4566 pci_disable_device(pdev);
5070 4567
5071 return 0; 4568 return 0;
@@ -5131,14 +4628,6 @@ static int e1000_resume(struct pci_dev *pdev)
5131 4628
5132 netif_device_attach(netdev); 4629 netif_device_attach(netdev);
5133 4630
5134 /* If the controller is 82573 and f/w is AMT, do not set
5135 * DRV_LOAD until the interface is up. For all other cases,
5136 * let the f/w know that the h/w is now under the control
5137 * of the driver. */
5138 if (hw->mac_type != e1000_82573 ||
5139 !e1000_check_mng_mode(hw))
5140 e1000_get_hw_control(adapter);
5141
5142 return 0; 4631 return 0;
5143} 4632}
5144#endif 4633#endif
@@ -5174,7 +4663,7 @@ static void e1000_netpoll(struct net_device *netdev)
5174/** 4663/**
5175 * e1000_io_error_detected - called when PCI error is detected 4664 * e1000_io_error_detected - called when PCI error is detected
5176 * @pdev: Pointer to PCI device 4665 * @pdev: Pointer to PCI device
5177 * @state: The current pci conneection state 4666 * @state: The current pci connection state
5178 * 4667 *
5179 * This function is called after a PCI bus error affecting 4668 * This function is called after a PCI bus error affecting
5180 * this device has been detected. 4669 * this device has been detected.
@@ -5243,7 +4732,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
5243{ 4732{
5244 struct net_device *netdev = pci_get_drvdata(pdev); 4733 struct net_device *netdev = pci_get_drvdata(pdev);
5245 struct e1000_adapter *adapter = netdev_priv(netdev); 4734 struct e1000_adapter *adapter = netdev_priv(netdev);
5246 struct e1000_hw *hw = &adapter->hw;
5247 4735
5248 e1000_init_manageability(adapter); 4736 e1000_init_manageability(adapter);
5249 4737
@@ -5255,15 +4743,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
5255 } 4743 }
5256 4744
5257 netif_device_attach(netdev); 4745 netif_device_attach(netdev);
5258
5259 /* If the controller is 82573 and f/w is AMT, do not set
5260 * DRV_LOAD until the interface is up. For all other cases,
5261 * let the f/w know that the h/w is now under the control
5262 * of the driver. */
5263 if (hw->mac_type != e1000_82573 ||
5264 !e1000_check_mng_mode(hw))
5265 e1000_get_hw_control(adapter);
5266
5267} 4746}
5268 4747
5269/* e1000_main.c */ 4748/* e1000_main.c */
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 213437d13154..38d2741ccae9 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -518,22 +518,6 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
518 adapter->smart_power_down = opt.def; 518 adapter->smart_power_down = opt.def;
519 } 519 }
520 } 520 }
521 { /* Kumeran Lock Loss Workaround */
522 opt = (struct e1000_option) {
523 .type = enable_option,
524 .name = "Kumeran Lock Loss Workaround",
525 .err = "defaulting to Enabled",
526 .def = OPTION_ENABLED
527 };
528
529 if (num_KumeranLockLoss > bd) {
530 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
531 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
532 adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
533 } else {
534 adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
535 }
536 }
537 521
538 switch (adapter->hw.media_type) { 522 switch (adapter->hw.media_type) {
539 case e1000_media_type_fiber: 523 case e1000_media_type_fiber:
@@ -626,12 +610,6 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
626 .p = dplx_list }} 610 .p = dplx_list }}
627 }; 611 };
628 612
629 if (e1000_check_phy_reset_block(&adapter->hw)) {
630 DPRINTK(PROBE, INFO,
631 "Link active due to SoL/IDER Session. "
632 "Speed/Duplex/AutoNeg parameter ignored.\n");
633 return;
634 }
635 if (num_Duplex > bd) { 613 if (num_Duplex > bd) {
636 dplx = Duplex[bd]; 614 dplx = Duplex[bd];
637 e1000_validate_option(&dplx, &opt, adapter); 615 e1000_validate_option(&dplx, &opt, adapter);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b53b40ba88a8..d1e0563a67df 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = {
1803 | FLAG_HAS_AMT 1803 | FLAG_HAS_AMT
1804 | FLAG_HAS_CTRLEXT_ON_LOAD, 1804 | FLAG_HAS_CTRLEXT_ON_LOAD,
1805 .pba = 20, 1805 .pba = 20,
1806 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1806 .max_hw_frame_size = DEFAULT_JUMBO,
1807 .get_variants = e1000_get_variants_82571, 1807 .get_variants = e1000_get_variants_82571,
1808 .mac_ops = &e82571_mac_ops, 1808 .mac_ops = &e82571_mac_ops,
1809 .phy_ops = &e82_phy_ops_bm, 1809 .phy_ops = &e82_phy_ops_bm,
@@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = {
1820 | FLAG_HAS_AMT 1820 | FLAG_HAS_AMT
1821 | FLAG_HAS_CTRLEXT_ON_LOAD, 1821 | FLAG_HAS_CTRLEXT_ON_LOAD,
1822 .pba = 20, 1822 .pba = 20,
1823 .max_hw_frame_size = DEFAULT_JUMBO, 1823 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1824 .get_variants = e1000_get_variants_82571, 1824 .get_variants = e1000_get_variants_82571,
1825 .mac_ops = &e82571_mac_ops, 1825 .mac_ops = &e82571_mac_ops,
1826 .phy_ops = &e82_phy_ops_bm, 1826 .phy_ops = &e82_phy_ops_bm,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 981936c1fb46..405a144ebcad 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -519,9 +519,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
519extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); 519extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
520extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); 520extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
521extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); 521extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
522extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
523 u16 *data);
522extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); 524extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
523extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); 525extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
524extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); 526extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
527extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
528 u16 data);
525extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); 529extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
526extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); 530extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
527extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); 531extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -538,7 +542,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
538extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); 542extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
539extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); 543extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
540extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); 544extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
545extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
546 u16 data);
541extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); 547extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
548extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
549 u16 *data);
542extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, 550extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
543 u32 usec_interval, bool *success); 551 u32 usec_interval, bool *success);
544extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); 552extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -546,7 +554,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
546extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); 554extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
547extern s32 e1000e_check_downshift(struct e1000_hw *hw); 555extern s32 e1000e_check_downshift(struct e1000_hw *hw);
548extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); 556extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
557extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
558 u16 *data);
549extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); 559extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
560extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
561 u16 data);
550extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow); 562extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
551extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); 563extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
552extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); 564extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index fd44d9f90769..7b05cf47f7f5 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -764,11 +764,13 @@ struct e1000_phy_operations {
764 s32 (*get_cable_length)(struct e1000_hw *); 764 s32 (*get_cable_length)(struct e1000_hw *);
765 s32 (*get_phy_info)(struct e1000_hw *); 765 s32 (*get_phy_info)(struct e1000_hw *);
766 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); 766 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
767 s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
767 void (*release_phy)(struct e1000_hw *); 768 void (*release_phy)(struct e1000_hw *);
768 s32 (*reset_phy)(struct e1000_hw *); 769 s32 (*reset_phy)(struct e1000_hw *);
769 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); 770 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
770 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); 771 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
771 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); 772 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
773 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
772 s32 (*cfg_on_link_up)(struct e1000_hw *); 774 s32 (*cfg_on_link_up)(struct e1000_hw *);
773}; 775};
774 776
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 99df2abf82a9..b6388b9535fd 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -122,6 +122,13 @@
122 122
123#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ 123#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
124 124
125#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
126
127/* OEM Bits Phy Register */
128#define HV_OEM_BITS PHY_REG(768, 25)
129#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
130#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
131
125/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 132/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
126/* Offset 04h HSFSTS */ 133/* Offset 04h HSFSTS */
127union ich8_hws_flash_status { 134union ich8_hws_flash_status {
@@ -200,6 +207,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
200static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); 207static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
201static s32 e1000_led_on_pchlan(struct e1000_hw *hw); 208static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
202static s32 e1000_led_off_pchlan(struct e1000_hw *hw); 209static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
210static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
203 211
204static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 212static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
205{ 213{
@@ -242,7 +250,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
242 250
243 phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; 251 phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
244 phy->ops.read_phy_reg = e1000_read_phy_reg_hv; 252 phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
253 phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
254 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
255 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
245 phy->ops.write_phy_reg = e1000_write_phy_reg_hv; 256 phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
257 phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
246 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 258 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
247 259
248 phy->id = e1000_phy_unknown; 260 phy->id = e1000_phy_unknown;
@@ -303,6 +315,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
303 case IGP03E1000_E_PHY_ID: 315 case IGP03E1000_E_PHY_ID:
304 phy->type = e1000_phy_igp_3; 316 phy->type = e1000_phy_igp_3;
305 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 317 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
318 phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
319 phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
306 break; 320 break;
307 case IFE_E_PHY_ID: 321 case IFE_E_PHY_ID:
308 case IFE_PLUS_E_PHY_ID: 322 case IFE_PLUS_E_PHY_ID:
@@ -568,12 +582,39 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
568static DEFINE_MUTEX(nvm_mutex); 582static DEFINE_MUTEX(nvm_mutex);
569 583
570/** 584/**
585 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
586 * @hw: pointer to the HW structure
587 *
588 * Acquires the mutex for performing NVM operations.
589 **/
590static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
591{
592 mutex_lock(&nvm_mutex);
593
594 return 0;
595}
596
597/**
598 * e1000_release_nvm_ich8lan - Release NVM mutex
599 * @hw: pointer to the HW structure
600 *
601 * Releases the mutex used while performing NVM operations.
602 **/
603static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
604{
605 mutex_unlock(&nvm_mutex);
606
607 return;
608}
609
610static DEFINE_MUTEX(swflag_mutex);
611
612/**
571 * e1000_acquire_swflag_ich8lan - Acquire software control flag 613 * e1000_acquire_swflag_ich8lan - Acquire software control flag
572 * @hw: pointer to the HW structure 614 * @hw: pointer to the HW structure
573 * 615 *
574 * Acquires the software control flag for performing NVM and PHY 616 * Acquires the software control flag for performing PHY and select
575 * operations. This is a function pointer entry point only called by 617 * MAC CSR accesses.
576 * read/write routines for the PHY and NVM parts.
577 **/ 618 **/
578static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) 619static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
579{ 620{
@@ -582,7 +623,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
582 623
583 might_sleep(); 624 might_sleep();
584 625
585 mutex_lock(&nvm_mutex); 626 mutex_lock(&swflag_mutex);
586 627
587 while (timeout) { 628 while (timeout) {
588 extcnf_ctrl = er32(EXTCNF_CTRL); 629 extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,7 +640,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
599 goto out; 640 goto out;
600 } 641 }
601 642
602 timeout = PHY_CFG_TIMEOUT * 2; 643 timeout = SW_FLAG_TIMEOUT;
603 644
604 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 645 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
605 ew32(EXTCNF_CTRL, extcnf_ctrl); 646 ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -623,7 +664,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
623 664
624out: 665out:
625 if (ret_val) 666 if (ret_val)
626 mutex_unlock(&nvm_mutex); 667 mutex_unlock(&swflag_mutex);
627 668
628 return ret_val; 669 return ret_val;
629} 670}
@@ -632,9 +673,8 @@ out:
632 * e1000_release_swflag_ich8lan - Release software control flag 673 * e1000_release_swflag_ich8lan - Release software control flag
633 * @hw: pointer to the HW structure 674 * @hw: pointer to the HW structure
634 * 675 *
635 * Releases the software control flag for performing NVM and PHY operations. 676 * Releases the software control flag for performing PHY and select
636 * This is a function pointer entry point only called by read/write 677 * MAC CSR accesses.
637 * routines for the PHY and NVM parts.
638 **/ 678 **/
639static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) 679static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
640{ 680{
@@ -644,7 +684,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
644 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 684 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
645 ew32(EXTCNF_CTRL, extcnf_ctrl); 685 ew32(EXTCNF_CTRL, extcnf_ctrl);
646 686
647 mutex_unlock(&nvm_mutex); 687 mutex_unlock(&swflag_mutex);
688
689 return;
648} 690}
649 691
650/** 692/**
@@ -844,7 +886,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
844 u32 i; 886 u32 i;
845 u32 data, cnf_size, cnf_base_addr, sw_cfg_mask; 887 u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
846 s32 ret_val; 888 s32 ret_val;
847 u16 word_addr, reg_data, reg_addr, phy_page = 0; 889 u16 reg, word_addr, reg_data, reg_addr, phy_page = 0;
848 890
849 ret_val = e1000e_phy_hw_reset_generic(hw); 891 ret_val = e1000e_phy_hw_reset_generic(hw);
850 if (ret_val) 892 if (ret_val)
@@ -859,6 +901,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
859 return ret_val; 901 return ret_val;
860 } 902 }
861 903
904 /* Dummy read to clear the phy wakeup bit after lcd reset */
905 if (hw->mac.type == e1000_pchlan)
906 e1e_rphy(hw, BM_WUC, &reg);
907
862 /* 908 /*
863 * Initialize the PHY from the NVM on ICH platforms. This 909 * Initialize the PHY from the NVM on ICH platforms. This
864 * is needed due to an issue where the NVM configuration is 910 * is needed due to an issue where the NVM configuration is
@@ -1054,6 +1100,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
1054} 1100}
1055 1101
1056/** 1102/**
1103 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1104 * @hw: pointer to the HW structure
1105 * @active: true to enable LPLU, false to disable
1106 *
1107 * Sets the LPLU state according to the active flag. For PCH, if OEM write
1108 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
1109 * the phy speed. This function will manually set the LPLU bit and restart
1110 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1111 * since it configures the same bit.
1112 **/
1113static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1114{
1115 s32 ret_val = 0;
1116 u16 oem_reg;
1117
1118 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
1119 if (ret_val)
1120 goto out;
1121
1122 if (active)
1123 oem_reg |= HV_OEM_BITS_LPLU;
1124 else
1125 oem_reg &= ~HV_OEM_BITS_LPLU;
1126
1127 oem_reg |= HV_OEM_BITS_RESTART_AN;
1128 ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
1129
1130out:
1131 return ret_val;
1132}
1133
1134/**
1057 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state 1135 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1058 * @hw: pointer to the HW structure 1136 * @hw: pointer to the HW structure
1059 * @active: TRUE to enable LPLU, FALSE to disable 1137 * @active: TRUE to enable LPLU, FALSE to disable
@@ -1314,12 +1392,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1314 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 1392 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1315 (words == 0)) { 1393 (words == 0)) {
1316 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1394 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1317 return -E1000_ERR_NVM; 1395 ret_val = -E1000_ERR_NVM;
1396 goto out;
1318 } 1397 }
1319 1398
1320 ret_val = e1000_acquire_swflag_ich8lan(hw); 1399 nvm->ops.acquire_nvm(hw);
1321 if (ret_val)
1322 goto out;
1323 1400
1324 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1401 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1325 if (ret_val) { 1402 if (ret_val) {
@@ -1345,7 +1422,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1345 } 1422 }
1346 } 1423 }
1347 1424
1348 e1000_release_swflag_ich8lan(hw); 1425 nvm->ops.release_nvm(hw);
1349 1426
1350out: 1427out:
1351 if (ret_val) 1428 if (ret_val)
@@ -1603,11 +1680,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1603 return -E1000_ERR_NVM; 1680 return -E1000_ERR_NVM;
1604 } 1681 }
1605 1682
1683 nvm->ops.acquire_nvm(hw);
1684
1606 for (i = 0; i < words; i++) { 1685 for (i = 0; i < words; i++) {
1607 dev_spec->shadow_ram[offset+i].modified = 1; 1686 dev_spec->shadow_ram[offset+i].modified = 1;
1608 dev_spec->shadow_ram[offset+i].value = data[i]; 1687 dev_spec->shadow_ram[offset+i].value = data[i];
1609 } 1688 }
1610 1689
1690 nvm->ops.release_nvm(hw);
1691
1611 return 0; 1692 return 0;
1612} 1693}
1613 1694
@@ -1637,9 +1718,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1637 if (nvm->type != e1000_nvm_flash_sw) 1718 if (nvm->type != e1000_nvm_flash_sw)
1638 goto out; 1719 goto out;
1639 1720
1640 ret_val = e1000_acquire_swflag_ich8lan(hw); 1721 nvm->ops.acquire_nvm(hw);
1641 if (ret_val)
1642 goto out;
1643 1722
1644 /* 1723 /*
1645 * We're writing to the opposite bank so if we're on bank 1, 1724 * We're writing to the opposite bank so if we're on bank 1,
@@ -1657,7 +1736,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1657 old_bank_offset = 0; 1736 old_bank_offset = 0;
1658 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 1737 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
1659 if (ret_val) { 1738 if (ret_val) {
1660 e1000_release_swflag_ich8lan(hw); 1739 nvm->ops.release_nvm(hw);
1661 goto out; 1740 goto out;
1662 } 1741 }
1663 } else { 1742 } else {
@@ -1665,7 +1744,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1665 new_bank_offset = 0; 1744 new_bank_offset = 0;
1666 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 1745 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
1667 if (ret_val) { 1746 if (ret_val) {
1668 e1000_release_swflag_ich8lan(hw); 1747 nvm->ops.release_nvm(hw);
1669 goto out; 1748 goto out;
1670 } 1749 }
1671 } 1750 }
@@ -1723,7 +1802,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1723 if (ret_val) { 1802 if (ret_val) {
1724 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ 1803 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
1725 hw_dbg(hw, "Flash commit failed.\n"); 1804 hw_dbg(hw, "Flash commit failed.\n");
1726 e1000_release_swflag_ich8lan(hw); 1805 nvm->ops.release_nvm(hw);
1727 goto out; 1806 goto out;
1728 } 1807 }
1729 1808
@@ -1736,7 +1815,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1736 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 1815 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
1737 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); 1816 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
1738 if (ret_val) { 1817 if (ret_val) {
1739 e1000_release_swflag_ich8lan(hw); 1818 nvm->ops.release_nvm(hw);
1740 goto out; 1819 goto out;
1741 } 1820 }
1742 data &= 0xBFFF; 1821 data &= 0xBFFF;
@@ -1744,7 +1823,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1744 act_offset * 2 + 1, 1823 act_offset * 2 + 1,
1745 (u8)(data >> 8)); 1824 (u8)(data >> 8));
1746 if (ret_val) { 1825 if (ret_val) {
1747 e1000_release_swflag_ich8lan(hw); 1826 nvm->ops.release_nvm(hw);
1748 goto out; 1827 goto out;
1749 } 1828 }
1750 1829
@@ -1757,7 +1836,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1757 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 1836 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
1758 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 1837 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
1759 if (ret_val) { 1838 if (ret_val) {
1760 e1000_release_swflag_ich8lan(hw); 1839 nvm->ops.release_nvm(hw);
1761 goto out; 1840 goto out;
1762 } 1841 }
1763 1842
@@ -1767,7 +1846,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1767 dev_spec->shadow_ram[i].value = 0xFFFF; 1846 dev_spec->shadow_ram[i].value = 0xFFFF;
1768 } 1847 }
1769 1848
1770 e1000_release_swflag_ich8lan(hw); 1849 nvm->ops.release_nvm(hw);
1771 1850
1772 /* 1851 /*
1773 * Reload the EEPROM, or else modifications will not appear 1852 * Reload the EEPROM, or else modifications will not appear
@@ -1831,14 +1910,12 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
1831 **/ 1910 **/
1832void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) 1911void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
1833{ 1912{
1913 struct e1000_nvm_info *nvm = &hw->nvm;
1834 union ich8_flash_protected_range pr0; 1914 union ich8_flash_protected_range pr0;
1835 union ich8_hws_flash_status hsfsts; 1915 union ich8_hws_flash_status hsfsts;
1836 u32 gfpreg; 1916 u32 gfpreg;
1837 s32 ret_val;
1838 1917
1839 ret_val = e1000_acquire_swflag_ich8lan(hw); 1918 nvm->ops.acquire_nvm(hw);
1840 if (ret_val)
1841 return;
1842 1919
1843 gfpreg = er32flash(ICH_FLASH_GFPREG); 1920 gfpreg = er32flash(ICH_FLASH_GFPREG);
1844 1921
@@ -1859,7 +1936,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
1859 hsfsts.hsf_status.flockdn = true; 1936 hsfsts.hsf_status.flockdn = true;
1860 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); 1937 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
1861 1938
1862 e1000_release_swflag_ich8lan(hw); 1939 nvm->ops.release_nvm(hw);
1863} 1940}
1864 1941
1865/** 1942/**
@@ -2229,6 +2306,7 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
2229 **/ 2306 **/
2230static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 2307static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2231{ 2308{
2309 u16 reg;
2232 u32 ctrl, icr, kab; 2310 u32 ctrl, icr, kab;
2233 s32 ret_val; 2311 s32 ret_val;
2234 2312
@@ -2304,6 +2382,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2304 hw_dbg(hw, "Auto Read Done did not complete\n"); 2382 hw_dbg(hw, "Auto Read Done did not complete\n");
2305 } 2383 }
2306 } 2384 }
2385 /* Dummy read to clear the phy wakeup bit after lcd reset */
2386 if (hw->mac.type == e1000_pchlan)
2387 e1e_rphy(hw, BM_WUC, &reg);
2307 2388
2308 /* 2389 /*
2309 * For PCH, this write will make sure that any noise 2390 * For PCH, this write will make sure that any noise
@@ -2843,9 +2924,8 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
2843 E1000_PHY_CTRL_GBE_DISABLE; 2924 E1000_PHY_CTRL_GBE_DISABLE;
2844 ew32(PHY_CTRL, phy_ctrl); 2925 ew32(PHY_CTRL, phy_ctrl);
2845 2926
2846 /* Workaround SWFLAG unexpectedly set during S0->Sx */
2847 if (hw->mac.type == e1000_pchlan) 2927 if (hw->mac.type == e1000_pchlan)
2848 udelay(500); 2928 e1000_phy_hw_reset_ich8lan(hw);
2849 default: 2929 default:
2850 break; 2930 break;
2851 } 2931 }
@@ -3113,9 +3193,9 @@ static struct e1000_phy_operations ich8_phy_ops = {
3113}; 3193};
3114 3194
3115static struct e1000_nvm_operations ich8_nvm_ops = { 3195static struct e1000_nvm_operations ich8_nvm_ops = {
3116 .acquire_nvm = e1000_acquire_swflag_ich8lan, 3196 .acquire_nvm = e1000_acquire_nvm_ich8lan,
3117 .read_nvm = e1000_read_nvm_ich8lan, 3197 .read_nvm = e1000_read_nvm_ich8lan,
3118 .release_nvm = e1000_release_swflag_ich8lan, 3198 .release_nvm = e1000_release_nvm_ich8lan,
3119 .update_nvm = e1000_update_nvm_checksum_ich8lan, 3199 .update_nvm = e1000_update_nvm_checksum_ich8lan,
3120 .valid_led_default = e1000_valid_led_default_ich8lan, 3200 .valid_led_default = e1000_valid_led_default_ich8lan,
3121 .validate_nvm = e1000_validate_nvm_checksum_ich8lan, 3201 .validate_nvm = e1000_validate_nvm_checksum_ich8lan,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 16c193a6c95c..0687c6aa4e46 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4982 goto err_pci_reg; 4982 goto err_pci_reg;
4983 4983
4984 /* AER (Advanced Error Reporting) hooks */ 4984 /* AER (Advanced Error Reporting) hooks */
4985 err = pci_enable_pcie_error_reporting(pdev); 4985 pci_enable_pcie_error_reporting(pdev);
4986 if (err) {
4987 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4988 "0x%x\n", err);
4989 /* non-fatal, continue */
4990 }
4991 4986
4992 pci_set_master(pdev); 4987 pci_set_master(pdev);
4993 /* PCI config space info */ 4988 /* PCI config space info */
@@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5263{ 5258{
5264 struct net_device *netdev = pci_get_drvdata(pdev); 5259 struct net_device *netdev = pci_get_drvdata(pdev);
5265 struct e1000_adapter *adapter = netdev_priv(netdev); 5260 struct e1000_adapter *adapter = netdev_priv(netdev);
5266 int err;
5267 5261
5268 /* 5262 /*
5269 * flush_scheduled work may reschedule our watchdog task, so 5263 * flush_scheduled work may reschedule our watchdog task, so
@@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5299 free_netdev(netdev); 5293 free_netdev(netdev);
5300 5294
5301 /* AER disable */ 5295 /* AER disable */
5302 err = pci_disable_pcie_error_reporting(pdev); 5296 pci_disable_pcie_error_reporting(pdev);
5303 if (err)
5304 dev_err(&pdev->dev,
5305 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5306 5297
5307 pci_disable_device(pdev); 5298 pci_disable_device(pdev);
5308} 5299}
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 994401fd0664..f9d33ab05e97 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -164,16 +164,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
164 * MDIC mode. No harm in trying again in this case since 164 * MDIC mode. No harm in trying again in this case since
165 * the PHY ID is unknown at this point anyway 165 * the PHY ID is unknown at this point anyway
166 */ 166 */
167 ret_val = phy->ops.acquire_phy(hw);
168 if (ret_val)
169 goto out;
167 ret_val = e1000_set_mdio_slow_mode_hv(hw, true); 170 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
168 if (ret_val) 171 if (ret_val)
169 goto out; 172 goto out;
173 phy->ops.release_phy(hw);
170 174
171 retry_count++; 175 retry_count++;
172 } 176 }
173out: 177out:
174 /* Revert to MDIO fast mode, if applicable */ 178 /* Revert to MDIO fast mode, if applicable */
175 if (retry_count) 179 if (retry_count) {
180 ret_val = phy->ops.acquire_phy(hw);
181 if (ret_val)
182 return ret_val;
176 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 183 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
184 phy->ops.release_phy(hw);
185 }
177 186
178 return ret_val; 187 return ret_val;
179} 188}
@@ -354,94 +363,173 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
354} 363}
355 364
356/** 365/**
357 * e1000e_read_phy_reg_igp - Read igp PHY register 366 * __e1000e_read_phy_reg_igp - Read igp PHY register
358 * @hw: pointer to the HW structure 367 * @hw: pointer to the HW structure
359 * @offset: register offset to be read 368 * @offset: register offset to be read
360 * @data: pointer to the read data 369 * @data: pointer to the read data
370 * @locked: semaphore has already been acquired or not
361 * 371 *
362 * Acquires semaphore, if necessary, then reads the PHY register at offset 372 * Acquires semaphore, if necessary, then reads the PHY register at offset
363 * and storing the retrieved information in data. Release any acquired 373 * and stores the retrieved information in data. Release any acquired
364 * semaphores before exiting. 374 * semaphores before exiting.
365 **/ 375 **/
366s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) 376static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
377 bool locked)
367{ 378{
368 s32 ret_val; 379 s32 ret_val = 0;
369 380
370 ret_val = hw->phy.ops.acquire_phy(hw); 381 if (!locked) {
371 if (ret_val) 382 if (!(hw->phy.ops.acquire_phy))
372 return ret_val; 383 goto out;
384
385 ret_val = hw->phy.ops.acquire_phy(hw);
386 if (ret_val)
387 goto out;
388 }
373 389
374 if (offset > MAX_PHY_MULTI_PAGE_REG) { 390 if (offset > MAX_PHY_MULTI_PAGE_REG) {
375 ret_val = e1000e_write_phy_reg_mdic(hw, 391 ret_val = e1000e_write_phy_reg_mdic(hw,
376 IGP01E1000_PHY_PAGE_SELECT, 392 IGP01E1000_PHY_PAGE_SELECT,
377 (u16)offset); 393 (u16)offset);
378 if (ret_val) { 394 if (ret_val)
379 hw->phy.ops.release_phy(hw); 395 goto release;
380 return ret_val;
381 }
382 } 396 }
383 397
384 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 398 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
385 data); 399 data);
386
387 hw->phy.ops.release_phy(hw);
388 400
401release:
402 if (!locked)
403 hw->phy.ops.release_phy(hw);
404out:
389 return ret_val; 405 return ret_val;
390} 406}
391 407
392/** 408/**
409 * e1000e_read_phy_reg_igp - Read igp PHY register
410 * @hw: pointer to the HW structure
411 * @offset: register offset to be read
412 * @data: pointer to the read data
413 *
414 * Acquires semaphore then reads the PHY register at offset and stores the
415 * retrieved information in data.
416 * Release the acquired semaphore before exiting.
417 **/
418s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
419{
420 return __e1000e_read_phy_reg_igp(hw, offset, data, false);
421}
422
423/**
424 * e1000e_read_phy_reg_igp_locked - Read igp PHY register
425 * @hw: pointer to the HW structure
426 * @offset: register offset to be read
427 * @data: pointer to the read data
428 *
429 * Reads the PHY register at offset and stores the retrieved information
430 * in data. Assumes semaphore already acquired.
431 **/
432s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
433{
434 return __e1000e_read_phy_reg_igp(hw, offset, data, true);
435}
436
437/**
393 * e1000e_write_phy_reg_igp - Write igp PHY register 438 * e1000e_write_phy_reg_igp - Write igp PHY register
394 * @hw: pointer to the HW structure 439 * @hw: pointer to the HW structure
395 * @offset: register offset to write to 440 * @offset: register offset to write to
396 * @data: data to write at register offset 441 * @data: data to write at register offset
442 * @locked: semaphore has already been acquired or not
397 * 443 *
398 * Acquires semaphore, if necessary, then writes the data to PHY register 444 * Acquires semaphore, if necessary, then writes the data to PHY register
399 * at the offset. Release any acquired semaphores before exiting. 445 * at the offset. Release any acquired semaphores before exiting.
400 **/ 446 **/
401s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) 447static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
448 bool locked)
402{ 449{
403 s32 ret_val; 450 s32 ret_val = 0;
404 451
405 ret_val = hw->phy.ops.acquire_phy(hw); 452 if (!locked) {
406 if (ret_val) 453 if (!(hw->phy.ops.acquire_phy))
407 return ret_val; 454 goto out;
455
456 ret_val = hw->phy.ops.acquire_phy(hw);
457 if (ret_val)
458 goto out;
459 }
408 460
409 if (offset > MAX_PHY_MULTI_PAGE_REG) { 461 if (offset > MAX_PHY_MULTI_PAGE_REG) {
410 ret_val = e1000e_write_phy_reg_mdic(hw, 462 ret_val = e1000e_write_phy_reg_mdic(hw,
411 IGP01E1000_PHY_PAGE_SELECT, 463 IGP01E1000_PHY_PAGE_SELECT,
412 (u16)offset); 464 (u16)offset);
413 if (ret_val) { 465 if (ret_val)
414 hw->phy.ops.release_phy(hw); 466 goto release;
415 return ret_val;
416 }
417 } 467 }
418 468
419 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 469 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
420 data); 470 data);
421 471
422 hw->phy.ops.release_phy(hw); 472release:
473 if (!locked)
474 hw->phy.ops.release_phy(hw);
423 475
476out:
424 return ret_val; 477 return ret_val;
425} 478}
426 479
427/** 480/**
428 * e1000e_read_kmrn_reg - Read kumeran register 481 * e1000e_write_phy_reg_igp - Write igp PHY register
482 * @hw: pointer to the HW structure
483 * @offset: register offset to write to
484 * @data: data to write at register offset
485 *
486 * Acquires semaphore then writes the data to PHY register
487 * at the offset. Release any acquired semaphores before exiting.
488 **/
489s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
490{
491 return __e1000e_write_phy_reg_igp(hw, offset, data, false);
492}
493
494/**
495 * e1000e_write_phy_reg_igp_locked - Write igp PHY register
496 * @hw: pointer to the HW structure
497 * @offset: register offset to write to
498 * @data: data to write at register offset
499 *
500 * Writes the data to PHY register at the offset.
501 * Assumes semaphore already acquired.
502 **/
503s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
504{
505 return __e1000e_write_phy_reg_igp(hw, offset, data, true);
506}
507
508/**
509 * __e1000_read_kmrn_reg - Read kumeran register
429 * @hw: pointer to the HW structure 510 * @hw: pointer to the HW structure
430 * @offset: register offset to be read 511 * @offset: register offset to be read
431 * @data: pointer to the read data 512 * @data: pointer to the read data
513 * @locked: semaphore has already been acquired or not
432 * 514 *
433 * Acquires semaphore, if necessary. Then reads the PHY register at offset 515 * Acquires semaphore, if necessary. Then reads the PHY register at offset
434 * using the kumeran interface. The information retrieved is stored in data. 516 * using the kumeran interface. The information retrieved is stored in data.
435 * Release any acquired semaphores before exiting. 517 * Release any acquired semaphores before exiting.
436 **/ 518 **/
437s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) 519static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
520 bool locked)
438{ 521{
439 u32 kmrnctrlsta; 522 u32 kmrnctrlsta;
440 s32 ret_val; 523 s32 ret_val = 0;
441 524
442 ret_val = hw->phy.ops.acquire_phy(hw); 525 if (!locked) {
443 if (ret_val) 526 if (!(hw->phy.ops.acquire_phy))
444 return ret_val; 527 goto out;
528
529 ret_val = hw->phy.ops.acquire_phy(hw);
530 if (ret_val)
531 goto out;
532 }
445 533
446 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 534 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
447 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; 535 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
@@ -452,41 +540,111 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
452 kmrnctrlsta = er32(KMRNCTRLSTA); 540 kmrnctrlsta = er32(KMRNCTRLSTA);
453 *data = (u16)kmrnctrlsta; 541 *data = (u16)kmrnctrlsta;
454 542
455 hw->phy.ops.release_phy(hw); 543 if (!locked)
544 hw->phy.ops.release_phy(hw);
456 545
546out:
457 return ret_val; 547 return ret_val;
458} 548}
459 549
460/** 550/**
461 * e1000e_write_kmrn_reg - Write kumeran register 551 * e1000e_read_kmrn_reg - Read kumeran register
552 * @hw: pointer to the HW structure
553 * @offset: register offset to be read
554 * @data: pointer to the read data
555 *
556 * Acquires semaphore then reads the PHY register at offset using the
557 * kumeran interface. The information retrieved is stored in data.
558 * Release the acquired semaphore before exiting.
559 **/
560s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
561{
562 return __e1000_read_kmrn_reg(hw, offset, data, false);
563}
564
565/**
566 * e1000_read_kmrn_reg_locked - Read kumeran register
567 * @hw: pointer to the HW structure
568 * @offset: register offset to be read
569 * @data: pointer to the read data
570 *
571 * Reads the PHY register at offset using the kumeran interface. The
572 * information retrieved is stored in data.
573 * Assumes semaphore already acquired.
574 **/
575s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
576{
577 return __e1000_read_kmrn_reg(hw, offset, data, true);
578}
579
580/**
581 * __e1000_write_kmrn_reg - Write kumeran register
462 * @hw: pointer to the HW structure 582 * @hw: pointer to the HW structure
463 * @offset: register offset to write to 583 * @offset: register offset to write to
464 * @data: data to write at register offset 584 * @data: data to write at register offset
585 * @locked: semaphore has already been acquired or not
465 * 586 *
466 * Acquires semaphore, if necessary. Then write the data to PHY register 587 * Acquires semaphore, if necessary. Then write the data to PHY register
467 * at the offset using the kumeran interface. Release any acquired semaphores 588 * at the offset using the kumeran interface. Release any acquired semaphores
468 * before exiting. 589 * before exiting.
469 **/ 590 **/
470s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) 591static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
592 bool locked)
471{ 593{
472 u32 kmrnctrlsta; 594 u32 kmrnctrlsta;
473 s32 ret_val; 595 s32 ret_val = 0;
474 596
475 ret_val = hw->phy.ops.acquire_phy(hw); 597 if (!locked) {
476 if (ret_val) 598 if (!(hw->phy.ops.acquire_phy))
477 return ret_val; 599 goto out;
600
601 ret_val = hw->phy.ops.acquire_phy(hw);
602 if (ret_val)
603 goto out;
604 }
478 605
479 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 606 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
480 E1000_KMRNCTRLSTA_OFFSET) | data; 607 E1000_KMRNCTRLSTA_OFFSET) | data;
481 ew32(KMRNCTRLSTA, kmrnctrlsta); 608 ew32(KMRNCTRLSTA, kmrnctrlsta);
482 609
483 udelay(2); 610 udelay(2);
484 hw->phy.ops.release_phy(hw);
485 611
612 if (!locked)
613 hw->phy.ops.release_phy(hw);
614
615out:
486 return ret_val; 616 return ret_val;
487} 617}
488 618
489/** 619/**
620 * e1000e_write_kmrn_reg - Write kumeran register
621 * @hw: pointer to the HW structure
622 * @offset: register offset to write to
623 * @data: data to write at register offset
624 *
625 * Acquires semaphore then writes the data to the PHY register at the offset
626 * using the kumeran interface. Release the acquired semaphore before exiting.
627 **/
628s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
629{
630 return __e1000_write_kmrn_reg(hw, offset, data, false);
631}
632
633/**
634 * e1000_write_kmrn_reg_locked - Write kumeran register
635 * @hw: pointer to the HW structure
636 * @offset: register offset to write to
637 * @data: data to write at register offset
638 *
639 * Write the data to PHY register at the offset using the kumeran interface.
640 * Assumes semaphore already acquired.
641 **/
642s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
643{
644 return __e1000_write_kmrn_reg(hw, offset, data, true);
645}
646
647/**
490 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link 648 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
491 * @hw: pointer to the HW structure 649 * @hw: pointer to the HW structure
492 * 650 *
@@ -2105,6 +2263,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2105 u32 page = offset >> IGP_PAGE_SHIFT; 2263 u32 page = offset >> IGP_PAGE_SHIFT;
2106 u32 page_shift = 0; 2264 u32 page_shift = 0;
2107 2265
2266 ret_val = hw->phy.ops.acquire_phy(hw);
2267 if (ret_val)
2268 return ret_val;
2269
2108 /* Page 800 works differently than the rest so it has its own func */ 2270 /* Page 800 works differently than the rest so it has its own func */
2109 if (page == BM_WUC_PAGE) { 2271 if (page == BM_WUC_PAGE) {
2110 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, 2272 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2112,10 +2274,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2112 goto out; 2274 goto out;
2113 } 2275 }
2114 2276
2115 ret_val = hw->phy.ops.acquire_phy(hw);
2116 if (ret_val)
2117 goto out;
2118
2119 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2277 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2120 2278
2121 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2279 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2135,18 +2293,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2135 /* Page is shifted left, PHY expects (page x 32) */ 2293 /* Page is shifted left, PHY expects (page x 32) */
2136 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2294 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2137 (page << page_shift)); 2295 (page << page_shift));
2138 if (ret_val) { 2296 if (ret_val)
2139 hw->phy.ops.release_phy(hw);
2140 goto out; 2297 goto out;
2141 }
2142 } 2298 }
2143 2299
2144 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2300 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2145 data); 2301 data);
2146 2302
2147 hw->phy.ops.release_phy(hw);
2148
2149out: 2303out:
2304 hw->phy.ops.release_phy(hw);
2150 return ret_val; 2305 return ret_val;
2151} 2306}
2152 2307
@@ -2167,6 +2322,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2167 u32 page = offset >> IGP_PAGE_SHIFT; 2322 u32 page = offset >> IGP_PAGE_SHIFT;
2168 u32 page_shift = 0; 2323 u32 page_shift = 0;
2169 2324
2325 ret_val = hw->phy.ops.acquire_phy(hw);
2326 if (ret_val)
2327 return ret_val;
2328
2170 /* Page 800 works differently than the rest so it has its own func */ 2329 /* Page 800 works differently than the rest so it has its own func */
2171 if (page == BM_WUC_PAGE) { 2330 if (page == BM_WUC_PAGE) {
2172 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, 2331 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2174,10 +2333,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2174 goto out; 2333 goto out;
2175 } 2334 }
2176 2335
2177 ret_val = hw->phy.ops.acquire_phy(hw);
2178 if (ret_val)
2179 goto out;
2180
2181 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2336 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2182 2337
2183 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2338 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2197,17 +2352,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2197 /* Page is shifted left, PHY expects (page x 32) */ 2352 /* Page is shifted left, PHY expects (page x 32) */
2198 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2353 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2199 (page << page_shift)); 2354 (page << page_shift));
2200 if (ret_val) { 2355 if (ret_val)
2201 hw->phy.ops.release_phy(hw);
2202 goto out; 2356 goto out;
2203 }
2204 } 2357 }
2205 2358
2206 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2359 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2207 data); 2360 data);
2208 hw->phy.ops.release_phy(hw);
2209
2210out: 2361out:
2362 hw->phy.ops.release_phy(hw);
2211 return ret_val; 2363 return ret_val;
2212} 2364}
2213 2365
@@ -2226,17 +2378,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2226 s32 ret_val; 2378 s32 ret_val;
2227 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2379 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2228 2380
2381 ret_val = hw->phy.ops.acquire_phy(hw);
2382 if (ret_val)
2383 return ret_val;
2384
2229 /* Page 800 works differently than the rest so it has its own func */ 2385 /* Page 800 works differently than the rest so it has its own func */
2230 if (page == BM_WUC_PAGE) { 2386 if (page == BM_WUC_PAGE) {
2231 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, 2387 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
2232 true); 2388 true);
2233 return ret_val; 2389 goto out;
2234 } 2390 }
2235 2391
2236 ret_val = hw->phy.ops.acquire_phy(hw);
2237 if (ret_val)
2238 return ret_val;
2239
2240 hw->phy.addr = 1; 2392 hw->phy.addr = 1;
2241 2393
2242 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2394 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2245,16 +2397,14 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2245 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, 2397 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2246 page); 2398 page);
2247 2399
2248 if (ret_val) { 2400 if (ret_val)
2249 hw->phy.ops.release_phy(hw); 2401 goto out;
2250 return ret_val;
2251 }
2252 } 2402 }
2253 2403
2254 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2404 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2255 data); 2405 data);
2406out:
2256 hw->phy.ops.release_phy(hw); 2407 hw->phy.ops.release_phy(hw);
2257
2258 return ret_val; 2408 return ret_val;
2259} 2409}
2260 2410
@@ -2272,17 +2422,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2272 s32 ret_val; 2422 s32 ret_val;
2273 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2423 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2274 2424
2425 ret_val = hw->phy.ops.acquire_phy(hw);
2426 if (ret_val)
2427 return ret_val;
2428
2275 /* Page 800 works differently than the rest so it has its own func */ 2429 /* Page 800 works differently than the rest so it has its own func */
2276 if (page == BM_WUC_PAGE) { 2430 if (page == BM_WUC_PAGE) {
2277 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, 2431 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
2278 false); 2432 false);
2279 return ret_val; 2433 goto out;
2280 } 2434 }
2281 2435
2282 ret_val = hw->phy.ops.acquire_phy(hw);
2283 if (ret_val)
2284 return ret_val;
2285
2286 hw->phy.addr = 1; 2436 hw->phy.addr = 1;
2287 2437
2288 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2438 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2290,17 +2440,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2290 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, 2440 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2291 page); 2441 page);
2292 2442
2293 if (ret_val) { 2443 if (ret_val)
2294 hw->phy.ops.release_phy(hw); 2444 goto out;
2295 return ret_val;
2296 }
2297 } 2445 }
2298 2446
2299 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2447 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2300 data); 2448 data);
2301 2449
2450out:
2302 hw->phy.ops.release_phy(hw); 2451 hw->phy.ops.release_phy(hw);
2303
2304 return ret_val; 2452 return ret_val;
2305} 2453}
2306 2454
@@ -2320,6 +2468,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2320 * 3) Write the address using the address opcode (0x11) 2468 * 3) Write the address using the address opcode (0x11)
2321 * 4) Read or write the data using the data opcode (0x12) 2469 * 4) Read or write the data using the data opcode (0x12)
2322 * 5) Restore 769_17.2 to its original value 2470 * 5) Restore 769_17.2 to its original value
2471 *
2472 * Assumes semaphore already acquired.
2323 **/ 2473 **/
2324static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, 2474static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2325 u16 *data, bool read) 2475 u16 *data, bool read)
@@ -2327,20 +2477,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2327 s32 ret_val; 2477 s32 ret_val;
2328 u16 reg = BM_PHY_REG_NUM(offset); 2478 u16 reg = BM_PHY_REG_NUM(offset);
2329 u16 phy_reg = 0; 2479 u16 phy_reg = 0;
2330 u8 phy_acquired = 1;
2331
2332 2480
2333 /* Gig must be disabled for MDIO accesses to page 800 */ 2481 /* Gig must be disabled for MDIO accesses to page 800 */
2334 if ((hw->mac.type == e1000_pchlan) && 2482 if ((hw->mac.type == e1000_pchlan) &&
2335 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) 2483 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
2336 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); 2484 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
2337 2485
2338 ret_val = hw->phy.ops.acquire_phy(hw);
2339 if (ret_val) {
2340 phy_acquired = 0;
2341 goto out;
2342 }
2343
2344 /* All operations in this function are phy address 1 */ 2486 /* All operations in this function are phy address 1 */
2345 hw->phy.addr = 1; 2487 hw->phy.addr = 1;
2346 2488
@@ -2397,8 +2539,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2397 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); 2539 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
2398 2540
2399out: 2541out:
2400 if (phy_acquired == 1)
2401 hw->phy.ops.release_phy(hw);
2402 return ret_val; 2542 return ret_val;
2403} 2543}
2404 2544
@@ -2439,52 +2579,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
2439 return 0; 2579 return 0;
2440} 2580}
2441 2581
2582/**
2583 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2584 * @hw: pointer to the HW structure
2585 * @slow: true for slow mode, false for normal mode
2586 *
2587 * Assumes semaphore already acquired.
2588 **/
2442s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) 2589s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
2443{ 2590{
2444 s32 ret_val = 0; 2591 s32 ret_val = 0;
2445 u16 data = 0; 2592 u16 data = 0;
2446 2593
2447 ret_val = hw->phy.ops.acquire_phy(hw);
2448 if (ret_val)
2449 return ret_val;
2450
2451 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ 2594 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
2452 hw->phy.addr = 1; 2595 hw->phy.addr = 1;
2453 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 2596 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2454 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); 2597 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
2455 if (ret_val) { 2598 if (ret_val)
2456 hw->phy.ops.release_phy(hw); 2599 goto out;
2457 return ret_val; 2600
2458 }
2459 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, 2601 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
2460 (0x2180 | (slow << 10))); 2602 (0x2180 | (slow << 10)));
2603 if (ret_val)
2604 goto out;
2461 2605
2462 /* dummy read when reverting to fast mode - throw away result */ 2606 /* dummy read when reverting to fast mode - throw away result */
2463 if (!slow) 2607 if (!slow)
2464 e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); 2608 ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
2465
2466 hw->phy.ops.release_phy(hw);
2467 2609
2610out:
2468 return ret_val; 2611 return ret_val;
2469} 2612}
2470 2613
2471/** 2614/**
2472 * e1000_read_phy_reg_hv - Read HV PHY register 2615 * __e1000_read_phy_reg_hv - Read HV PHY register
2473 * @hw: pointer to the HW structure 2616 * @hw: pointer to the HW structure
2474 * @offset: register offset to be read 2617 * @offset: register offset to be read
2475 * @data: pointer to the read data 2618 * @data: pointer to the read data
2619 * @locked: semaphore has already been acquired or not
2476 * 2620 *
2477 * Acquires semaphore, if necessary, then reads the PHY register at offset 2621 * Acquires semaphore, if necessary, then reads the PHY register at offset
2478 * and storing the retrieved information in data. Release any acquired 2622 * and stores the retrieved information in data. Release any acquired
2479 * semaphore before exiting. 2623 * semaphore before exiting.
2480 **/ 2624 **/
2481s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) 2625static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2626 bool locked)
2482{ 2627{
2483 s32 ret_val; 2628 s32 ret_val;
2484 u16 page = BM_PHY_REG_PAGE(offset); 2629 u16 page = BM_PHY_REG_PAGE(offset);
2485 u16 reg = BM_PHY_REG_NUM(offset); 2630 u16 reg = BM_PHY_REG_NUM(offset);
2486 bool in_slow_mode = false; 2631 bool in_slow_mode = false;
2487 2632
2633 if (!locked) {
2634 ret_val = hw->phy.ops.acquire_phy(hw);
2635 if (ret_val)
2636 return ret_val;
2637 }
2638
2488 /* Workaround failure in MDIO access while cable is disconnected */ 2639 /* Workaround failure in MDIO access while cable is disconnected */
2489 if ((hw->phy.type == e1000_phy_82577) && 2640 if ((hw->phy.type == e1000_phy_82577) &&
2490 !(er32(STATUS) & E1000_STATUS_LU)) { 2641 !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2508,10 +2659,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2508 goto out; 2659 goto out;
2509 } 2660 }
2510 2661
2511 ret_val = hw->phy.ops.acquire_phy(hw);
2512 if (ret_val)
2513 goto out;
2514
2515 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); 2662 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2516 2663
2517 if (page == HV_INTC_FC_PAGE_START) 2664 if (page == HV_INTC_FC_PAGE_START)
@@ -2529,42 +2676,76 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2529 ret_val = e1000e_write_phy_reg_mdic(hw, 2676 ret_val = e1000e_write_phy_reg_mdic(hw,
2530 IGP01E1000_PHY_PAGE_SELECT, 2677 IGP01E1000_PHY_PAGE_SELECT,
2531 (page << IGP_PAGE_SHIFT)); 2678 (page << IGP_PAGE_SHIFT));
2532 if (ret_val) {
2533 hw->phy.ops.release_phy(hw);
2534 goto out;
2535 }
2536 hw->phy.addr = phy_addr; 2679 hw->phy.addr = phy_addr;
2537 } 2680 }
2538 } 2681 }
2539 2682
2540 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2683 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2541 data); 2684 data);
2542 hw->phy.ops.release_phy(hw);
2543
2544out: 2685out:
2545 /* Revert to MDIO fast mode, if applicable */ 2686 /* Revert to MDIO fast mode, if applicable */
2546 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2687 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2547 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2688 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2548 2689
2690 if (!locked)
2691 hw->phy.ops.release_phy(hw);
2692
2549 return ret_val; 2693 return ret_val;
2550} 2694}
2551 2695
2552/** 2696/**
2553 * e1000_write_phy_reg_hv - Write HV PHY register 2697 * e1000_read_phy_reg_hv - Read HV PHY register
2698 * @hw: pointer to the HW structure
2699 * @offset: register offset to be read
2700 * @data: pointer to the read data
2701 *
2702 * Acquires semaphore then reads the PHY register at offset and stores
2703 * the retrieved information in data. Release the acquired semaphore
2704 * before exiting.
2705 **/
2706s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2707{
2708 return __e1000_read_phy_reg_hv(hw, offset, data, false);
2709}
2710
2711/**
2712 * e1000_read_phy_reg_hv_locked - Read HV PHY register
2713 * @hw: pointer to the HW structure
2714 * @offset: register offset to be read
2715 * @data: pointer to the read data
2716 *
2717 * Reads the PHY register at offset and stores the retrieved information
2718 * in data. Assumes semaphore already acquired.
2719 **/
2720s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
2721{
2722 return __e1000_read_phy_reg_hv(hw, offset, data, true);
2723}
2724
2725/**
2726 * __e1000_write_phy_reg_hv - Write HV PHY register
2554 * @hw: pointer to the HW structure 2727 * @hw: pointer to the HW structure
2555 * @offset: register offset to write to 2728 * @offset: register offset to write to
2556 * @data: data to write at register offset 2729 * @data: data to write at register offset
2730 * @locked: semaphore has already been acquired or not
2557 * 2731 *
2558 * Acquires semaphore, if necessary, then writes the data to PHY register 2732 * Acquires semaphore, if necessary, then writes the data to PHY register
2559 * at the offset. Release any acquired semaphores before exiting. 2733 * at the offset. Release any acquired semaphores before exiting.
2560 **/ 2734 **/
2561s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) 2735static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2736 bool locked)
2562{ 2737{
2563 s32 ret_val; 2738 s32 ret_val;
2564 u16 page = BM_PHY_REG_PAGE(offset); 2739 u16 page = BM_PHY_REG_PAGE(offset);
2565 u16 reg = BM_PHY_REG_NUM(offset); 2740 u16 reg = BM_PHY_REG_NUM(offset);
2566 bool in_slow_mode = false; 2741 bool in_slow_mode = false;
2567 2742
2743 if (!locked) {
2744 ret_val = hw->phy.ops.acquire_phy(hw);
2745 if (ret_val)
2746 return ret_val;
2747 }
2748
2568 /* Workaround failure in MDIO access while cable is disconnected */ 2749 /* Workaround failure in MDIO access while cable is disconnected */
2569 if ((hw->phy.type == e1000_phy_82577) && 2750 if ((hw->phy.type == e1000_phy_82577) &&
2570 !(er32(STATUS) & E1000_STATUS_LU)) { 2751 !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2588,10 +2769,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2588 goto out; 2769 goto out;
2589 } 2770 }
2590 2771
2591 ret_val = hw->phy.ops.acquire_phy(hw);
2592 if (ret_val)
2593 goto out;
2594
2595 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); 2772 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2596 2773
2597 if (page == HV_INTC_FC_PAGE_START) 2774 if (page == HV_INTC_FC_PAGE_START)
@@ -2607,15 +2784,10 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2607 ((MAX_PHY_REG_ADDRESS & reg) == 0) && 2784 ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
2608 (data & (1 << 11))) { 2785 (data & (1 << 11))) {
2609 u16 data2 = 0x7EFF; 2786 u16 data2 = 0x7EFF;
2610 hw->phy.ops.release_phy(hw);
2611 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, 2787 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
2612 &data2, false); 2788 &data2, false);
2613 if (ret_val) 2789 if (ret_val)
2614 goto out; 2790 goto out;
2615
2616 ret_val = hw->phy.ops.acquire_phy(hw);
2617 if (ret_val)
2618 goto out;
2619 } 2791 }
2620 2792
2621 if (reg > MAX_PHY_MULTI_PAGE_REG) { 2793 if (reg > MAX_PHY_MULTI_PAGE_REG) {
@@ -2630,27 +2802,53 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2630 ret_val = e1000e_write_phy_reg_mdic(hw, 2802 ret_val = e1000e_write_phy_reg_mdic(hw,
2631 IGP01E1000_PHY_PAGE_SELECT, 2803 IGP01E1000_PHY_PAGE_SELECT,
2632 (page << IGP_PAGE_SHIFT)); 2804 (page << IGP_PAGE_SHIFT));
2633 if (ret_val) {
2634 hw->phy.ops.release_phy(hw);
2635 goto out;
2636 }
2637 hw->phy.addr = phy_addr; 2805 hw->phy.addr = phy_addr;
2638 } 2806 }
2639 } 2807 }
2640 2808
2641 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2809 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2642 data); 2810 data);
2643 hw->phy.ops.release_phy(hw);
2644 2811
2645out: 2812out:
2646 /* Revert to MDIO fast mode, if applicable */ 2813 /* Revert to MDIO fast mode, if applicable */
2647 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2814 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2648 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2815 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2649 2816
2817 if (!locked)
2818 hw->phy.ops.release_phy(hw);
2819
2650 return ret_val; 2820 return ret_val;
2651} 2821}
2652 2822
2653/** 2823/**
2824 * e1000_write_phy_reg_hv - Write HV PHY register
2825 * @hw: pointer to the HW structure
2826 * @offset: register offset to write to
2827 * @data: data to write at register offset
2828 *
2829 * Acquires semaphore then writes the data to PHY register at the offset.
2830 * Release the acquired semaphores before exiting.
2831 **/
2832s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2833{
2834 return __e1000_write_phy_reg_hv(hw, offset, data, false);
2835}
2836
2837/**
2838 * e1000_write_phy_reg_hv_locked - Write HV PHY register
2839 * @hw: pointer to the HW structure
2840 * @offset: register offset to write to
2841 * @data: data to write at register offset
2842 *
2843 * Writes the data to PHY register at the offset. Assumes semaphore
2844 * already acquired.
2845 **/
2846s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
2847{
2848 return __e1000_write_phy_reg_hv(hw, offset, data, true);
2849}
2850
2851/**
2654 * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page 2852 * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
2655 * @page: page to be accessed 2853 * @page: page to be accessed
2656 **/ 2854 **/
@@ -2671,10 +2869,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
2671 * @data: pointer to the data to be read or written 2869 * @data: pointer to the data to be read or written
2672 * @read: determines if operation is read or written 2870 * @read: determines if operation is read or written
2673 * 2871 *
2674 * Acquires semaphore, if necessary, then reads the PHY register at offset 2872 * Reads the PHY register at offset and stores the retreived information
2675 * and storing the retreived information in data. Release any acquired 2873 * in data. Assumes semaphore already acquired. Note that the procedure
2676 * semaphores before exiting. Note that the procedure to read these regs 2874 * to read these regs uses the address port and data port to read/write.
2677 * uses the address port and data port to read/write.
2678 **/ 2875 **/
2679static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, 2876static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2680 u16 *data, bool read) 2877 u16 *data, bool read)
@@ -2682,20 +2879,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2682 s32 ret_val; 2879 s32 ret_val;
2683 u32 addr_reg = 0; 2880 u32 addr_reg = 0;
2684 u32 data_reg = 0; 2881 u32 data_reg = 0;
2685 u8 phy_acquired = 1;
2686 2882
2687 /* This takes care of the difference with desktop vs mobile phy */ 2883 /* This takes care of the difference with desktop vs mobile phy */
2688 addr_reg = (hw->phy.type == e1000_phy_82578) ? 2884 addr_reg = (hw->phy.type == e1000_phy_82578) ?
2689 I82578_ADDR_REG : I82577_ADDR_REG; 2885 I82578_ADDR_REG : I82577_ADDR_REG;
2690 data_reg = addr_reg + 1; 2886 data_reg = addr_reg + 1;
2691 2887
2692 ret_val = hw->phy.ops.acquire_phy(hw);
2693 if (ret_val) {
2694 hw_dbg(hw, "Could not acquire PHY\n");
2695 phy_acquired = 0;
2696 goto out;
2697 }
2698
2699 /* All operations in this function are phy address 2 */ 2888 /* All operations in this function are phy address 2 */
2700 hw->phy.addr = 2; 2889 hw->phy.addr = 2;
2701 2890
@@ -2718,8 +2907,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2718 } 2907 }
2719 2908
2720out: 2909out:
2721 if (phy_acquired == 1)
2722 hw->phy.ops.release_phy(hw);
2723 return ret_val; 2910 return ret_val;
2724} 2911}
2725 2912
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 977c3d358279..41bd7aeafd82 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -3083,7 +3083,6 @@ static const struct net_device_ops ehea_netdev_ops = {
3083 .ndo_poll_controller = ehea_netpoll, 3083 .ndo_poll_controller = ehea_netpoll,
3084#endif 3084#endif
3085 .ndo_get_stats = ehea_get_stats, 3085 .ndo_get_stats = ehea_get_stats,
3086 .ndo_change_mtu = eth_change_mtu,
3087 .ndo_set_mac_address = ehea_set_mac_addr, 3086 .ndo_set_mac_address = ehea_set_mac_addr,
3088 .ndo_validate_addr = eth_validate_addr, 3087 .ndo_validate_addr = eth_validate_addr,
3089 .ndo_set_multicast_list = ehea_set_multicast_list, 3088 .ndo_set_multicast_list = ehea_set_multicast_list,
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 3747457f5e69..bc7c5b7abb88 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -751,7 +751,7 @@ int ehea_create_busmap(void)
751 751
752 mutex_lock(&ehea_busmap_mutex); 752 mutex_lock(&ehea_busmap_mutex);
753 ehea_mr_len = 0; 753 ehea_mr_len = 0;
754 ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL, 754 ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
755 ehea_create_busmap_callback); 755 ehea_create_busmap_callback);
756 mutex_unlock(&ehea_busmap_mutex); 756 mutex_unlock(&ehea_busmap_mutex);
757 return ret; 757 return ret;
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 117fc6c12e34..66813c91a720 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1666,3 +1666,4 @@ MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
1666MODULE_LICENSE("GPL"); 1666MODULE_LICENSE("GPL");
1667module_param_named(debug, debug.msg_enable, int, 0); 1667module_param_named(debug, debug.msg_enable, int, 0);
1668MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)"); 1668MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)");
1669MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index d4d9a3eda695..f5b96cadeb25 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -111,6 +111,7 @@
111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM 111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
112 */ 112 */
113 113
114#include <linux/capability.h>
114#include <linux/module.h> 115#include <linux/module.h>
115#include <linux/kernel.h> 116#include <linux/kernel.h>
116#include <linux/init.h> 117#include <linux/init.h>
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b7311bc00258..f7d9ac8324cb 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -17,8 +17,13 @@
17#include <linux/mii.h> 17#include <linux/mii.h>
18#include <linux/phy.h> 18#include <linux/phy.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/sched.h>
20#include <net/ethoc.h> 21#include <net/ethoc.h>
21 22
23static int buffer_size = 0x8000; /* 32 KBytes */
24module_param(buffer_size, int, 0);
25MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
26
22/* register offsets */ 27/* register offsets */
23#define MODER 0x00 28#define MODER 0x00
24#define INT_SOURCE 0x04 29#define INT_SOURCE 0x04
@@ -167,6 +172,7 @@
167 * struct ethoc - driver-private device structure 172 * struct ethoc - driver-private device structure
168 * @iobase: pointer to I/O memory region 173 * @iobase: pointer to I/O memory region
169 * @membase: pointer to buffer memory region 174 * @membase: pointer to buffer memory region
175 * @dma_alloc: dma allocated buffer size
170 * @num_tx: number of send buffers 176 * @num_tx: number of send buffers
171 * @cur_tx: last send buffer written 177 * @cur_tx: last send buffer written
172 * @dty_tx: last buffer actually sent 178 * @dty_tx: last buffer actually sent
@@ -185,6 +191,7 @@
185struct ethoc { 191struct ethoc {
186 void __iomem *iobase; 192 void __iomem *iobase;
187 void __iomem *membase; 193 void __iomem *membase;
194 int dma_alloc;
188 195
189 unsigned int num_tx; 196 unsigned int num_tx;
190 unsigned int cur_tx; 197 unsigned int cur_tx;
@@ -216,24 +223,25 @@ struct ethoc_bd {
216 u32 addr; 223 u32 addr;
217}; 224};
218 225
219static u32 ethoc_read(struct ethoc *dev, loff_t offset) 226static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
220{ 227{
221 return ioread32(dev->iobase + offset); 228 return ioread32(dev->iobase + offset);
222} 229}
223 230
224static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) 231static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
225{ 232{
226 iowrite32(data, dev->iobase + offset); 233 iowrite32(data, dev->iobase + offset);
227} 234}
228 235
229static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) 236static inline void ethoc_read_bd(struct ethoc *dev, int index,
237 struct ethoc_bd *bd)
230{ 238{
231 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); 239 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
232 bd->stat = ethoc_read(dev, offset + 0); 240 bd->stat = ethoc_read(dev, offset + 0);
233 bd->addr = ethoc_read(dev, offset + 4); 241 bd->addr = ethoc_read(dev, offset + 4);
234} 242}
235 243
236static void ethoc_write_bd(struct ethoc *dev, int index, 244static inline void ethoc_write_bd(struct ethoc *dev, int index,
237 const struct ethoc_bd *bd) 245 const struct ethoc_bd *bd)
238{ 246{
239 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); 247 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -241,33 +249,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
241 ethoc_write(dev, offset + 4, bd->addr); 249 ethoc_write(dev, offset + 4, bd->addr);
242} 250}
243 251
244static void ethoc_enable_irq(struct ethoc *dev, u32 mask) 252static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
245{ 253{
246 u32 imask = ethoc_read(dev, INT_MASK); 254 u32 imask = ethoc_read(dev, INT_MASK);
247 imask |= mask; 255 imask |= mask;
248 ethoc_write(dev, INT_MASK, imask); 256 ethoc_write(dev, INT_MASK, imask);
249} 257}
250 258
251static void ethoc_disable_irq(struct ethoc *dev, u32 mask) 259static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
252{ 260{
253 u32 imask = ethoc_read(dev, INT_MASK); 261 u32 imask = ethoc_read(dev, INT_MASK);
254 imask &= ~mask; 262 imask &= ~mask;
255 ethoc_write(dev, INT_MASK, imask); 263 ethoc_write(dev, INT_MASK, imask);
256} 264}
257 265
258static void ethoc_ack_irq(struct ethoc *dev, u32 mask) 266static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
259{ 267{
260 ethoc_write(dev, INT_SOURCE, mask); 268 ethoc_write(dev, INT_SOURCE, mask);
261} 269}
262 270
263static void ethoc_enable_rx_and_tx(struct ethoc *dev) 271static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
264{ 272{
265 u32 mode = ethoc_read(dev, MODER); 273 u32 mode = ethoc_read(dev, MODER);
266 mode |= MODER_RXEN | MODER_TXEN; 274 mode |= MODER_RXEN | MODER_TXEN;
267 ethoc_write(dev, MODER, mode); 275 ethoc_write(dev, MODER, mode);
268} 276}
269 277
270static void ethoc_disable_rx_and_tx(struct ethoc *dev) 278static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
271{ 279{
272 u32 mode = ethoc_read(dev, MODER); 280 u32 mode = ethoc_read(dev, MODER);
273 mode &= ~(MODER_RXEN | MODER_TXEN); 281 mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -284,7 +292,7 @@ static int ethoc_init_ring(struct ethoc *dev)
284 dev->cur_rx = 0; 292 dev->cur_rx = 0;
285 293
286 /* setup transmission buffers */ 294 /* setup transmission buffers */
287 bd.addr = 0; 295 bd.addr = virt_to_phys(dev->membase);
288 bd.stat = TX_BD_IRQ | TX_BD_CRC; 296 bd.stat = TX_BD_IRQ | TX_BD_CRC;
289 297
290 for (i = 0; i < dev->num_tx; i++) { 298 for (i = 0; i < dev->num_tx; i++) {
@@ -295,7 +303,6 @@ static int ethoc_init_ring(struct ethoc *dev)
295 bd.addr += ETHOC_BUFSIZ; 303 bd.addr += ETHOC_BUFSIZ;
296 } 304 }
297 305
298 bd.addr = dev->num_tx * ETHOC_BUFSIZ;
299 bd.stat = RX_BD_EMPTY | RX_BD_IRQ; 306 bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
300 307
301 for (i = 0; i < dev->num_rx; i++) { 308 for (i = 0; i < dev->num_rx; i++) {
@@ -400,8 +407,12 @@ static int ethoc_rx(struct net_device *dev, int limit)
400 if (ethoc_update_rx_stats(priv, &bd) == 0) { 407 if (ethoc_update_rx_stats(priv, &bd) == 0) {
401 int size = bd.stat >> 16; 408 int size = bd.stat >> 16;
402 struct sk_buff *skb = netdev_alloc_skb(dev, size); 409 struct sk_buff *skb = netdev_alloc_skb(dev, size);
410
411 size -= 4; /* strip the CRC */
412 skb_reserve(skb, 2); /* align TCP/IP header */
413
403 if (likely(skb)) { 414 if (likely(skb)) {
404 void *src = priv->membase + bd.addr; 415 void *src = phys_to_virt(bd.addr);
405 memcpy_fromio(skb_put(skb, size), src, size); 416 memcpy_fromio(skb_put(skb, size), src, size);
406 skb->protocol = eth_type_trans(skb, dev); 417 skb->protocol = eth_type_trans(skb, dev);
407 priv->stats.rx_packets++; 418 priv->stats.rx_packets++;
@@ -498,7 +509,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
498 return IRQ_NONE; 509 return IRQ_NONE;
499 } 510 }
500 511
501 ethoc_ack_irq(priv, INT_MASK_ALL); 512 ethoc_ack_irq(priv, pending);
502 513
503 if (pending & INT_MASK_BUSY) { 514 if (pending & INT_MASK_BUSY) {
504 dev_err(&dev->dev, "packet dropped\n"); 515 dev_err(&dev->dev, "packet dropped\n");
@@ -653,9 +664,10 @@ static int ethoc_open(struct net_device *dev)
653 if (ret) 664 if (ret)
654 return ret; 665 return ret;
655 666
656 /* calculate the number of TX/RX buffers */ 667 /* calculate the number of TX/RX buffers, maximum 128 supported */
657 num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; 668 num_bd = min_t(unsigned int,
658 priv->num_tx = min(min_tx, num_bd / 4); 669 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
670 priv->num_tx = max(min_tx, num_bd / 4);
659 priv->num_rx = num_bd - priv->num_tx; 671 priv->num_rx = num_bd - priv->num_tx;
660 ethoc_write(priv, TX_BD_NUM, priv->num_tx); 672 ethoc_write(priv, TX_BD_NUM, priv->num_tx);
661 673
@@ -823,7 +835,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
823 else 835 else
824 bd.stat &= ~TX_BD_PAD; 836 bd.stat &= ~TX_BD_PAD;
825 837
826 dest = priv->membase + bd.addr; 838 dest = phys_to_virt(bd.addr);
827 memcpy_toio(dest, skb->data, skb->len); 839 memcpy_toio(dest, skb->data, skb->len);
828 840
829 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); 841 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -903,22 +915,19 @@ static int ethoc_probe(struct platform_device *pdev)
903 915
904 /* obtain buffer memory space */ 916 /* obtain buffer memory space */
905 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 917 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
906 if (!res) { 918 if (res) {
907 dev_err(&pdev->dev, "cannot obtain memory space\n"); 919 mem = devm_request_mem_region(&pdev->dev, res->start,
908 ret = -ENXIO;
909 goto free;
910 }
911
912 mem = devm_request_mem_region(&pdev->dev, res->start,
913 res->end - res->start + 1, res->name); 920 res->end - res->start + 1, res->name);
914 if (!mem) { 921 if (!mem) {
915 dev_err(&pdev->dev, "cannot request memory space\n"); 922 dev_err(&pdev->dev, "cannot request memory space\n");
916 ret = -ENXIO; 923 ret = -ENXIO;
917 goto free; 924 goto free;
925 }
926
927 netdev->mem_start = mem->start;
928 netdev->mem_end = mem->end;
918 } 929 }
919 930
920 netdev->mem_start = mem->start;
921 netdev->mem_end = mem->end;
922 931
923 /* obtain device IRQ number */ 932 /* obtain device IRQ number */
924 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 933 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -933,6 +942,7 @@ static int ethoc_probe(struct platform_device *pdev)
933 /* setup driver-private data */ 942 /* setup driver-private data */
934 priv = netdev_priv(netdev); 943 priv = netdev_priv(netdev);
935 priv->netdev = netdev; 944 priv->netdev = netdev;
945 priv->dma_alloc = 0;
936 946
937 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 947 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
938 mmio->end - mmio->start + 1); 948 mmio->end - mmio->start + 1);
@@ -942,12 +952,27 @@ static int ethoc_probe(struct platform_device *pdev)
942 goto error; 952 goto error;
943 } 953 }
944 954
945 priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, 955 if (netdev->mem_end) {
946 mem->end - mem->start + 1); 956 priv->membase = devm_ioremap_nocache(&pdev->dev,
947 if (!priv->membase) { 957 netdev->mem_start, mem->end - mem->start + 1);
948 dev_err(&pdev->dev, "cannot remap memory space\n"); 958 if (!priv->membase) {
949 ret = -ENXIO; 959 dev_err(&pdev->dev, "cannot remap memory space\n");
950 goto error; 960 ret = -ENXIO;
961 goto error;
962 }
963 } else {
964 /* Allocate buffer memory */
965 priv->membase = dma_alloc_coherent(NULL,
966 buffer_size, (void *)&netdev->mem_start,
967 GFP_KERNEL);
968 if (!priv->membase) {
969 dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
970 buffer_size);
971 ret = -ENOMEM;
972 goto error;
973 }
974 netdev->mem_end = netdev->mem_start + buffer_size;
975 priv->dma_alloc = buffer_size;
951 } 976 }
952 977
953 /* Allow the platform setup code to pass in a MAC address. */ 978 /* Allow the platform setup code to pass in a MAC address. */
@@ -1034,6 +1059,9 @@ free_mdio:
1034 kfree(priv->mdio->irq); 1059 kfree(priv->mdio->irq);
1035 mdiobus_free(priv->mdio); 1060 mdiobus_free(priv->mdio);
1036free: 1061free:
1062 if (priv->dma_alloc)
1063 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1064 netdev->mem_start);
1037 free_netdev(netdev); 1065 free_netdev(netdev);
1038out: 1066out:
1039 return ret; 1067 return ret;
@@ -1059,7 +1087,9 @@ static int ethoc_remove(struct platform_device *pdev)
1059 kfree(priv->mdio->irq); 1087 kfree(priv->mdio->irq);
1060 mdiobus_free(priv->mdio); 1088 mdiobus_free(priv->mdio);
1061 } 1089 }
1062 1090 if (priv->dma_alloc)
1091 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1092 netdev->mem_start);
1063 unregister_netdev(netdev); 1093 unregister_netdev(netdev);
1064 free_netdev(netdev); 1094 free_netdev(netdev);
1065 } 1095 }
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index b2a5ec8f3721..dd4ba01fd92d 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -145,6 +145,7 @@
145 145
146#include <linux/module.h> 146#include <linux/module.h>
147#include <linux/kernel.h> 147#include <linux/kernel.h>
148#include <linux/sched.h>
148#include <linux/string.h> 149#include <linux/string.h>
149#include <linux/errno.h> 150#include <linux/errno.h>
150#include <linux/ioport.h> 151#include <linux/ioport.h>
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 29234380e6c6..16a1d58419d9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
1654 * 1654 *
1655 * index is only used in legacy code 1655 * index is only used in legacy code
1656 */ 1656 */
1657int __init fec_enet_init(struct net_device *dev, int index) 1657static int fec_enet_init(struct net_device *dev, int index)
1658{ 1658{
1659 struct fec_enet_private *fep = netdev_priv(dev); 1659 struct fec_enet_private *fep = netdev_priv(dev);
1660 struct bufdesc *cbd_base; 1660 struct bufdesc *cbd_base;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index c40113f58963..66dace6d324f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
759 759
760 mpc52xx_fec_hw_init(dev); 760 mpc52xx_fec_hw_init(dev);
761 761
762 if (priv->phydev) {
763 phy_stop(priv->phydev);
764 phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
765 phy_start(priv->phydev);
766 }
767
768 bcom_fec_rx_reset(priv->rx_dmatsk); 762 bcom_fec_rx_reset(priv->rx_dmatsk);
769 bcom_fec_tx_reset(priv->tx_dmatsk); 763 bcom_fec_tx_reset(priv->tx_dmatsk);
770 764
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 31e6d62b785d..ee0f3c6d3f88 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
155 { .compatible = "mpc5200b-fec-phy", }, 155 { .compatible = "mpc5200b-fec-phy", },
156 {} 156 {}
157}; 157};
158MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
158 159
159struct of_platform_driver mpc52xx_fec_mdio_driver = { 160struct of_platform_driver mpc52xx_fec_mdio_driver = {
160 .name = "mpc5200b-fec-phy", 161 .name = "mpc5200b-fec-phy",
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0a1c2bb27d4d..e1da4666f204 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -49,6 +49,7 @@
49#include <linux/netdevice.h> 49#include <linux/netdevice.h>
50#include <linux/etherdevice.h> 50#include <linux/etherdevice.h>
51#include <linux/delay.h> 51#include <linux/delay.h>
52#include <linux/sched.h>
52#include <linux/spinlock.h> 53#include <linux/spinlock.h>
53#include <linux/ethtool.h> 54#include <linux/ethtool.h>
54#include <linux/timer.h> 55#include <linux/timer.h>
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 2bc2d2b20644..ec2f5034457f 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = {
1110#endif 1110#endif
1111 {} 1111 {}
1112}; 1112};
1113MODULE_DEVICE_TABLE(of, fs_enet_match);
1113 1114
1114static struct of_platform_driver fs_enet_driver = { 1115static struct of_platform_driver fs_enet_driver = {
1115 .name = "fs_enet", 1116 .name = "fs_enet",
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 93b481b0e3c7..24ff9f43a62b 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
221 }, 221 },
222 {}, 222 {},
223}; 223};
224MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
224 225
225static struct of_platform_driver fs_enet_bb_mdio_driver = { 226static struct of_platform_driver fs_enet_bb_mdio_driver = {
226 .name = "fsl-bb-mdio", 227 .name = "fsl-bb-mdio",
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a2d69c1cd07e..96eba4280c5c 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
219#endif 219#endif
220 {}, 220 {},
221}; 221};
222MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
222 223
223static struct of_platform_driver fs_enet_fec_mdio_driver = { 224static struct of_platform_driver fs_enet_fec_mdio_driver = {
224 .name = "fsl-fec-mdio", 225 .name = "fsl-fec-mdio",
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d167090248e2..6ac464866972 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
407 }, 407 },
408 {}, 408 {},
409}; 409};
410MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
410 411
411static struct of_platform_driver fsl_pq_mdio_driver = { 412static struct of_platform_driver fsl_pq_mdio_driver = {
412 .name = "fsl-pq_mdio", 413 .name = "fsl-pq_mdio",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 1e5289ffef6f..5bf31f1509c9 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2325,9 +2325,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2325 return IRQ_HANDLED; 2325 return IRQ_HANDLED;
2326} 2326}
2327 2327
2328/* work with hotplug and coldplug */
2329MODULE_ALIAS("platform:fsl-gianfar");
2330
2331static struct of_device_id gfar_match[] = 2328static struct of_device_id gfar_match[] =
2332{ 2329{
2333 { 2330 {
@@ -2336,6 +2333,7 @@ static struct of_device_id gfar_match[] =
2336 }, 2333 },
2337 {}, 2334 {},
2338}; 2335};
2336MODULE_DEVICE_TABLE(of, gfar_match);
2339 2337
2340/* Structure for a device driver */ 2338/* Structure for a device driver */
2341static struct of_platform_driver gfar_driver = { 2339static struct of_platform_driver gfar_driver = {
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 1d5064a09aca..f7519a594945 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -145,6 +145,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
145/* Time in jiffies before concluding the transmitter is hung. */ 145/* Time in jiffies before concluding the transmitter is hung. */
146#define TX_TIMEOUT (5*HZ) 146#define TX_TIMEOUT (5*HZ)
147 147
148#include <linux/capability.h>
148#include <linux/module.h> 149#include <linux/module.h>
149#include <linux/kernel.h> 150#include <linux/kernel.h>
150#include <linux/string.h> 151#include <linux/string.h>
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 7bcaf7c66243..e344c84c0ef9 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -44,6 +44,7 @@
44#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/init.h> 46#include <linux/init.h>
47#include <linux/sched.h>
47#include <linux/string.h> 48#include <linux/string.h>
48#include <linux/workqueue.h> 49#include <linux/workqueue.h>
49#include <linux/fs.h> 50#include <linux/fs.h>
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index aa4488e871b2..ed60fd664273 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -71,6 +71,7 @@
71 71
72/*****************************************************************************/ 72/*****************************************************************************/
73 73
74#include <linux/capability.h>
74#include <linux/module.h> 75#include <linux/module.h>
75#include <linux/ioport.h> 76#include <linux/ioport.h>
76#include <linux/string.h> 77#include <linux/string.h>
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 88c593596020..1686f6dcbbce 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -61,6 +61,7 @@
61 61
62/*****************************************************************************/ 62/*****************************************************************************/
63 63
64#include <linux/capability.h>
64#include <linux/module.h> 65#include <linux/module.h>
65#include <linux/ioport.h> 66#include <linux/ioport.h>
66#include <linux/string.h> 67#include <linux/string.h>
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 0013c409782c..91c5790c9581 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -42,6 +42,7 @@
42 42
43/*****************************************************************************/ 43/*****************************************************************************/
44 44
45#include <linux/capability.h>
45#include <linux/module.h> 46#include <linux/module.h>
46#include <linux/types.h> 47#include <linux/types.h>
47#include <linux/net.h> 48#include <linux/net.h>
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 33b55f729742..db4b7f1603f6 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax)
258 } 258 }
259 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) { 259 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
260 printk(KERN_INFO 260 printk(KERN_INFO
261 "mkiss: %s: Switchting to crc-smack\n", 261 "mkiss: %s: Switching to crc-smack\n",
262 ax->dev->name); 262 ax->dev->name);
263 ax->crcmode = CRC_MODE_SMACK; 263 ax->crcmode = CRC_MODE_SMACK;
264 } 264 }
@@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax)
272 } 272 }
273 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) { 273 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
274 printk(KERN_INFO 274 printk(KERN_INFO
275 "mkiss: %s: Switchting to crc-flexnet\n", 275 "mkiss: %s: Switching to crc-flexnet\n",
276 ax->dev->name); 276 ax->dev->name);
277 ax->crcmode = CRC_MODE_FLEX; 277 ax->crcmode = CRC_MODE_FLEX;
278 } 278 }
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index a9a1a99f02dd..dd8665138062 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -98,6 +98,7 @@
98 98
99#include <linux/module.h> 99#include <linux/module.h>
100#include <linux/kernel.h> 100#include <linux/kernel.h>
101#include <linux/sched.h>
101#include <linux/string.h> 102#include <linux/string.h>
102#include <linux/errno.h> 103#include <linux/errno.h>
103#include <linux/ioport.h> 104#include <linux/ioport.h>
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 89c82c5e63e4..3fae87559791 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -24,6 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include <linux/module.h>
27#include <linux/sched.h> 28#include <linux/sched.h>
28#include <linux/string.h> 29#include <linux/string.h>
29#include <linux/errno.h> 30#include <linux/errno.h>
@@ -443,7 +444,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
443 ret |= EMAC_MR1_TFS_2K; 444 ret |= EMAC_MR1_TFS_2K;
444 break; 445 break;
445 default: 446 default:
446 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", 447 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
447 dev->ndev->name, tx_size); 448 dev->ndev->name, tx_size);
448 } 449 }
449 450
@@ -470,6 +471,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
470 DBG2(dev, "__emac4_calc_base_mr1" NL); 471 DBG2(dev, "__emac4_calc_base_mr1" NL);
471 472
472 switch(tx_size) { 473 switch(tx_size) {
474 case 16384:
475 ret |= EMAC4_MR1_TFS_16K;
476 break;
473 case 4096: 477 case 4096:
474 ret |= EMAC4_MR1_TFS_4K; 478 ret |= EMAC4_MR1_TFS_4K;
475 break; 479 break;
@@ -477,7 +481,7 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
477 ret |= EMAC4_MR1_TFS_2K; 481 ret |= EMAC4_MR1_TFS_2K;
478 break; 482 break;
479 default: 483 default:
480 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", 484 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
481 dev->ndev->name, tx_size); 485 dev->ndev->name, tx_size);
482 } 486 }
483 487
@@ -2985,6 +2989,7 @@ static struct of_device_id emac_match[] =
2985 }, 2989 },
2986 {}, 2990 {},
2987}; 2991};
2992MODULE_DEVICE_TABLE(of, emac_match);
2988 2993
2989static struct of_platform_driver emac_driver = { 2994static struct of_platform_driver emac_driver = {
2990 .name = "emac", 2995 .name = "emac",
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 0afc2cf5c52b..d34adf99fc6a 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -153,6 +153,7 @@ struct emac_regs {
153#define EMAC4_MR1_RFS_16K 0x00280000 153#define EMAC4_MR1_RFS_16K 0x00280000
154#define EMAC4_MR1_TFS_2K 0x00020000 154#define EMAC4_MR1_TFS_2K 0x00020000
155#define EMAC4_MR1_TFS_4K 0x00030000 155#define EMAC4_MR1_TFS_4K 0x00030000
156#define EMAC4_MR1_TFS_16K 0x00050000
156#define EMAC4_MR1_TR 0x00008000 157#define EMAC4_MR1_TR 0x00008000
157#define EMAC4_MR1_MWSW_001 0x00001000 158#define EMAC4_MR1_MWSW_001 0x00001000
158#define EMAC4_MR1_JPSM 0x00000800 159#define EMAC4_MR1_JPSM 0x00000800
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 801f088c134f..030913f8bd26 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,12 +98,13 @@ static void ri_tasklet(unsigned long dev)
98 stats->tx_packets++; 98 stats->tx_packets++;
99 stats->tx_bytes +=skb->len; 99 stats->tx_bytes +=skb->len;
100 100
101 skb->dev = __dev_get_by_index(&init_net, skb->iif); 101 skb->dev = dev_get_by_index(&init_net, skb->iif);
102 if (!skb->dev) { 102 if (!skb->dev) {
103 dev_kfree_skb(skb); 103 dev_kfree_skb(skb);
104 stats->tx_dropped++; 104 stats->tx_dropped++;
105 break; 105 break;
106 } 106 }
107 dev_put(skb->dev);
107 skb->iif = _dev->ifindex; 108 skb->iif = _dev->ifindex;
108 109
109 if (from & AT_EGRESS) { 110 if (from & AT_EGRESS) {
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index a0231cd079f1..7d76bb085e10 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -286,41 +286,6 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
286} 286}
287 287
288/** 288/**
289 * igb_update_mc_addr_list - Update Multicast addresses
290 * @hw: pointer to the HW structure
291 * @mc_addr_list: array of multicast addresses to program
292 * @mc_addr_count: number of multicast addresses to program
293 *
294 * Updates entire Multicast Table Array.
295 * The caller must have a packed mc_addr_list of multicast addresses.
296 **/
297void igb_update_mc_addr_list(struct e1000_hw *hw,
298 u8 *mc_addr_list, u32 mc_addr_count)
299{
300 u32 hash_value, hash_bit, hash_reg;
301 int i;
302
303 /* clear mta_shadow */
304 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
305
306 /* update mta_shadow from mc_addr_list */
307 for (i = 0; (u32) i < mc_addr_count; i++) {
308 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
309
310 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
311 hash_bit = hash_value & 0x1F;
312
313 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
314 mc_addr_list += (ETH_ALEN);
315 }
316
317 /* replace the entire MTA table */
318 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
319 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
320 wrfl();
321}
322
323/**
324 * igb_hash_mc_addr - Generate a multicast hash value 289 * igb_hash_mc_addr - Generate a multicast hash value
325 * @hw: pointer to the HW structure 290 * @hw: pointer to the HW structure
326 * @mc_addr: pointer to a multicast address 291 * @mc_addr: pointer to a multicast address
@@ -329,7 +294,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
329 * the multicast filter table array address and new table value. See 294 * the multicast filter table array address and new table value. See
330 * igb_mta_set() 295 * igb_mta_set()
331 **/ 296 **/
332u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) 297static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
333{ 298{
334 u32 hash_value, hash_mask; 299 u32 hash_value, hash_mask;
335 u8 bit_shift = 0; 300 u8 bit_shift = 0;
@@ -392,6 +357,41 @@ u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
392} 357}
393 358
394/** 359/**
360 * igb_update_mc_addr_list - Update Multicast addresses
361 * @hw: pointer to the HW structure
362 * @mc_addr_list: array of multicast addresses to program
363 * @mc_addr_count: number of multicast addresses to program
364 *
365 * Updates entire Multicast Table Array.
366 * The caller must have a packed mc_addr_list of multicast addresses.
367 **/
368void igb_update_mc_addr_list(struct e1000_hw *hw,
369 u8 *mc_addr_list, u32 mc_addr_count)
370{
371 u32 hash_value, hash_bit, hash_reg;
372 int i;
373
374 /* clear mta_shadow */
375 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
376
377 /* update mta_shadow from mc_addr_list */
378 for (i = 0; (u32) i < mc_addr_count; i++) {
379 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
380
381 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
382 hash_bit = hash_value & 0x1F;
383
384 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
385 mc_addr_list += (ETH_ALEN);
386 }
387
388 /* replace the entire MTA table */
389 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
390 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
391 wrfl();
392}
393
394/**
395 * igb_clear_hw_cntrs_base - Clear base hardware counters 395 * igb_clear_hw_cntrs_base - Clear base hardware counters
396 * @hw: pointer to the HW structure 396 * @hw: pointer to the HW structure
397 * 397 *
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index 7518af8cbbf5..bca17d882417 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -88,6 +88,5 @@ enum e1000_mng_mode {
88#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 88#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
89 89
90extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); 90extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
91extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
92 91
93#endif 92#endif
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index d004c359244c..b243ed3b0c36 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -34,6 +34,7 @@
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/if_ether.h> 35#include <linux/if_ether.h>
36#include <linux/ethtool.h> 36#include <linux/ethtool.h>
37#include <linux/sched.h>
37 38
38#include "igb.h" 39#include "igb.h"
39 40
@@ -731,7 +732,7 @@ static int igb_set_ringparam(struct net_device *netdev,
731{ 732{
732 struct igb_adapter *adapter = netdev_priv(netdev); 733 struct igb_adapter *adapter = netdev_priv(netdev);
733 struct igb_ring *temp_ring; 734 struct igb_ring *temp_ring;
734 int i, err; 735 int i, err = 0;
735 u32 new_rx_count, new_tx_count; 736 u32 new_rx_count, new_tx_count;
736 737
737 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 738 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -751,18 +752,30 @@ static int igb_set_ringparam(struct net_device *netdev,
751 return 0; 752 return 0;
752 } 753 }
753 754
755 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
756 msleep(1);
757
758 if (!netif_running(adapter->netdev)) {
759 for (i = 0; i < adapter->num_tx_queues; i++)
760 adapter->tx_ring[i].count = new_tx_count;
761 for (i = 0; i < adapter->num_rx_queues; i++)
762 adapter->rx_ring[i].count = new_rx_count;
763 adapter->tx_ring_count = new_tx_count;
764 adapter->rx_ring_count = new_rx_count;
765 goto clear_reset;
766 }
767
754 if (adapter->num_tx_queues > adapter->num_rx_queues) 768 if (adapter->num_tx_queues > adapter->num_rx_queues)
755 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 769 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
756 else 770 else
757 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 771 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
758 if (!temp_ring)
759 return -ENOMEM;
760 772
761 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 773 if (!temp_ring) {
762 msleep(1); 774 err = -ENOMEM;
775 goto clear_reset;
776 }
763 777
764 if (netif_running(adapter->netdev)) 778 igb_down(adapter);
765 igb_down(adapter);
766 779
767 /* 780 /*
768 * We can't just free everything and then setup again, 781 * We can't just free everything and then setup again,
@@ -819,14 +832,11 @@ static int igb_set_ringparam(struct net_device *netdev,
819 832
820 adapter->rx_ring_count = new_rx_count; 833 adapter->rx_ring_count = new_rx_count;
821 } 834 }
822
823 err = 0;
824err_setup: 835err_setup:
825 if (netif_running(adapter->netdev)) 836 igb_up(adapter);
826 igb_up(adapter);
827
828 clear_bit(__IGB_RESETTING, &adapter->state);
829 vfree(temp_ring); 837 vfree(temp_ring);
838clear_reset:
839 clear_bit(__IGB_RESETTING, &adapter->state);
830 return err; 840 return err;
831} 841}
832 842
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 5d6c1530a8c0..714c3a4a44ef 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1246 if (err) 1246 if (err)
1247 goto err_pci_reg; 1247 goto err_pci_reg;
1248 1248
1249 err = pci_enable_pcie_error_reporting(pdev); 1249 pci_enable_pcie_error_reporting(pdev);
1250 if (err) {
1251 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
1252 "0x%x\n", err);
1253 /* non-fatal, continue */
1254 }
1255 1250
1256 pci_set_master(pdev); 1251 pci_set_master(pdev);
1257 pci_save_state(pdev); 1252 pci_save_state(pdev);
@@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1628 struct net_device *netdev = pci_get_drvdata(pdev); 1623 struct net_device *netdev = pci_get_drvdata(pdev);
1629 struct igb_adapter *adapter = netdev_priv(netdev); 1624 struct igb_adapter *adapter = netdev_priv(netdev);
1630 struct e1000_hw *hw = &adapter->hw; 1625 struct e1000_hw *hw = &adapter->hw;
1631 int err;
1632 1626
1633 /* flush_scheduled work may reschedule our watchdog task, so 1627 /* flush_scheduled work may reschedule our watchdog task, so
1634 * explicitly disable watchdog tasks from being rescheduled */ 1628 * explicitly disable watchdog tasks from being rescheduled */
@@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1682 1676
1683 free_netdev(netdev); 1677 free_netdev(netdev);
1684 1678
1685 err = pci_disable_pcie_error_reporting(pdev); 1679 pci_disable_pcie_error_reporting(pdev);
1686 if (err)
1687 dev_err(&pdev->dev,
1688 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
1689 1680
1690 pci_disable_device(pdev); 1681 pci_disable_device(pdev);
1691} 1682}
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index ee17a097d1ca..c68265bd0d1a 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -279,7 +279,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
279{ 279{
280 struct igbvf_adapter *adapter = netdev_priv(netdev); 280 struct igbvf_adapter *adapter = netdev_priv(netdev);
281 struct igbvf_ring *temp_ring; 281 struct igbvf_ring *temp_ring;
282 int err; 282 int err = 0;
283 u32 new_rx_count, new_tx_count; 283 u32 new_rx_count, new_tx_count;
284 284
285 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 285 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -299,15 +299,22 @@ static int igbvf_set_ringparam(struct net_device *netdev,
299 return 0; 299 return 0;
300 } 300 }
301 301
302 temp_ring = vmalloc(sizeof(struct igbvf_ring));
303 if (!temp_ring)
304 return -ENOMEM;
305
306 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 302 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
307 msleep(1); 303 msleep(1);
308 304
309 if (netif_running(adapter->netdev)) 305 if (!netif_running(adapter->netdev)) {
310 igbvf_down(adapter); 306 adapter->tx_ring->count = new_tx_count;
307 adapter->rx_ring->count = new_rx_count;
308 goto clear_reset;
309 }
310
311 temp_ring = vmalloc(sizeof(struct igbvf_ring));
312 if (!temp_ring) {
313 err = -ENOMEM;
314 goto clear_reset;
315 }
316
317 igbvf_down(adapter);
311 318
312 /* 319 /*
313 * We can't just free everything and then setup again, 320 * We can't just free everything and then setup again,
@@ -339,14 +346,11 @@ static int igbvf_set_ringparam(struct net_device *netdev,
339 346
340 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); 347 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
341 } 348 }
342
343 err = 0;
344err_setup: 349err_setup:
345 if (netif_running(adapter->netdev)) 350 igbvf_up(adapter);
346 igbvf_up(adapter);
347
348 clear_bit(__IGBVF_RESETTING, &adapter->state);
349 vfree(temp_ring); 351 vfree(temp_ring);
352clear_reset:
353 clear_bit(__IGBVF_RESETTING, &adapter->state);
350 return err; 354 return err;
351} 355}
352 356
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 2fc30b449eea..cb90d640007a 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -66,7 +66,6 @@
66#include <linux/errno.h> 66#include <linux/errno.h>
67#include <linux/init.h> 67#include <linux/init.h>
68#include <linux/slab.h> 68#include <linux/slab.h>
69#include <linux/kref.h>
70#include <linux/usb.h> 69#include <linux/usb.h>
71#include <linux/device.h> 70#include <linux/device.h>
72#include <linux/crc32.h> 71#include <linux/crc32.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index f4d13fc51cbc..b54d3b48045e 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
118#include <linux/errno.h> 118#include <linux/errno.h>
119#include <linux/init.h> 119#include <linux/init.h>
120#include <linux/slab.h> 120#include <linux/slab.h>
121#include <linux/kref.h>
122#include <linux/usb.h> 121#include <linux/usb.h>
123#include <linux/device.h> 122#include <linux/device.h>
124#include <linux/crc32.h> 123#include <linux/crc32.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5f9d73353972..8d713ebac15b 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
82#include <linux/errno.h> 82#include <linux/errno.h>
83#include <linux/init.h> 83#include <linux/init.h>
84#include <linux/slab.h> 84#include <linux/slab.h>
85#include <linux/kref.h>
86#include <linux/usb.h> 85#include <linux/usb.h>
87#include <linux/device.h> 86#include <linux/device.h>
88#include <linux/crc32.h> 87#include <linux/crc32.h>
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b3d30bcb88e7..c0e0bb9401d3 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -50,7 +50,6 @@
50#include <linux/errno.h> 50#include <linux/errno.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/kref.h>
54#include <linux/usb.h> 53#include <linux/usb.h>
55#include <linux/device.h> 54#include <linux/device.h>
56#include <linux/crc32.h> 55#include <linux/crc32.h>
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1445e5865196..84db145d2b59 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -17,6 +17,7 @@
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/gpio.h>
20 21
21#include <net/irda/irda.h> 22#include <net/irda/irda.h>
22#include <net/irda/irmod.h> 23#include <net/irda/irmod.h>
@@ -163,6 +164,22 @@ inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
163} 164}
164 165
165/* 166/*
167 * Set the IrDA communications mode.
168 */
169static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
170{
171 if (si->pdata->transceiver_mode)
172 si->pdata->transceiver_mode(si->dev, mode);
173 else {
174 if (gpio_is_valid(si->pdata->gpio_pwdown))
175 gpio_set_value(si->pdata->gpio_pwdown,
176 !(mode & IR_OFF) ^
177 !si->pdata->gpio_pwdown_inverted);
178 pxa2xx_transceiver_mode(si->dev, mode);
179 }
180}
181
182/*
166 * Set the IrDA communications speed. 183 * Set the IrDA communications speed.
167 */ 184 */
168static int pxa_irda_set_speed(struct pxa_irda *si, int speed) 185static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
@@ -188,7 +205,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
188 pxa_irda_disable_clk(si); 205 pxa_irda_disable_clk(si);
189 206
190 /* set board transceiver to SIR mode */ 207 /* set board transceiver to SIR mode */
191 si->pdata->transceiver_mode(si->dev, IR_SIRMODE); 208 pxa_irda_set_mode(si, IR_SIRMODE);
192 209
193 /* enable the STUART clock */ 210 /* enable the STUART clock */
194 pxa_irda_enable_sirclk(si); 211 pxa_irda_enable_sirclk(si);
@@ -222,7 +239,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
222 ICCR0 = 0; 239 ICCR0 = 0;
223 240
224 /* set board transceiver to FIR mode */ 241 /* set board transceiver to FIR mode */
225 si->pdata->transceiver_mode(si->dev, IR_FIRMODE); 242 pxa_irda_set_mode(si, IR_FIRMODE);
226 243
227 /* enable the FICP clock */ 244 /* enable the FICP clock */
228 pxa_irda_enable_firclk(si); 245 pxa_irda_enable_firclk(si);
@@ -641,7 +658,7 @@ static void pxa_irda_shutdown(struct pxa_irda *si)
641 local_irq_restore(flags); 658 local_irq_restore(flags);
642 659
643 /* power off board transceiver */ 660 /* power off board transceiver */
644 si->pdata->transceiver_mode(si->dev, IR_OFF); 661 pxa_irda_set_mode(si, IR_OFF);
645 662
646 printk(KERN_DEBUG "pxa_ir: irda shutdown\n"); 663 printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
647} 664}
@@ -849,10 +866,26 @@ static int pxa_irda_probe(struct platform_device *pdev)
849 if (err) 866 if (err)
850 goto err_mem_5; 867 goto err_mem_5;
851 868
852 if (si->pdata->startup) 869 if (gpio_is_valid(si->pdata->gpio_pwdown)) {
870 err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
871 if (err)
872 goto err_startup;
873 err = gpio_direction_output(si->pdata->gpio_pwdown,
874 !si->pdata->gpio_pwdown_inverted);
875 if (err) {
876 gpio_free(si->pdata->gpio_pwdown);
877 goto err_startup;
878 }
879 }
880
881 if (si->pdata->startup) {
853 err = si->pdata->startup(si->dev); 882 err = si->pdata->startup(si->dev);
854 if (err) 883 if (err)
855 goto err_startup; 884 goto err_startup;
885 }
886
887 if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
888 dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");
856 889
857 dev->netdev_ops = &pxa_irda_netdev_ops; 890 dev->netdev_ops = &pxa_irda_netdev_ops;
858 891
@@ -903,6 +936,8 @@ static int pxa_irda_remove(struct platform_device *_dev)
903 if (dev) { 936 if (dev) {
904 struct pxa_irda *si = netdev_priv(dev); 937 struct pxa_irda *si = netdev_priv(dev);
905 unregister_netdev(dev); 938 unregister_netdev(dev);
939 if (gpio_is_valid(si->pdata->gpio_pwdown))
940 gpio_free(si->pdata->gpio_pwdown);
906 if (si->pdata->shutdown) 941 if (si->pdata->shutdown)
907 si->pdata->shutdown(si->dev); 942 si->pdata->shutdown(si->dev);
908 kfree(si->tx_buff.head); 943 kfree(si->tx_buff.head);
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 38bf7cf2256d..c412e8026173 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -232,8 +232,11 @@ static int sa1100_irda_startup(struct sa1100_irda *si)
232 /* 232 /*
233 * Ensure that the ports for this device are setup correctly. 233 * Ensure that the ports for this device are setup correctly.
234 */ 234 */
235 if (si->pdata->startup) 235 if (si->pdata->startup) {
236 si->pdata->startup(si->dev); 236 ret = si->pdata->startup(si->dev);
237 if (ret)
238 return ret;
239 }
237 240
238 /* 241 /*
239 * Configure PPC for IRDA - we want to drive TXD2 low. 242 * Configure PPC for IRDA - we want to drive TXD2 low.
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index fcf287b749db..99e1ec02a011 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -120,6 +120,7 @@
120#include <linux/module.h> 120#include <linux/module.h>
121#include <linux/delay.h> 121#include <linux/delay.h>
122#include <linux/init.h> 122#include <linux/init.h>
123#include <linux/sched.h>
123 124
124#include <net/irda/irda.h> 125#include <net/irda/irda.h>
125 126
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index e36e951cbc65..aa7286bc4364 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
495 cnx->remote_lp); 495 cnx->remote_lp);
496 } else { 496 } else {
497 memcpy(&cnx->cap_ack_event, event, 497 memcpy(&cnx->cap_ack_event, event,
498 sizeof(&cnx->cap_ack_event)); 498 sizeof(cnx->cap_ack_event));
499 cnx->state |= VETH_STATE_GOTCAPACK; 499 cnx->state |= VETH_STATE_GOTCAPACK;
500 veth_kick_statemachine(cnx); 500 veth_kick_statemachine(cnx);
501 } 501 }
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index dd688d45e9cd..385be6016667 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -267,7 +267,8 @@ struct ixgbe_adapter {
267 enum ixgbe_fc_mode last_lfc_mode; 267 enum ixgbe_fc_mode last_lfc_mode;
268 268
269 /* Interrupt Throttle Rate */ 269 /* Interrupt Throttle Rate */
270 u32 itr_setting; 270 u32 rx_itr_setting;
271 u32 tx_itr_setting;
271 u16 eitr_low; 272 u16 eitr_low;
272 u16 eitr_high; 273 u16 eitr_high;
273 274
@@ -351,7 +352,8 @@ struct ixgbe_adapter {
351 struct ixgbe_hw_stats stats; 352 struct ixgbe_hw_stats stats;
352 353
353 /* Interrupt Throttle Rate */ 354 /* Interrupt Throttle Rate */
354 u32 eitr_param; 355 u32 rx_eitr_param;
356 u32 tx_eitr_param;
355 357
356 unsigned long state; 358 unsigned long state;
357 u64 tx_busy; 359 u64 tx_busy;
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 56b12f3192f1..e2d5343f1275 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
425#endif /* CONFIG_DCB */ 425#endif /* CONFIG_DCB */
426 default: 426 default:
427 hw_dbg(hw, "Flow control param set incorrectly\n"); 427 hw_dbg(hw, "Flow control param set incorrectly\n");
428 ret_val = -IXGBE_ERR_CONFIG; 428 ret_val = IXGBE_ERR_CONFIG;
429 goto out; 429 goto out;
430 break; 430 break;
431 } 431 }
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 2ec58dcdb82b..34b04924c8a1 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -330,6 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
330 330
331 switch (hw->device_id) { 331 switch (hw->device_id) {
332 case IXGBE_DEV_ID_82599_KX4: 332 case IXGBE_DEV_ID_82599_KX4:
333 case IXGBE_DEV_ID_82599_KX4_MEZZ:
334 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
333 case IXGBE_DEV_ID_82599_XAUI_LOM: 335 case IXGBE_DEV_ID_82599_XAUI_LOM:
334 /* Default device ID is mezzanine card KX/KX4 */ 336 /* Default device ID is mezzanine card KX/KX4 */
335 media_type = ixgbe_media_type_backplane; 337 media_type = ixgbe_media_type_backplane;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 6621e172df3d..40ff120a9ad4 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1355/** 1355/**
1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1357 * @hw: pointer to hardware structure 1357 * @hw: pointer to hardware structure
1358 * @addr_list: the list of new addresses 1358 * @uc_list: the list of new addresses
1359 * @addr_count: number of addresses
1360 * @next: iterator function to walk the address list
1361 * 1359 *
1362 * The given list replaces any existing list. Clears the secondary addrs from 1360 * The given list replaces any existing list. Clears the secondary addrs from
1363 * receive address registers. Uses unused receive address registers for the 1361 * receive address registers. Uses unused receive address registers for the
@@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1663#endif /* CONFIG_DCB */ 1661#endif /* CONFIG_DCB */
1664 default: 1662 default:
1665 hw_dbg(hw, "Flow control param set incorrectly\n"); 1663 hw_dbg(hw, "Flow control param set incorrectly\n");
1666 ret_val = -IXGBE_ERR_CONFIG; 1664 ret_val = IXGBE_ERR_CONFIG;
1667 goto out; 1665 goto out;
1668 break; 1666 break;
1669 } 1667 }
@@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1734 s32 ret_val = 0; 1732 s32 ret_val = 0;
1735 ixgbe_link_speed speed; 1733 ixgbe_link_speed speed;
1736 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 1734 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1735 u32 links2, anlp1_reg, autoc_reg, links;
1737 bool link_up; 1736 bool link_up;
1738 1737
1739 /* 1738 /*
1740 * AN should have completed when the cable was plugged in. 1739 * AN should have completed when the cable was plugged in.
1741 * Look for reasons to bail out. Bail out if: 1740 * Look for reasons to bail out. Bail out if:
1742 * - FC autoneg is disabled, or if 1741 * - FC autoneg is disabled, or if
1743 * - we don't have multispeed fiber, or if 1742 * - link is not up.
1744 * - we're not running at 1G, or if
1745 * - link is not up, or if
1746 * - link is up but AN did not complete, or if
1747 * - link is up and AN completed but timed out
1748 * 1743 *
1749 * Since we're being called from an LSC, link is already know to be up. 1744 * Since we're being called from an LSC, link is already known to be up.
1750 * So use link_up_wait_to_complete=false. 1745 * So use link_up_wait_to_complete=false.
1751 */ 1746 */
1752 hw->mac.ops.check_link(hw, &speed, &link_up, false); 1747 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1753 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 1748
1754 1749 if (hw->fc.disable_fc_autoneg || (!link_up)) {
1755 if (hw->fc.disable_fc_autoneg ||
1756 !hw->phy.multispeed_fiber ||
1757 (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
1758 !link_up ||
1759 ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1760 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1761 hw->fc.fc_was_autonegged = false; 1750 hw->fc.fc_was_autonegged = false;
1762 hw->fc.current_mode = hw->fc.requested_mode; 1751 hw->fc.current_mode = hw->fc.requested_mode;
1763 hw_dbg(hw, "Autoneg FC was skipped.\n");
1764 goto out; 1752 goto out;
1765 } 1753 }
1766 1754
1767 /* 1755 /*
1756 * On backplane, bail out if
1757 * - backplane autoneg was not completed, or if
1758 * - link partner is not AN enabled
1759 */
1760 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1761 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1762 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1763 if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
1764 ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
1765 hw->fc.fc_was_autonegged = false;
1766 hw->fc.current_mode = hw->fc.requested_mode;
1767 goto out;
1768 }
1769 }
1770
1771 /*
1772 * On multispeed fiber at 1g, bail out if
1773 * - link is up but AN did not complete, or if
1774 * - link is up and AN completed but timed out
1775 */
1776 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
1777 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1778 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1779 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1780 hw->fc.fc_was_autonegged = false;
1781 hw->fc.current_mode = hw->fc.requested_mode;
1782 goto out;
1783 }
1784 }
1785
1786 /*
1768 * Read the AN advertisement and LP ability registers and resolve 1787 * Read the AN advertisement and LP ability registers and resolve
1769 * local flow control settings accordingly 1788 * local flow control settings accordingly
1770 */ 1789 */
1771 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 1790 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1772 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 1791 (hw->phy.media_type != ixgbe_media_type_backplane)) {
1773 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && 1792 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1774 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) { 1793 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1794 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1795 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1796 /*
1797 * Now we need to check if the user selected Rx ONLY
1798 * of pause frames. In this case, we had to advertise
1799 * FULL flow control because we could not advertise RX
1800 * ONLY. Hence, we must now check to see if we need to
1801 * turn OFF the TRANSMISSION of PAUSE frames.
1802 */
1803 if (hw->fc.requested_mode == ixgbe_fc_full) {
1804 hw->fc.current_mode = ixgbe_fc_full;
1805 hw_dbg(hw, "Flow Control = FULL.\n");
1806 } else {
1807 hw->fc.current_mode = ixgbe_fc_rx_pause;
1808 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1809 }
1810 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1811 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1812 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1813 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1814 hw->fc.current_mode = ixgbe_fc_tx_pause;
1815 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1816 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1817 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1818 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1819 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1820 hw->fc.current_mode = ixgbe_fc_rx_pause;
1821 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1822 } else {
1823 hw->fc.current_mode = ixgbe_fc_none;
1824 hw_dbg(hw, "Flow Control = NONE.\n");
1825 }
1826 }
1827
1828 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1775 /* 1829 /*
1776 * Now we need to check if the user selected Rx ONLY 1830 * Read the 10g AN autoc and LP ability registers and resolve
1777 * of pause frames. In this case, we had to advertise 1831 * local flow control settings accordingly
1778 * FULL flow control because we could not advertise RX
1779 * ONLY. Hence, we must now check to see if we need to
1780 * turn OFF the TRANSMISSION of PAUSE frames.
1781 */ 1832 */
1782 if (hw->fc.requested_mode == ixgbe_fc_full) { 1833 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1783 hw->fc.current_mode = ixgbe_fc_full; 1834 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1784 hw_dbg(hw, "Flow Control = FULL.\n"); 1835
1785 } else { 1836 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1837 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1838 /*
1839 * Now we need to check if the user selected Rx ONLY
1840 * of pause frames. In this case, we had to advertise
1841 * FULL flow control because we could not advertise RX
1842 * ONLY. Hence, we must now check to see if we need to
1843 * turn OFF the TRANSMISSION of PAUSE frames.
1844 */
1845 if (hw->fc.requested_mode == ixgbe_fc_full) {
1846 hw->fc.current_mode = ixgbe_fc_full;
1847 hw_dbg(hw, "Flow Control = FULL.\n");
1848 } else {
1849 hw->fc.current_mode = ixgbe_fc_rx_pause;
1850 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1851 }
1852 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1853 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1854 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1855 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1856 hw->fc.current_mode = ixgbe_fc_tx_pause;
1857 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1858 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1859 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1860 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1861 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1786 hw->fc.current_mode = ixgbe_fc_rx_pause; 1862 hw->fc.current_mode = ixgbe_fc_rx_pause;
1787 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); 1863 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1864 } else {
1865 hw->fc.current_mode = ixgbe_fc_none;
1866 hw_dbg(hw, "Flow Control = NONE.\n");
1788 } 1867 }
1789 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1790 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1791 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1792 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1793 hw->fc.current_mode = ixgbe_fc_tx_pause;
1794 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1795 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1796 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1797 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1798 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1799 hw->fc.current_mode = ixgbe_fc_rx_pause;
1800 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1801 } else {
1802 hw->fc.current_mode = ixgbe_fc_none;
1803 hw_dbg(hw, "Flow Control = NONE.\n");
1804 } 1868 }
1805
1806 /* Record that current_mode is the result of a successful autoneg */ 1869 /* Record that current_mode is the result of a successful autoneg */
1807 hw->fc.fc_was_autonegged = true; 1870 hw->fc.fc_was_autonegged = true;
1808 1871
@@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1919#endif /* CONFIG_DCB */ 1982#endif /* CONFIG_DCB */
1920 default: 1983 default:
1921 hw_dbg(hw, "Flow control param set incorrectly\n"); 1984 hw_dbg(hw, "Flow control param set incorrectly\n");
1922 ret_val = -IXGBE_ERR_CONFIG; 1985 ret_val = IXGBE_ERR_CONFIG;
1923 goto out; 1986 goto out;
1924 break; 1987 break;
1925 } 1988 }
@@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1927 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 1990 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
1928 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 1991 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
1929 1992
1930 /* Enable and restart autoneg to inform the link partner */
1931 reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
1932
1933 /* Disable AN timeout */ 1993 /* Disable AN timeout */
1934 if (hw->fc.strict_ieee) 1994 if (hw->fc.strict_ieee)
1935 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 1995 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
@@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1937 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 1997 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
1938 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 1998 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
1939 1999
2000 /*
2001 * Set up the 10G flow control advertisement registers so the HW
2002 * can do fc autoneg once the cable is plugged in. If we end up
2003 * using 1g instead, this is harmless.
2004 */
2005 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2006
2007 /*
2008 * The possible values of fc.requested_mode are:
2009 * 0: Flow control is completely disabled
2010 * 1: Rx flow control is enabled (we can receive pause frames,
2011 * but not send pause frames).
2012 * 2: Tx flow control is enabled (we can send pause frames but
2013 * we do not support receiving pause frames).
2014 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2015 * other: Invalid.
2016 */
2017 switch (hw->fc.requested_mode) {
2018 case ixgbe_fc_none:
2019 /* Flow control completely disabled by software override. */
2020 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2021 break;
2022 case ixgbe_fc_rx_pause:
2023 /*
2024 * Rx Flow control is enabled and Tx Flow control is
2025 * disabled by software override. Since there really
2026 * isn't a way to advertise that we are capable of RX
2027 * Pause ONLY, we will advertise that we support both
2028 * symmetric and asymmetric Rx PAUSE. Later, we will
2029 * disable the adapter's ability to send PAUSE frames.
2030 */
2031 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2032 break;
2033 case ixgbe_fc_tx_pause:
2034 /*
2035 * Tx Flow control is enabled, and Rx Flow control is
2036 * disabled by software override.
2037 */
2038 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2039 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2040 break;
2041 case ixgbe_fc_full:
2042 /* Flow control (both Rx and Tx) is enabled by SW override. */
2043 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2044 break;
2045#ifdef CONFIG_DCB
2046 case ixgbe_fc_pfc:
2047 goto out;
2048 break;
2049#endif /* CONFIG_DCB */
2050 default:
2051 hw_dbg(hw, "Flow control param set incorrectly\n");
2052 ret_val = IXGBE_ERR_CONFIG;
2053 goto out;
2054 break;
2055 }
2056 /*
2057 * AUTOC restart handles negotiation of 1G and 10G. There is
2058 * no need to set the PCS1GCTL register.
2059 */
2060 reg |= IXGBE_AUTOC_AN_RESTART;
2061 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2062 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2063
1940out: 2064out:
1941 return ret_val; 2065 return ret_val;
1942} 2066}
@@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2000 2124
2001 while (timeout) { 2125 while (timeout) {
2002 if (ixgbe_get_eeprom_semaphore(hw)) 2126 if (ixgbe_get_eeprom_semaphore(hw))
2003 return -IXGBE_ERR_SWFW_SYNC; 2127 return IXGBE_ERR_SWFW_SYNC;
2004 2128
2005 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2129 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2006 if (!(gssr & (fwmask | swmask))) 2130 if (!(gssr & (fwmask | swmask)))
@@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2017 2141
2018 if (!timeout) { 2142 if (!timeout) {
2019 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); 2143 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
2020 return -IXGBE_ERR_SWFW_SYNC; 2144 return IXGBE_ERR_SWFW_SYNC;
2021 } 2145 }
2022 2146
2023 gssr |= swmask; 2147 gssr |= swmask;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 026e94a99849..856c18c207f3 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, 54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, 55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
56 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
57 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
58 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
59 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
56 {"lsc_int", IXGBE_STAT(lsc_int)}, 60 {"lsc_int", IXGBE_STAT(lsc_int)},
57 {"tx_busy", IXGBE_STAT(tx_busy)}, 61 {"tx_busy", IXGBE_STAT(tx_busy)},
58 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 62 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
@@ -794,7 +798,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
794{ 798{
795 struct ixgbe_adapter *adapter = netdev_priv(netdev); 799 struct ixgbe_adapter *adapter = netdev_priv(netdev);
796 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 800 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
797 int i, err; 801 int i, err = 0;
798 u32 new_rx_count, new_tx_count; 802 u32 new_rx_count, new_tx_count;
799 bool need_update = false; 803 bool need_update = false;
800 804
@@ -818,6 +822,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
818 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 822 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
819 msleep(1); 823 msleep(1);
820 824
825 if (!netif_running(adapter->netdev)) {
826 for (i = 0; i < adapter->num_tx_queues; i++)
827 adapter->tx_ring[i].count = new_tx_count;
828 for (i = 0; i < adapter->num_rx_queues; i++)
829 adapter->rx_ring[i].count = new_rx_count;
830 adapter->tx_ring_count = new_tx_count;
831 adapter->rx_ring_count = new_rx_count;
832 goto err_setup;
833 }
834
821 temp_tx_ring = kcalloc(adapter->num_tx_queues, 835 temp_tx_ring = kcalloc(adapter->num_tx_queues,
822 sizeof(struct ixgbe_ring), GFP_KERNEL); 836 sizeof(struct ixgbe_ring), GFP_KERNEL);
823 if (!temp_tx_ring) { 837 if (!temp_tx_ring) {
@@ -875,8 +889,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
875 889
876 /* if rings need to be updated, here's the place to do it in one shot */ 890 /* if rings need to be updated, here's the place to do it in one shot */
877 if (need_update) { 891 if (need_update) {
878 if (netif_running(netdev)) 892 ixgbe_down(adapter);
879 ixgbe_down(adapter);
880 893
881 /* tx */ 894 /* tx */
882 if (new_tx_count != adapter->tx_ring_count) { 895 if (new_tx_count != adapter->tx_ring_count) {
@@ -893,13 +906,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
893 temp_rx_ring = NULL; 906 temp_rx_ring = NULL;
894 adapter->rx_ring_count = new_rx_count; 907 adapter->rx_ring_count = new_rx_count;
895 } 908 }
896 }
897
898 /* success! */
899 err = 0;
900 if (netif_running(netdev))
901 ixgbe_up(adapter); 909 ixgbe_up(adapter);
902 910 }
903err_setup: 911err_setup:
904 clear_bit(__IXGBE_RESETTING, &adapter->state); 912 clear_bit(__IXGBE_RESETTING, &adapter->state);
905 return err; 913 return err;
@@ -1929,7 +1937,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
1929 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; 1937 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
1930 1938
1931 /* only valid if in constant ITR mode */ 1939 /* only valid if in constant ITR mode */
1932 switch (adapter->itr_setting) { 1940 switch (adapter->rx_itr_setting) {
1933 case 0: 1941 case 0:
1934 /* throttling disabled */ 1942 /* throttling disabled */
1935 ec->rx_coalesce_usecs = 0; 1943 ec->rx_coalesce_usecs = 0;
@@ -1940,9 +1948,25 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
1940 break; 1948 break;
1941 default: 1949 default:
1942 /* fixed interrupt rate mode */ 1950 /* fixed interrupt rate mode */
1943 ec->rx_coalesce_usecs = 1000000/adapter->eitr_param; 1951 ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
1952 break;
1953 }
1954
1955 /* only valid if in constant ITR mode */
1956 switch (adapter->tx_itr_setting) {
1957 case 0:
1958 /* throttling disabled */
1959 ec->tx_coalesce_usecs = 0;
1960 break;
1961 case 1:
1962 /* dynamic ITR mode */
1963 ec->tx_coalesce_usecs = 1;
1964 break;
1965 default:
1966 ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
1944 break; 1967 break;
1945 } 1968 }
1969
1946 return 0; 1970 return 0;
1947} 1971}
1948 1972
@@ -1953,6 +1977,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1953 struct ixgbe_q_vector *q_vector; 1977 struct ixgbe_q_vector *q_vector;
1954 int i; 1978 int i;
1955 1979
1980 /*
1981 * don't accept tx specific changes if we've got mixed RxTx vectors
1982 * test and jump out here if needed before changing the rx numbers
1983 */
1984 if ((1000000/ec->tx_coalesce_usecs) != adapter->tx_eitr_param &&
1985 adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
1986 return -EINVAL;
1987
1956 if (ec->tx_max_coalesced_frames_irq) 1988 if (ec->tx_max_coalesced_frames_irq)
1957 adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq; 1989 adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
1958 1990
@@ -1963,26 +1995,49 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1963 return -EINVAL; 1995 return -EINVAL;
1964 1996
1965 /* store the value in ints/second */ 1997 /* store the value in ints/second */
1966 adapter->eitr_param = 1000000/ec->rx_coalesce_usecs; 1998 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
1967 1999
1968 /* static value of interrupt rate */ 2000 /* static value of interrupt rate */
1969 adapter->itr_setting = adapter->eitr_param; 2001 adapter->rx_itr_setting = adapter->rx_eitr_param;
1970 /* clear the lower bit as its used for dynamic state */ 2002 /* clear the lower bit as its used for dynamic state */
1971 adapter->itr_setting &= ~1; 2003 adapter->rx_itr_setting &= ~1;
1972 } else if (ec->rx_coalesce_usecs == 1) { 2004 } else if (ec->rx_coalesce_usecs == 1) {
1973 /* 1 means dynamic mode */ 2005 /* 1 means dynamic mode */
1974 adapter->eitr_param = 20000; 2006 adapter->rx_eitr_param = 20000;
1975 adapter->itr_setting = 1; 2007 adapter->rx_itr_setting = 1;
1976 } else { 2008 } else {
1977 /* 2009 /*
1978 * any other value means disable eitr, which is best 2010 * any other value means disable eitr, which is best
1979 * served by setting the interrupt rate very high 2011 * served by setting the interrupt rate very high
1980 */ 2012 */
1981 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 2013 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
1982 adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE; 2014 adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
1983 else 2015 else
1984 adapter->eitr_param = IXGBE_MAX_INT_RATE; 2016 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
1985 adapter->itr_setting = 0; 2017 adapter->rx_itr_setting = 0;
2018 }
2019
2020 if (ec->tx_coalesce_usecs > 1) {
2021 /* check the limits */
2022 if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2023 (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2024 return -EINVAL;
2025
2026 /* store the value in ints/second */
2027 adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
2028
2029 /* static value of interrupt rate */
2030 adapter->tx_itr_setting = adapter->tx_eitr_param;
2031
2032 /* clear the lower bit as its used for dynamic state */
2033 adapter->tx_itr_setting &= ~1;
2034 } else if (ec->tx_coalesce_usecs == 1) {
2035 /* 1 means dynamic mode */
2036 adapter->tx_eitr_param = 10000;
2037 adapter->tx_itr_setting = 1;
2038 } else {
2039 adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
2040 adapter->tx_itr_setting = 0;
1986 } 2041 }
1987 2042
1988 /* MSI/MSIx Interrupt Mode */ 2043 /* MSI/MSIx Interrupt Mode */
@@ -1992,17 +2047,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1992 for (i = 0; i < num_vectors; i++) { 2047 for (i = 0; i < num_vectors; i++) {
1993 q_vector = adapter->q_vector[i]; 2048 q_vector = adapter->q_vector[i];
1994 if (q_vector->txr_count && !q_vector->rxr_count) 2049 if (q_vector->txr_count && !q_vector->rxr_count)
1995 /* tx vector gets half the rate */ 2050 /* tx only */
1996 q_vector->eitr = (adapter->eitr_param >> 1); 2051 q_vector->eitr = adapter->tx_eitr_param;
1997 else 2052 else
1998 /* rx only or mixed */ 2053 /* rx only or mixed */
1999 q_vector->eitr = adapter->eitr_param; 2054 q_vector->eitr = adapter->rx_eitr_param;
2000 ixgbe_write_eitr(q_vector); 2055 ixgbe_write_eitr(q_vector);
2001 } 2056 }
2002 /* Legacy Interrupt Mode */ 2057 /* Legacy Interrupt Mode */
2003 } else { 2058 } else {
2004 q_vector = adapter->q_vector[0]; 2059 q_vector = adapter->q_vector[0];
2005 q_vector->eitr = adapter->eitr_param; 2060 q_vector->eitr = adapter->rx_eitr_param;
2006 ixgbe_write_eitr(q_vector); 2061 ixgbe_write_eitr(q_vector);
2007 } 2062 }
2008 2063
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 59ad9590e700..cbb143ca1eb8 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
49static const char ixgbe_driver_string[] = 49static const char ixgbe_driver_string[] =
50 "Intel(R) 10 Gigabit PCI Express Network Driver"; 50 "Intel(R) 10 Gigabit PCI Express Network Driver";
51 51
52#define DRV_VERSION "2.0.37-k2" 52#define DRV_VERSION "2.0.44-k2"
53const char ixgbe_driver_version[] = DRV_VERSION; 53const char ixgbe_driver_version[] = DRV_VERSION;
54static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 54static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
55 55
@@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
97 board_82599 }, 97 board_82599 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
99 board_82599 }, 99 board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
101 board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
101 board_82599 }, 103 board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
105 board_82599 },
102 106
103 /* required last entry */ 107 /* required last entry */
104 {0, } 108 {0, }
@@ -926,12 +930,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
926 r_idx + 1); 930 r_idx + 1);
927 } 931 }
928 932
929 /* if this is a tx only vector halve the interrupt rate */
930 if (q_vector->txr_count && !q_vector->rxr_count) 933 if (q_vector->txr_count && !q_vector->rxr_count)
931 q_vector->eitr = (adapter->eitr_param >> 1); 934 /* tx only */
935 q_vector->eitr = adapter->tx_eitr_param;
932 else if (q_vector->rxr_count) 936 else if (q_vector->rxr_count)
933 /* rx only */ 937 /* rx or mixed */
934 q_vector->eitr = adapter->eitr_param; 938 q_vector->eitr = adapter->rx_eitr_param;
935 939
936 ixgbe_write_eitr(q_vector); 940 ixgbe_write_eitr(q_vector);
937 } 941 }
@@ -1359,7 +1363,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1359 /* If all Rx work done, exit the polling mode */ 1363 /* If all Rx work done, exit the polling mode */
1360 if (work_done < budget) { 1364 if (work_done < budget) {
1361 napi_complete(napi); 1365 napi_complete(napi);
1362 if (adapter->itr_setting & 1) 1366 if (adapter->rx_itr_setting & 1)
1363 ixgbe_set_itr_msix(q_vector); 1367 ixgbe_set_itr_msix(q_vector);
1364 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1368 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1365 ixgbe_irq_enable_queues(adapter, 1369 ixgbe_irq_enable_queues(adapter,
@@ -1420,7 +1424,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1420 /* If all Rx work done, exit the polling mode */ 1424 /* If all Rx work done, exit the polling mode */
1421 if (work_done < budget) { 1425 if (work_done < budget) {
1422 napi_complete(napi); 1426 napi_complete(napi);
1423 if (adapter->itr_setting & 1) 1427 if (adapter->rx_itr_setting & 1)
1424 ixgbe_set_itr_msix(q_vector); 1428 ixgbe_set_itr_msix(q_vector);
1425 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1429 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1426 ixgbe_irq_enable_queues(adapter, 1430 ixgbe_irq_enable_queues(adapter,
@@ -1458,10 +1462,10 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1458 if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) 1462 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
1459 work_done = budget; 1463 work_done = budget;
1460 1464
1461 /* If all Rx work done, exit the polling mode */ 1465 /* If all Tx work done, exit the polling mode */
1462 if (work_done < budget) { 1466 if (work_done < budget) {
1463 napi_complete(napi); 1467 napi_complete(napi);
1464 if (adapter->itr_setting & 1) 1468 if (adapter->tx_itr_setting & 1)
1465 ixgbe_set_itr_msix(q_vector); 1469 ixgbe_set_itr_msix(q_vector);
1466 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1470 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1467 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); 1471 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
@@ -1848,7 +1852,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1848 struct ixgbe_hw *hw = &adapter->hw; 1852 struct ixgbe_hw *hw = &adapter->hw;
1849 1853
1850 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1854 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
1851 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param)); 1855 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
1852 1856
1853 ixgbe_set_ivar(adapter, 0, 0, 0); 1857 ixgbe_set_ivar(adapter, 0, 0, 0);
1854 ixgbe_set_ivar(adapter, 1, 0, 0); 1858 ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -1885,12 +1889,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1885 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1889 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1886 adapter->tx_ring[i].head = IXGBE_TDH(j); 1890 adapter->tx_ring[i].head = IXGBE_TDH(j);
1887 adapter->tx_ring[i].tail = IXGBE_TDT(j); 1891 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1888 /* Disable Tx Head Writeback RO bit, since this hoses 1892 /*
1893 * Disable Tx Head Writeback RO bit, since this hoses
1889 * bookkeeping if things aren't delivered in order. 1894 * bookkeeping if things aren't delivered in order.
1890 */ 1895 */
1891 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 1896 switch (hw->mac.type) {
1897 case ixgbe_mac_82598EB:
1898 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1899 break;
1900 case ixgbe_mac_82599EB:
1901 default:
1902 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
1903 break;
1904 }
1892 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1905 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1893 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 1906 switch (hw->mac.type) {
1907 case ixgbe_mac_82598EB:
1908 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1909 break;
1910 case ixgbe_mac_82599EB:
1911 default:
1912 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
1913 break;
1914 }
1894 } 1915 }
1895 if (hw->mac.type == ixgbe_mac_82599EB) { 1916 if (hw->mac.type == ixgbe_mac_82599EB) {
1896 /* We enable 8 traffic classes, DCB only */ 1917 /* We enable 8 traffic classes, DCB only */
@@ -1970,6 +1991,50 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
1970} 1991}
1971 1992
1972/** 1993/**
1994 * ixgbe_configure_rscctl - enable RSC for the indicated ring
1995 * @adapter: address of board private structure
1996 * @index: index of ring to set
1997 * @rx_buf_len: rx buffer length
1998 **/
1999static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
2000 int rx_buf_len)
2001{
2002 struct ixgbe_ring *rx_ring;
2003 struct ixgbe_hw *hw = &adapter->hw;
2004 int j;
2005 u32 rscctrl;
2006
2007 rx_ring = &adapter->rx_ring[index];
2008 j = rx_ring->reg_idx;
2009 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2010 rscctrl |= IXGBE_RSCCTL_RSCEN;
2011 /*
2012 * we must limit the number of descriptors so that the
2013 * total size of max desc * buf_len is not greater
2014 * than 65535
2015 */
2016 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2017#if (MAX_SKB_FRAGS > 16)
2018 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2019#elif (MAX_SKB_FRAGS > 8)
2020 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2021#elif (MAX_SKB_FRAGS > 4)
2022 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2023#else
2024 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2025#endif
2026 } else {
2027 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2028 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2029 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2030 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2031 else
2032 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2033 }
2034 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
2035}
2036
2037/**
1973 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset 2038 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
1974 * @adapter: board private structure 2039 * @adapter: board private structure
1975 * 2040 *
@@ -1990,7 +2055,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1990 u32 fctrl, hlreg0; 2055 u32 fctrl, hlreg0;
1991 u32 reta = 0, mrqc = 0; 2056 u32 reta = 0, mrqc = 0;
1992 u32 rdrxctl; 2057 u32 rdrxctl;
1993 u32 rscctrl;
1994 int rx_buf_len; 2058 int rx_buf_len;
1995 2059
1996 /* Decide whether to use packet split mode or not */ 2060 /* Decide whether to use packet split mode or not */
@@ -2148,36 +2212,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2148 2212
2149 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2213 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2150 /* Enable 82599 HW-RSC */ 2214 /* Enable 82599 HW-RSC */
2151 for (i = 0; i < adapter->num_rx_queues; i++) { 2215 for (i = 0; i < adapter->num_rx_queues; i++)
2152 rx_ring = &adapter->rx_ring[i]; 2216 ixgbe_configure_rscctl(adapter, i, rx_buf_len);
2153 j = rx_ring->reg_idx; 2217
2154 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2155 rscctrl |= IXGBE_RSCCTL_RSCEN;
2156 /*
2157 * we must limit the number of descriptors so that the
2158 * total size of max desc * buf_len is not greater
2159 * than 65535
2160 */
2161 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2162#if (MAX_SKB_FRAGS > 16)
2163 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2164#elif (MAX_SKB_FRAGS > 8)
2165 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2166#elif (MAX_SKB_FRAGS > 4)
2167 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2168#else
2169 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2170#endif
2171 } else {
2172 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2173 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2174 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2175 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2176 else
2177 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2178 }
2179 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
2180 }
2181 /* Disable RSC for ACK packets */ 2218 /* Disable RSC for ACK packets */
2182 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 2219 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2183 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 2220 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -2926,6 +2963,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
2926 2963
2927 ixgbe_napi_disable_all(adapter); 2964 ixgbe_napi_disable_all(adapter);
2928 2965
2966 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
2967 del_timer_sync(&adapter->sfp_timer);
2929 del_timer_sync(&adapter->watchdog_timer); 2968 del_timer_sync(&adapter->watchdog_timer);
2930 cancel_work_sync(&adapter->watchdog_task); 2969 cancel_work_sync(&adapter->watchdog_task);
2931 2970
@@ -2989,7 +3028,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2989 /* If budget not fully consumed, exit the polling mode */ 3028 /* If budget not fully consumed, exit the polling mode */
2990 if (work_done < budget) { 3029 if (work_done < budget) {
2991 napi_complete(napi); 3030 napi_complete(napi);
2992 if (adapter->itr_setting & 1) 3031 if (adapter->rx_itr_setting & 1)
2993 ixgbe_set_itr(adapter); 3032 ixgbe_set_itr(adapter);
2994 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 3033 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2995 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); 3034 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
@@ -3599,7 +3638,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3599 if (!q_vector) 3638 if (!q_vector)
3600 goto err_out; 3639 goto err_out;
3601 q_vector->adapter = adapter; 3640 q_vector->adapter = adapter;
3602 q_vector->eitr = adapter->eitr_param; 3641 if (q_vector->txr_count && !q_vector->rxr_count)
3642 q_vector->eitr = adapter->tx_eitr_param;
3643 else
3644 q_vector->eitr = adapter->rx_eitr_param;
3603 q_vector->v_idx = q_idx; 3645 q_vector->v_idx = q_idx;
3604 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); 3646 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
3605 adapter->q_vector[q_idx] = q_vector; 3647 adapter->q_vector[q_idx] = q_vector;
@@ -3868,8 +3910,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3868 hw->fc.disable_fc_autoneg = false; 3910 hw->fc.disable_fc_autoneg = false;
3869 3911
3870 /* enable itr by default in dynamic mode */ 3912 /* enable itr by default in dynamic mode */
3871 adapter->itr_setting = 1; 3913 adapter->rx_itr_setting = 1;
3872 adapter->eitr_param = 20000; 3914 adapter->rx_eitr_param = 20000;
3915 adapter->tx_itr_setting = 1;
3916 adapter->tx_eitr_param = 10000;
3873 3917
3874 /* set defaults for eitr in MegaBytes */ 3918 /* set defaults for eitr in MegaBytes */
3875 adapter->eitr_low = 10; 3919 adapter->eitr_low = 10;
@@ -4409,10 +4453,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4409 4453
4410 /* 82598 hardware only has a 32 bit counter in the high register */ 4454 /* 82598 hardware only has a 32 bit counter in the high register */
4411 if (hw->mac.type == ixgbe_mac_82599EB) { 4455 if (hw->mac.type == ixgbe_mac_82599EB) {
4456 u64 tmp;
4412 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 4457 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
4413 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 4458 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
4459 adapter->stats.gorc += (tmp << 32);
4414 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 4460 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
4415 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ 4461 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
4462 adapter->stats.gotc += (tmp << 32);
4416 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 4463 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4417 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 4464 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
4418 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 4465 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
@@ -5048,7 +5095,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5048 /* Right now, we support IPv4 only */ 5095 /* Right now, we support IPv4 only */
5049 struct ixgbe_atr_input atr_input; 5096 struct ixgbe_atr_input atr_input;
5050 struct tcphdr *th; 5097 struct tcphdr *th;
5051 struct udphdr *uh;
5052 struct iphdr *iph = ip_hdr(skb); 5098 struct iphdr *iph = ip_hdr(skb);
5053 struct ethhdr *eth = (struct ethhdr *)skb->data; 5099 struct ethhdr *eth = (struct ethhdr *)skb->data;
5054 u16 vlan_id, src_port, dst_port, flex_bytes; 5100 u16 vlan_id, src_port, dst_port, flex_bytes;
@@ -5062,12 +5108,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5062 dst_port = th->dest; 5108 dst_port = th->dest;
5063 l4type |= IXGBE_ATR_L4TYPE_TCP; 5109 l4type |= IXGBE_ATR_L4TYPE_TCP;
5064 /* l4type IPv4 type is 0, no need to assign */ 5110 /* l4type IPv4 type is 0, no need to assign */
5065 } else if(iph->protocol == IPPROTO_UDP) {
5066 uh = udp_hdr(skb);
5067 src_port = uh->source;
5068 dst_port = uh->dest;
5069 l4type |= IXGBE_ATR_L4TYPE_UDP;
5070 /* l4type IPv4 type is 0, no need to assign */
5071 } else { 5111 } else {
5072 /* Unsupported L4 header, just bail here */ 5112 /* Unsupported L4 header, just bail here */
5073 return; 5113 return;
@@ -5471,12 +5511,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5471 goto err_pci_reg; 5511 goto err_pci_reg;
5472 } 5512 }
5473 5513
5474 err = pci_enable_pcie_error_reporting(pdev); 5514 pci_enable_pcie_error_reporting(pdev);
5475 if (err) {
5476 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
5477 "0x%x\n", err);
5478 /* non-fatal, continue */
5479 }
5480 5515
5481 pci_set_master(pdev); 5516 pci_set_master(pdev);
5482 pci_save_state(pdev); 5517 pci_save_state(pdev);
@@ -5785,7 +5820,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5785{ 5820{
5786 struct net_device *netdev = pci_get_drvdata(pdev); 5821 struct net_device *netdev = pci_get_drvdata(pdev);
5787 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5822 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5788 int err;
5789 5823
5790 set_bit(__IXGBE_DOWN, &adapter->state); 5824 set_bit(__IXGBE_DOWN, &adapter->state);
5791 /* clear the module not found bit to make sure the worker won't 5825 /* clear the module not found bit to make sure the worker won't
@@ -5836,10 +5870,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5836 5870
5837 free_netdev(netdev); 5871 free_netdev(netdev);
5838 5872
5839 err = pci_disable_pcie_error_reporting(pdev); 5873 pci_disable_pcie_error_reporting(pdev);
5840 if (err)
5841 dev_err(&pdev->dev,
5842 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5843 5874
5844 pci_disable_device(pdev); 5875 pci_disable_device(pdev);
5845} 5876}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8761d7899f7d..ef4bdd58e016 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -49,9 +49,11 @@
49#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 49#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
50#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 50#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
51#define IXGBE_DEV_ID_82599_KX4 0x10F7 51#define IXGBE_DEV_ID_82599_KX4 0x10F7
52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
52#define IXGBE_DEV_ID_82599_CX4 0x10F9 53#define IXGBE_DEV_ID_82599_CX4 0x10F9
53#define IXGBE_DEV_ID_82599_SFP 0x10FB 54#define IXGBE_DEV_ID_82599_SFP 0x10FB
54#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 55#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
56#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
55 57
56/* General Registers */ 58/* General Registers */
57#define IXGBE_CTRL 0x00000 59#define IXGBE_CTRL 0x00000
@@ -1336,6 +1338,8 @@
1336#define IXGBE_AUTOC_KX4_SUPP 0x80000000 1338#define IXGBE_AUTOC_KX4_SUPP 0x80000000
1337#define IXGBE_AUTOC_KX_SUPP 0x40000000 1339#define IXGBE_AUTOC_KX_SUPP 0x40000000
1338#define IXGBE_AUTOC_PAUSE 0x30000000 1340#define IXGBE_AUTOC_PAUSE 0x30000000
1341#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
1342#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
1339#define IXGBE_AUTOC_RF 0x08000000 1343#define IXGBE_AUTOC_RF 0x08000000
1340#define IXGBE_AUTOC_PD_TMR 0x06000000 1344#define IXGBE_AUTOC_PD_TMR 0x06000000
1341#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 1345#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
@@ -1404,6 +1408,8 @@
1404#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ 1408#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
1405#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 1409#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
1406 1410
1411#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
1412
1407/* PCS1GLSTA Bit Masks */ 1413/* PCS1GLSTA Bit Masks */
1408#define IXGBE_PCS1GLSTA_LINK_OK 1 1414#define IXGBE_PCS1GLSTA_LINK_OK 1
1409#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 1415#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
@@ -1424,6 +1430,11 @@
1424#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 1430#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
1425#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 1431#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
1426 1432
1433/* ANLP1 Bit Masks */
1434#define IXGBE_ANLP1_PAUSE 0x0C00
1435#define IXGBE_ANLP1_SYM_PAUSE 0x0400
1436#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1437
1427/* SW Semaphore Register bitmasks */ 1438/* SW Semaphore Register bitmasks */
1428#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1439#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1429#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1440#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index b02a981c87a8..34a6cfd17930 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -119,24 +119,9 @@ static struct ixp2400_msf_parameters enp2611_msf_parameters =
119 } 119 }
120}; 120};
121 121
122struct enp2611_ixpdev_priv
123{
124 struct ixpdev_priv ixpdev_priv;
125 struct net_device_stats stats;
126};
127
128static struct net_device *nds[3]; 122static struct net_device *nds[3];
129static struct timer_list link_check_timer; 123static struct timer_list link_check_timer;
130 124
131static struct net_device_stats *enp2611_get_stats(struct net_device *dev)
132{
133 struct enp2611_ixpdev_priv *ip = netdev_priv(dev);
134
135 pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats));
136
137 return &(ip->stats);
138}
139
140/* @@@ Poll the SFP moddef0 line too. */ 125/* @@@ Poll the SFP moddef0 line too. */
141/* @@@ Try to use the pm3386 DOOL interrupt as well. */ 126/* @@@ Try to use the pm3386 DOOL interrupt as well. */
142static void enp2611_check_link_status(unsigned long __dummy) 127static void enp2611_check_link_status(unsigned long __dummy)
@@ -203,14 +188,13 @@ static int __init enp2611_init_module(void)
203 188
204 ports = pm3386_port_count(); 189 ports = pm3386_port_count();
205 for (i = 0; i < ports; i++) { 190 for (i = 0; i < ports; i++) {
206 nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); 191 nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
207 if (nds[i] == NULL) { 192 if (nds[i] == NULL) {
208 while (--i >= 0) 193 while (--i >= 0)
209 free_netdev(nds[i]); 194 free_netdev(nds[i]);
210 return -ENOMEM; 195 return -ENOMEM;
211 } 196 }
212 197
213 nds[i]->get_stats = enp2611_get_stats;
214 pm3386_init_port(i); 198 pm3386_init_port(i);
215 pm3386_get_mac(i, nds[i]->dev_addr); 199 pm3386_get_mac(i, nds[i]->dev_addr);
216 } 200 }
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 127243461a51..9aee0cc922c9 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -21,6 +21,7 @@
21#include "ixp2400_tx.ucode" 21#include "ixp2400_tx.ucode"
22#include "ixpdev_priv.h" 22#include "ixpdev_priv.h"
23#include "ixpdev.h" 23#include "ixpdev.h"
24#include "pm3386.h"
24 25
25#define DRV_MODULE_VERSION "0.2" 26#define DRV_MODULE_VERSION "0.2"
26 27
@@ -271,6 +272,15 @@ static int ixpdev_close(struct net_device *dev)
271 return 0; 272 return 0;
272} 273}
273 274
275static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
276{
277 struct ixpdev_priv *ip = netdev_priv(dev);
278
279 pm3386_get_stats(ip->channel, &(dev->stats));
280
281 return &(dev->stats);
282}
283
274static const struct net_device_ops ixpdev_netdev_ops = { 284static const struct net_device_ops ixpdev_netdev_ops = {
275 .ndo_open = ixpdev_open, 285 .ndo_open = ixpdev_open,
276 .ndo_stop = ixpdev_close, 286 .ndo_stop = ixpdev_close,
@@ -278,6 +288,7 @@ static const struct net_device_ops ixpdev_netdev_ops = {
278 .ndo_change_mtu = eth_change_mtu, 288 .ndo_change_mtu = eth_change_mtu,
279 .ndo_validate_addr = eth_validate_addr, 289 .ndo_validate_addr = eth_validate_addr,
280 .ndo_set_mac_address = eth_mac_addr, 290 .ndo_set_mac_address = eth_mac_addr,
291 .ndo_get_stats = ixpdev_get_stats,
281#ifdef CONFIG_NET_POLL_CONTROLLER 292#ifdef CONFIG_NET_POLL_CONTROLLER
282 .ndo_poll_controller = ixpdev_poll_controller, 293 .ndo_poll_controller = ixpdev_poll_controller,
283#endif 294#endif
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 547ac7c7479c..a23f739d222f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -171,6 +171,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
171} 171}
172 172
173/** 173/**
174 * ks8851_wrreg8 - write 8bit register value to chip
175 * @ks: The chip state
176 * @reg: The register address
177 * @val: The value to write
178 *
179 * Issue a write to put the value @val into the register specified in @reg.
180 */
181static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
182{
183 struct spi_transfer *xfer = &ks->spi_xfer1;
184 struct spi_message *msg = &ks->spi_msg1;
185 __le16 txb[2];
186 int ret;
187 int bit;
188
189 bit = 1 << (reg & 3);
190
191 txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
192 txb[1] = val;
193
194 xfer->tx_buf = txb;
195 xfer->rx_buf = NULL;
196 xfer->len = 3;
197
198 ret = spi_sync(ks->spidev, msg);
199 if (ret < 0)
200 ks_err(ks, "spi_sync() failed\n");
201}
202
203/**
174 * ks8851_rx_1msg - select whether to use one or two messages for spi read 204 * ks8851_rx_1msg - select whether to use one or two messages for spi read
175 * @ks: The device structure 205 * @ks: The device structure
176 * 206 *
@@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
322static int ks8851_write_mac_addr(struct net_device *dev) 352static int ks8851_write_mac_addr(struct net_device *dev)
323{ 353{
324 struct ks8851_net *ks = netdev_priv(dev); 354 struct ks8851_net *ks = netdev_priv(dev);
325 u16 *mcp = (u16 *)dev->dev_addr; 355 int i;
326 356
327 mutex_lock(&ks->lock); 357 mutex_lock(&ks->lock);
328 358
329 ks8851_wrreg16(ks, KS_MARL, mcp[0]); 359 for (i = 0; i < ETH_ALEN; i++)
330 ks8851_wrreg16(ks, KS_MARM, mcp[1]); 360 ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
331 ks8851_wrreg16(ks, KS_MARH, mcp[2]);
332 361
333 mutex_unlock(&ks->lock); 362 mutex_unlock(&ks->lock);
334 363
@@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
951 mcptr = mcptr->next; 980 mcptr = mcptr->next;
952 } 981 }
953 982
954 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA; 983 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
955 } else { 984 } else {
956 /* just accept broadcast / unicast */ 985 /* just accept broadcast / unicast */
957 rxctrl.rxcr1 = RXCR1_RXPAFMA; 986 rxctrl.rxcr1 = RXCR1_RXPAFMA;
@@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1239 ndev->netdev_ops = &ks8851_netdev_ops; 1268 ndev->netdev_ops = &ks8851_netdev_ops;
1240 ndev->irq = spi->irq; 1269 ndev->irq = spi->irq;
1241 1270
1271 /* issue a global soft reset to reset the device. */
1272 ks8851_soft_reset(ks, GRR_GSR);
1273
1242 /* simple check for a valid chip being connected to the bus */ 1274 /* simple check for a valid chip being connected to the bus */
1243 1275
1244 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1276 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
@@ -1321,3 +1353,4 @@ MODULE_LICENSE("GPL");
1321 1353
1322module_param_named(message, msg_enable, int, 0); 1354module_param_named(message, msg_enable, int, 0);
1323MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); 1355MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1356MODULE_ALIAS("spi:ks8851");
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index 85abe147afbf..f52c312cc356 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -16,6 +16,7 @@
16#define CCR_32PIN (1 << 0) 16#define CCR_32PIN (1 << 0)
17 17
18/* MAC address registers */ 18/* MAC address registers */
19#define KS_MAR(_m) 0x15 - (_m)
19#define KS_MARL 0x10 20#define KS_MARL 0x10
20#define KS_MARM 0x12 21#define KS_MARM 0x12
21#define KS_MARH 0x14 22#define KS_MARH 0x14
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
new file mode 100644
index 000000000000..0be14d702beb
--- /dev/null
+++ b/drivers/net/ks8851_mll.c
@@ -0,0 +1,1697 @@
1/**
2 * drivers/net/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/**
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc.
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/cache.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/platform_device.h>
33#include <linux/delay.h>
34
35#define DRV_NAME "ks8851_mll"
36
37static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
38#define MAX_RECV_FRAMES 32
39#define MAX_BUF_SIZE 2048
40#define TX_BUF_SIZE 2000
41#define RX_BUF_SIZE 2000
42
43#define KS_CCR 0x08
44#define CCR_EEPROM (1 << 9)
45#define CCR_SPI (1 << 8)
46#define CCR_8BIT (1 << 7)
47#define CCR_16BIT (1 << 6)
48#define CCR_32BIT (1 << 5)
49#define CCR_SHARED (1 << 4)
50#define CCR_32PIN (1 << 0)
51
52/* MAC address registers */
53#define KS_MARL 0x10
54#define KS_MARM 0x12
55#define KS_MARH 0x14
56
57#define KS_OBCR 0x20
58#define OBCR_ODS_16MA (1 << 6)
59
60#define KS_EEPCR 0x22
61#define EEPCR_EESA (1 << 4)
62#define EEPCR_EESB (1 << 3)
63#define EEPCR_EEDO (1 << 2)
64#define EEPCR_EESCK (1 << 1)
65#define EEPCR_EECS (1 << 0)
66
67#define KS_MBIR 0x24
68#define MBIR_TXMBF (1 << 12)
69#define MBIR_TXMBFA (1 << 11)
70#define MBIR_RXMBF (1 << 4)
71#define MBIR_RXMBFA (1 << 3)
72
73#define KS_GRR 0x26
74#define GRR_QMU (1 << 1)
75#define GRR_GSR (1 << 0)
76
77#define KS_WFCR 0x2A
78#define WFCR_MPRXE (1 << 7)
79#define WFCR_WF3E (1 << 3)
80#define WFCR_WF2E (1 << 2)
81#define WFCR_WF1E (1 << 1)
82#define WFCR_WF0E (1 << 0)
83
84#define KS_WF0CRC0 0x30
85#define KS_WF0CRC1 0x32
86#define KS_WF0BM0 0x34
87#define KS_WF0BM1 0x36
88#define KS_WF0BM2 0x38
89#define KS_WF0BM3 0x3A
90
91#define KS_WF1CRC0 0x40
92#define KS_WF1CRC1 0x42
93#define KS_WF1BM0 0x44
94#define KS_WF1BM1 0x46
95#define KS_WF1BM2 0x48
96#define KS_WF1BM3 0x4A
97
98#define KS_WF2CRC0 0x50
99#define KS_WF2CRC1 0x52
100#define KS_WF2BM0 0x54
101#define KS_WF2BM1 0x56
102#define KS_WF2BM2 0x58
103#define KS_WF2BM3 0x5A
104
105#define KS_WF3CRC0 0x60
106#define KS_WF3CRC1 0x62
107#define KS_WF3BM0 0x64
108#define KS_WF3BM1 0x66
109#define KS_WF3BM2 0x68
110#define KS_WF3BM3 0x6A
111
112#define KS_TXCR 0x70
113#define TXCR_TCGICMP (1 << 8)
114#define TXCR_TCGUDP (1 << 7)
115#define TXCR_TCGTCP (1 << 6)
116#define TXCR_TCGIP (1 << 5)
117#define TXCR_FTXQ (1 << 4)
118#define TXCR_TXFCE (1 << 3)
119#define TXCR_TXPE (1 << 2)
120#define TXCR_TXCRC (1 << 1)
121#define TXCR_TXE (1 << 0)
122
123#define KS_TXSR 0x72
124#define TXSR_TXLC (1 << 13)
125#define TXSR_TXMC (1 << 12)
126#define TXSR_TXFID_MASK (0x3f << 0)
127#define TXSR_TXFID_SHIFT (0)
128#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
129
130
131#define KS_RXCR1 0x74
132#define RXCR1_FRXQ (1 << 15)
133#define RXCR1_RXUDPFCC (1 << 14)
134#define RXCR1_RXTCPFCC (1 << 13)
135#define RXCR1_RXIPFCC (1 << 12)
136#define RXCR1_RXPAFMA (1 << 11)
137#define RXCR1_RXFCE (1 << 10)
138#define RXCR1_RXEFE (1 << 9)
139#define RXCR1_RXMAFMA (1 << 8)
140#define RXCR1_RXBE (1 << 7)
141#define RXCR1_RXME (1 << 6)
142#define RXCR1_RXUE (1 << 5)
143#define RXCR1_RXAE (1 << 4)
144#define RXCR1_RXINVF (1 << 1)
145#define RXCR1_RXE (1 << 0)
146#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
147 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
148
149#define KS_RXCR2 0x76
150#define RXCR2_SRDBL_MASK (0x7 << 5)
151#define RXCR2_SRDBL_SHIFT (5)
152#define RXCR2_SRDBL_4B (0x0 << 5)
153#define RXCR2_SRDBL_8B (0x1 << 5)
154#define RXCR2_SRDBL_16B (0x2 << 5)
155#define RXCR2_SRDBL_32B (0x3 << 5)
156/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
157#define RXCR2_IUFFP (1 << 4)
158#define RXCR2_RXIUFCEZ (1 << 3)
159#define RXCR2_UDPLFE (1 << 2)
160#define RXCR2_RXICMPFCC (1 << 1)
161#define RXCR2_RXSAF (1 << 0)
162
163#define KS_TXMIR 0x78
164
165#define KS_RXFHSR 0x7C
166#define RXFSHR_RXFV (1 << 15)
167#define RXFSHR_RXICMPFCS (1 << 13)
168#define RXFSHR_RXIPFCS (1 << 12)
169#define RXFSHR_RXTCPFCS (1 << 11)
170#define RXFSHR_RXUDPFCS (1 << 10)
171#define RXFSHR_RXBF (1 << 7)
172#define RXFSHR_RXMF (1 << 6)
173#define RXFSHR_RXUF (1 << 5)
174#define RXFSHR_RXMR (1 << 4)
175#define RXFSHR_RXFT (1 << 3)
176#define RXFSHR_RXFTL (1 << 2)
177#define RXFSHR_RXRF (1 << 1)
178#define RXFSHR_RXCE (1 << 0)
179#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
180 RXFSHR_RXFTL | RXFSHR_RXMR |\
181 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
182 RXFSHR_RXTCPFCS)
183#define KS_RXFHBCR 0x7E
184#define RXFHBCR_CNT_MASK 0x0FFF
185
186#define KS_TXQCR 0x80
187#define TXQCR_AETFE (1 << 2)
188#define TXQCR_TXQMAM (1 << 1)
189#define TXQCR_METFE (1 << 0)
190
191#define KS_RXQCR 0x82
192#define RXQCR_RXDTTS (1 << 12)
193#define RXQCR_RXDBCTS (1 << 11)
194#define RXQCR_RXFCTS (1 << 10)
195#define RXQCR_RXIPHTOE (1 << 9)
196#define RXQCR_RXDTTE (1 << 7)
197#define RXQCR_RXDBCTE (1 << 6)
198#define RXQCR_RXFCTE (1 << 5)
199#define RXQCR_ADRFE (1 << 4)
200#define RXQCR_SDA (1 << 3)
201#define RXQCR_RRXEF (1 << 0)
202#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
203
204#define KS_TXFDPR 0x84
205#define TXFDPR_TXFPAI (1 << 14)
206#define TXFDPR_TXFP_MASK (0x7ff << 0)
207#define TXFDPR_TXFP_SHIFT (0)
208
209#define KS_RXFDPR 0x86
210#define RXFDPR_RXFPAI (1 << 14)
211
212#define KS_RXDTTR 0x8C
213#define KS_RXDBCTR 0x8E
214
215#define KS_IER 0x90
216#define KS_ISR 0x92
217#define IRQ_LCI (1 << 15)
218#define IRQ_TXI (1 << 14)
219#define IRQ_RXI (1 << 13)
220#define IRQ_RXOI (1 << 11)
221#define IRQ_TXPSI (1 << 9)
222#define IRQ_RXPSI (1 << 8)
223#define IRQ_TXSAI (1 << 6)
224#define IRQ_RXWFDI (1 << 5)
225#define IRQ_RXMPDI (1 << 4)
226#define IRQ_LDI (1 << 3)
227#define IRQ_EDI (1 << 2)
228#define IRQ_SPIBEI (1 << 1)
229#define IRQ_DEDI (1 << 0)
230
231#define KS_RXFCTR 0x9C
232#define RXFCTR_THRESHOLD_MASK 0x00FF
233
234#define KS_RXFC 0x9D
235#define RXFCTR_RXFC_MASK (0xff << 8)
236#define RXFCTR_RXFC_SHIFT (8)
237#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
238#define RXFCTR_RXFCT_MASK (0xff << 0)
239#define RXFCTR_RXFCT_SHIFT (0)
240
241#define KS_TXNTFSR 0x9E
242
243#define KS_MAHTR0 0xA0
244#define KS_MAHTR1 0xA2
245#define KS_MAHTR2 0xA4
246#define KS_MAHTR3 0xA6
247
248#define KS_FCLWR 0xB0
249#define KS_FCHWR 0xB2
250#define KS_FCOWR 0xB4
251
252#define KS_CIDER 0xC0
253#define CIDER_ID 0x8870
254#define CIDER_REV_MASK (0x7 << 1)
255#define CIDER_REV_SHIFT (1)
256#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
257
258#define KS_CGCR 0xC6
259#define KS_IACR 0xC8
260#define IACR_RDEN (1 << 12)
261#define IACR_TSEL_MASK (0x3 << 10)
262#define IACR_TSEL_SHIFT (10)
263#define IACR_TSEL_MIB (0x3 << 10)
264#define IACR_ADDR_MASK (0x1f << 0)
265#define IACR_ADDR_SHIFT (0)
266
267#define KS_IADLR 0xD0
268#define KS_IAHDR 0xD2
269
270#define KS_PMECR 0xD4
271#define PMECR_PME_DELAY (1 << 14)
272#define PMECR_PME_POL (1 << 12)
273#define PMECR_WOL_WAKEUP (1 << 11)
274#define PMECR_WOL_MAGICPKT (1 << 10)
275#define PMECR_WOL_LINKUP (1 << 9)
276#define PMECR_WOL_ENERGY (1 << 8)
277#define PMECR_AUTO_WAKE_EN (1 << 7)
278#define PMECR_WAKEUP_NORMAL (1 << 6)
279#define PMECR_WKEVT_MASK (0xf << 2)
280#define PMECR_WKEVT_SHIFT (2)
281#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
282#define PMECR_WKEVT_ENERGY (0x1 << 2)
283#define PMECR_WKEVT_LINK (0x2 << 2)
284#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
285#define PMECR_WKEVT_FRAME (0x8 << 2)
286#define PMECR_PM_MASK (0x3 << 0)
287#define PMECR_PM_SHIFT (0)
288#define PMECR_PM_NORMAL (0x0 << 0)
289#define PMECR_PM_ENERGY (0x1 << 0)
290#define PMECR_PM_SOFTDOWN (0x2 << 0)
291#define PMECR_PM_POWERSAVE (0x3 << 0)
292
293/* Standard MII PHY data */
294#define KS_P1MBCR 0xE4
295#define P1MBCR_FORCE_FDX (1 << 8)
296
297#define KS_P1MBSR 0xE6
298#define P1MBSR_AN_COMPLETE (1 << 5)
299#define P1MBSR_AN_CAPABLE (1 << 3)
300#define P1MBSR_LINK_UP (1 << 2)
301
302#define KS_PHY1ILR 0xE8
303#define KS_PHY1IHR 0xEA
304#define KS_P1ANAR 0xEC
305#define KS_P1ANLPR 0xEE
306
307#define KS_P1SCLMD 0xF4
308#define P1SCLMD_LEDOFF (1 << 15)
309#define P1SCLMD_TXIDS (1 << 14)
310#define P1SCLMD_RESTARTAN (1 << 13)
311#define P1SCLMD_DISAUTOMDIX (1 << 10)
312#define P1SCLMD_FORCEMDIX (1 << 9)
313#define P1SCLMD_AUTONEGEN (1 << 7)
314#define P1SCLMD_FORCE100 (1 << 6)
315#define P1SCLMD_FORCEFDX (1 << 5)
316#define P1SCLMD_ADV_FLOW (1 << 4)
317#define P1SCLMD_ADV_100BT_FDX (1 << 3)
318#define P1SCLMD_ADV_100BT_HDX (1 << 2)
319#define P1SCLMD_ADV_10BT_FDX (1 << 1)
320#define P1SCLMD_ADV_10BT_HDX (1 << 0)
321
322#define KS_P1CR 0xF6
323#define P1CR_HP_MDIX (1 << 15)
324#define P1CR_REV_POL (1 << 13)
325#define P1CR_OP_100M (1 << 10)
326#define P1CR_OP_FDX (1 << 9)
327#define P1CR_OP_MDI (1 << 7)
328#define P1CR_AN_DONE (1 << 6)
329#define P1CR_LINK_GOOD (1 << 5)
330#define P1CR_PNTR_FLOW (1 << 4)
331#define P1CR_PNTR_100BT_FDX (1 << 3)
332#define P1CR_PNTR_100BT_HDX (1 << 2)
333#define P1CR_PNTR_10BT_FDX (1 << 1)
334#define P1CR_PNTR_10BT_HDX (1 << 0)
335
336/* TX Frame control */
337
338#define TXFR_TXIC (1 << 15)
339#define TXFR_TXFID_MASK (0x3f << 0)
340#define TXFR_TXFID_SHIFT (0)
341
342#define KS_P1SR 0xF8
343#define P1SR_HP_MDIX (1 << 15)
344#define P1SR_REV_POL (1 << 13)
345#define P1SR_OP_100M (1 << 10)
346#define P1SR_OP_FDX (1 << 9)
347#define P1SR_OP_MDI (1 << 7)
348#define P1SR_AN_DONE (1 << 6)
349#define P1SR_LINK_GOOD (1 << 5)
350#define P1SR_PNTR_FLOW (1 << 4)
351#define P1SR_PNTR_100BT_FDX (1 << 3)
352#define P1SR_PNTR_100BT_HDX (1 << 2)
353#define P1SR_PNTR_10BT_FDX (1 << 1)
354#define P1SR_PNTR_10BT_HDX (1 << 0)
355
356#define ENUM_BUS_NONE 0
357#define ENUM_BUS_8BIT 1
358#define ENUM_BUS_16BIT 2
359#define ENUM_BUS_32BIT 3
360
361#define MAX_MCAST_LST 32
362#define HW_MCAST_SIZE 8
363#define MAC_ADDR_LEN 6
364
365/**
366 * union ks_tx_hdr - tx header data
367 * @txb: The header as bytes
368 * @txw: The header as 16bit, little-endian words
369 *
370 * A dual representation of the tx header data to allow
371 * access to individual bytes, and to allow 16bit accesses
372 * with 16bit alignment.
373 */
374union ks_tx_hdr {
375 u8 txb[4];
376 __le16 txw[2];
377};
378
379/**
380 * struct ks_net - KS8851 driver private data
381 * @net_device : The network device we're bound to
382 * @hw_addr : start address of data register.
383 * @hw_addr_cmd : start address of command register.
384 * @txh : temporaly buffer to save status/length.
385 * @lock : Lock to ensure that the device is not accessed when busy.
386 * @pdev : Pointer to platform device.
387 * @mii : The MII state information for the mii calls.
388 * @frame_head_info : frame header information for multi-pkt rx.
389 * @statelock : Lock on this structure for tx list.
390 * @msg_enable : The message flags controlling driver output (see ethtool).
391 * @frame_cnt : number of frames received.
392 * @bus_width : i/o bus width.
393 * @irq : irq number assigned to this device.
394 * @rc_rxqcr : Cached copy of KS_RXQCR.
395 * @rc_txcr : Cached copy of KS_TXCR.
396 * @rc_ier : Cached copy of KS_IER.
397 * @sharedbus : Multipex(addr and data bus) mode indicator.
398 * @cmd_reg_cache : command register cached.
399 * @cmd_reg_cache_int : command register cached. Used in the irq handler.
400 * @promiscuous : promiscuous mode indicator.
401 * @all_mcast : mutlicast indicator.
402 * @mcast_lst_size : size of multicast list.
403 * @mcast_lst : multicast list.
404 * @mcast_bits : multicast enabed.
405 * @mac_addr : MAC address assigned to this device.
406 * @fid : frame id.
407 * @extra_byte : number of extra byte prepended rx pkt.
408 * @enabled : indicator this device works.
409 *
410 * The @lock ensures that the chip is protected when certain operations are
411 * in progress. When the read or write packet transfer is in progress, most
412 * of the chip registers are not accessible until the transfer is finished and
413 * the DMA has been de-asserted.
414 *
415 * The @statelock is used to protect information in the structure which may
416 * need to be accessed via several sources, such as the network driver layer
417 * or one of the work queues.
418 *
419 */
420
421/* Receive multiplex framer header info */
422struct type_frame_head {
423 u16 sts; /* Frame status */
424 u16 len; /* Byte count */
425};
426
427struct ks_net {
428 struct net_device *netdev;
429 void __iomem *hw_addr;
430 void __iomem *hw_addr_cmd;
431 union ks_tx_hdr txh ____cacheline_aligned;
432 struct mutex lock; /* spinlock to be interrupt safe */
433 struct platform_device *pdev;
434 struct mii_if_info mii;
435 struct type_frame_head *frame_head_info;
436 spinlock_t statelock;
437 u32 msg_enable;
438 u32 frame_cnt;
439 int bus_width;
440 int irq;
441
442 u16 rc_rxqcr;
443 u16 rc_txcr;
444 u16 rc_ier;
445 u16 sharedbus;
446 u16 cmd_reg_cache;
447 u16 cmd_reg_cache_int;
448 u16 promiscuous;
449 u16 all_mcast;
450 u16 mcast_lst_size;
451 u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
452 u8 mcast_bits[HW_MCAST_SIZE];
453 u8 mac_addr[6];
454 u8 fid;
455 u8 extra_byte;
456 u8 enabled;
457};
458
459static int msg_enable;
460
461#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
462#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
463#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
464#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
465
466#define BE3 0x8000 /* Byte Enable 3 */
467#define BE2 0x4000 /* Byte Enable 2 */
468#define BE1 0x2000 /* Byte Enable 1 */
469#define BE0 0x1000 /* Byte Enable 0 */
470
471/**
472 * register read/write calls.
473 *
474 * All these calls issue transactions to access the chip's registers. They
475 * all require that the necessary lock is held to prevent accesses when the
476 * chip is busy transfering packet data (RX/TX FIFO accesses).
477 */
478
479/**
480 * ks_rdreg8 - read 8 bit register from device
481 * @ks : The chip information
482 * @offset: The register address
483 *
484 * Read a 8bit register from the chip, returning the result
485 */
486static u8 ks_rdreg8(struct ks_net *ks, int offset)
487{
488 u16 data;
489 u8 shift_bit = offset & 0x03;
490 u8 shift_data = (offset & 1) << 3;
491 ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
492 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
493 data = ioread16(ks->hw_addr);
494 return (u8)(data >> shift_data);
495}
496
497/**
498 * ks_rdreg16 - read 16 bit register from device
499 * @ks : The chip information
500 * @offset: The register address
501 *
502 * Read a 16bit register from the chip, returning the result
503 */
504
505static u16 ks_rdreg16(struct ks_net *ks, int offset)
506{
507 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
508 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
509 return ioread16(ks->hw_addr);
510}
511
512/**
513 * ks_wrreg8 - write 8bit register value to chip
514 * @ks: The chip information
515 * @offset: The register address
516 * @value: The value to write
517 *
518 */
519static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
520{
521 u8 shift_bit = (offset & 0x03);
522 u16 value_write = (u16)(value << ((offset & 1) << 3));
523 ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
524 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
525 iowrite16(value_write, ks->hw_addr);
526}
527
528/**
529 * ks_wrreg16 - write 16bit register value to chip
530 * @ks: The chip information
531 * @offset: The register address
532 * @value: The value to write
533 *
534 */
535
536static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
537{
538 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
539 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
540 iowrite16(value, ks->hw_addr);
541}
542
543/**
544 * ks_inblk - read a block of data from QMU. This is called after sudo DMA mode enabled.
545 * @ks: The chip state
546 * @wptr: buffer address to save data
547 * @len: length in byte to read
548 *
549 */
550static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
551{
552 len >>= 1;
553 while (len--)
554 *wptr++ = (u16)ioread16(ks->hw_addr);
555}
556
557/**
558 * ks_outblk - write data to QMU. This is called after sudo DMA mode enabled.
559 * @ks: The chip information
560 * @wptr: buffer address
561 * @len: length in byte to write
562 *
563 */
564static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
565{
566 len >>= 1;
567 while (len--)
568 iowrite16(*wptr++, ks->hw_addr);
569}
570
571/**
572 * ks_tx_fifo_space - return the available hardware buffer size.
573 * @ks: The chip information
574 *
575 */
576static inline u16 ks_tx_fifo_space(struct ks_net *ks)
577{
578 return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
579}
580
581/**
582 * ks_save_cmd_reg - save the command register from the cache.
583 * @ks: The chip information
584 *
585 */
586static inline void ks_save_cmd_reg(struct ks_net *ks)
587{
588 /*ks8851 MLL has a bug to read back the command register.
589 * So rely on software to save the content of command register.
590 */
591 ks->cmd_reg_cache_int = ks->cmd_reg_cache;
592}
593
594/**
595 * ks_restore_cmd_reg - restore the command register from the cache and
596 * write to hardware register.
597 * @ks: The chip information
598 *
599 */
600static inline void ks_restore_cmd_reg(struct ks_net *ks)
601{
602 ks->cmd_reg_cache = ks->cmd_reg_cache_int;
603 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
604}
605
606/**
607 * ks_set_powermode - set power mode of the device
608 * @ks: The chip information
609 * @pwrmode: The power mode value to write to KS_PMECR.
610 *
611 * Change the power mode of the chip.
612 */
613static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
614{
615 unsigned pmecr;
616
617 if (netif_msg_hw(ks))
618 ks_dbg(ks, "setting power mode %d\n", pwrmode);
619
620 ks_rdreg16(ks, KS_GRR);
621 pmecr = ks_rdreg16(ks, KS_PMECR);
622 pmecr &= ~PMECR_PM_MASK;
623 pmecr |= pwrmode;
624
625 ks_wrreg16(ks, KS_PMECR, pmecr);
626}
627
628/**
629 * ks_read_config - read chip configuration of bus width.
630 * @ks: The chip information
631 *
632 */
633static void ks_read_config(struct ks_net *ks)
634{
635 u16 reg_data = 0;
636
637 /* Regardless of bus width, 8 bit read should always work.*/
638 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
639 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
640
641 /* addr/data bus are multiplexed */
642 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
643
644 /* There are garbage data when reading data from QMU,
645 depending on bus-width.
646 */
647
648 if (reg_data & CCR_8BIT) {
649 ks->bus_width = ENUM_BUS_8BIT;
650 ks->extra_byte = 1;
651 } else if (reg_data & CCR_16BIT) {
652 ks->bus_width = ENUM_BUS_16BIT;
653 ks->extra_byte = 2;
654 } else {
655 ks->bus_width = ENUM_BUS_32BIT;
656 ks->extra_byte = 4;
657 }
658}
659
660/**
661 * ks_soft_reset - issue one of the soft reset to the device
662 * @ks: The device state.
663 * @op: The bit(s) to set in the GRR
664 *
665 * Issue the relevant soft-reset command to the device's GRR register
666 * specified by @op.
667 *
668 * Note, the delays are in there as a caution to ensure that the reset
669 * has time to take effect and then complete. Since the datasheet does
670 * not currently specify the exact sequence, we have chosen something
671 * that seems to work with our device.
672 */
673static void ks_soft_reset(struct ks_net *ks, unsigned op)
674{
675 /* Disable interrupt first */
676 ks_wrreg16(ks, KS_IER, 0x0000);
677 ks_wrreg16(ks, KS_GRR, op);
678 mdelay(10); /* wait a short time to effect reset */
679 ks_wrreg16(ks, KS_GRR, 0);
680 mdelay(1); /* wait for condition to clear */
681}
682
683
684/**
685 * ks_read_qmu - read 1 pkt data from the QMU.
686 * @ks: The chip information
687 * @buf: buffer address to save 1 pkt
688 * @len: Pkt length
689 * Here is the sequence to read 1 pkt:
690 * 1. set sudo DMA mode
691 * 2. read prepend data
692 * 3. read pkt data
693 * 4. reset sudo DMA Mode
694 */
695static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
696{
697 u32 r = ks->extra_byte & 0x1 ;
698 u32 w = ks->extra_byte - r;
699
700 /* 1. set sudo DMA mode */
701 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
702 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
703
704 /* 2. read prepend data */
705 /**
706 * read 4 + extra bytes and discard them.
707 * extra bytes for dummy, 2 for status, 2 for len
708 */
709
710 /* use likely(r) for 8 bit access for performance */
711 if (unlikely(r))
712 ioread8(ks->hw_addr);
713 ks_inblk(ks, buf, w + 2 + 2);
714
715 /* 3. read pkt data */
716 ks_inblk(ks, buf, ALIGN(len, 4));
717
718 /* 4. reset sudo DMA Mode */
719 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
720}
721
722/**
723 * ks_rcv - read multiple pkts data from the QMU.
724 * @ks: The chip information
725 * @netdev: The network device being opened.
726 *
727 * Read all of header information before reading pkt content.
728 * It is not allowed only port of pkts in QMU after issuing
729 * interrupt ack.
730 */
731static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
732{
733 u32 i;
734 struct type_frame_head *frame_hdr = ks->frame_head_info;
735 struct sk_buff *skb;
736
737 ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
738
739 /* read all header information */
740 for (i = 0; i < ks->frame_cnt; i++) {
741 /* Checking Received packet status */
742 frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
743 /* Get packet len from hardware */
744 frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
745 frame_hdr++;
746 }
747
748 frame_hdr = ks->frame_head_info;
749 while (ks->frame_cnt--) {
750 skb = dev_alloc_skb(frame_hdr->len + 16);
751 if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
752 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
753 skb_reserve(skb, 2);
754 /* read data block including CRC 4 bytes */
755 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
756 skb_put(skb, frame_hdr->len);
757 skb->dev = netdev;
758 skb->protocol = eth_type_trans(skb, netdev);
759 netif_rx(skb);
760 } else {
761 printk(KERN_ERR "%s: err:skb alloc\n", __func__);
762 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
763 if (skb)
764 dev_kfree_skb_irq(skb);
765 }
766 frame_hdr++;
767 }
768}
769
770/**
771 * ks_update_link_status - link status update.
772 * @netdev: The network device being opened.
773 * @ks: The chip information
774 *
775 */
776
777static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
778{
779 /* check the status of the link */
780 u32 link_up_status;
781 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
782 netif_carrier_on(netdev);
783 link_up_status = true;
784 } else {
785 netif_carrier_off(netdev);
786 link_up_status = false;
787 }
788 if (netif_msg_link(ks))
789 ks_dbg(ks, "%s: %s\n",
790 __func__, link_up_status ? "UP" : "DOWN");
791}
792
793/**
794 * ks_irq - device interrupt handler
795 * @irq: Interrupt number passed from the IRQ hnalder.
796 * @pw: The private word passed to register_irq(), our struct ks_net.
797 *
798 * This is the handler invoked to find out what happened
799 *
800 * Read the interrupt status, work out what needs to be done and then clear
801 * any of the interrupts that are not needed.
802 */
803
static irqreturn_t ks_irq(int irq, void *pw)
{
	/* dev_id is the ks_net pointer passed to request_irq() */
	struct ks_net *ks = pw;
	struct net_device *netdev = ks->netdev;
	u16 status;

	/*this should be the first in IRQ handler */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		/* not our interrupt - restore the command register and bail */
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	/* ack exactly the sources we are about to service */
	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {

		/* linkup-detect wake event: clear the latched wake reason */
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	/* this should be the last in IRQ handler*/
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}
841
842
843/**
844 * ks_net_open - open network device
845 * @netdev: The network device being opened.
846 *
847 * Called when the network device is marked active, such as a user executing
848 * 'ifconfig up' on the device.
849 */
850static int ks_net_open(struct net_device *netdev)
851{
852 struct ks_net *ks = netdev_priv(netdev);
853 int err;
854
855#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
856 /* lock the card, even if we may not actually do anything
857 * else at the moment.
858 */
859
860 if (netif_msg_ifup(ks))
861 ks_dbg(ks, "%s - entry\n", __func__);
862
863 /* reset the HW */
864 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);
865
866 if (err) {
867 printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
868 ks->irq, err);
869 return err;
870 }
871
872 if (netif_msg_ifup(ks))
873 ks_dbg(ks, "network device %s up\n", netdev->name);
874
875 return 0;
876}
877
878/**
879 * ks_net_stop - close network device
880 * @netdev: The device being closed.
881 *
882 * Called to close down a network device which has been active. Cancell any
883 * work, shutdown the RX and TX process and then place the chip into a low
884 * power state whilst it is not being used.
885 */
886static int ks_net_stop(struct net_device *netdev)
887{
888 struct ks_net *ks = netdev_priv(netdev);
889
890 if (netif_msg_ifdown(ks))
891 ks_info(ks, "%s: shutting down\n", netdev->name);
892
893 netif_stop_queue(netdev);
894
895 kfree(ks->frame_head_info);
896
897 mutex_lock(&ks->lock);
898
899 /* turn off the IRQs and ack any outstanding */
900 ks_wrreg16(ks, KS_IER, 0x0000);
901 ks_wrreg16(ks, KS_ISR, 0xffff);
902
903 /* shutdown RX process */
904 ks_wrreg16(ks, KS_RXCR1, 0x0000);
905
906 /* shutdown TX process */
907 ks_wrreg16(ks, KS_TXCR, 0x0000);
908
909 /* set powermode to soft power down to save power */
910 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
911 free_irq(ks->irq, netdev);
912 mutex_unlock(&ks->lock);
913 return 0;
914}
915
916
917/**
918 * ks_write_qmu - write 1 pkt data to the QMU.
919 * @ks: The chip information
920 * @pdata: buffer address to save 1 pkt
921 * @len: Pkt length in byte
922 * Here is the sequence to write 1 pkt:
923 * 1. set sudo DMA mode
924 * 2. write status/length
925 * 3. write pkt data
926 * 4. reset sudo DMA Mode
927 * 5. reset sudo DMA mode
928 * 6. Wait until pkt is out
929 */
930static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
931{
932 unsigned fid = ks->fid;
933
934 fid = ks->fid;
935 ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
936
937 /* reduce the tx interrupt occurrances. */
938 if (!fid)
939 fid |= TXFR_TXIC; /* irq on completion */
940
941 /* start header at txb[0] to align txw entries */
942 ks->txh.txw[0] = cpu_to_le16(fid);
943 ks->txh.txw[1] = cpu_to_le16(len);
944
945 /* 1. set sudo-DMA mode */
946 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
947 /* 2. write status/lenth info */
948 ks_outblk(ks, ks->txh.txw, 4);
949 /* 3. write pkt data */
950 ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
951 /* 4. reset sudo-DMA mode */
952 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
953 /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
954 ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
955 /* 6. wait until TXQCR_METFE is auto-cleared */
956 while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
957 ;
958}
959
/* ks_disable_int - mask all chip interrupt sources (IER = 0) */
static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}  /* ks_disable_int */
964
/* ks_enable_int - unmask the interrupt sources cached by ks_setup_int() */
static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}  /* ks_enable_int */
969
970/**
971 * ks_start_xmit - transmit packet
972 * @skb : The buffer to transmit
973 * @netdev : The device used to transmit the packet.
974 *
975 * Called by the network layer to transmit the @skb.
976 * spin_lock_irqsave is required because tx and rx should be mutual exclusive.
977 * So while tx is in-progress, prevent IRQ interrupt from happenning.
978 */
979static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
980{
981 int retv = NETDEV_TX_OK;
982 struct ks_net *ks = netdev_priv(netdev);
983
984 disable_irq(netdev->irq);
985 ks_disable_int(ks);
986 spin_lock(&ks->statelock);
987
988 /* Extra space are required:
989 * 4 byte for alignment, 4 for status/length, 4 for CRC
990 */
991
992 if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
993 ks_write_qmu(ks, skb->data, skb->len);
994 dev_kfree_skb(skb);
995 } else
996 retv = NETDEV_TX_BUSY;
997 spin_unlock(&ks->statelock);
998 ks_enable_int(ks);
999 enable_irq(netdev->irq);
1000 return retv;
1001}
1002
1003/**
1004 * ks_start_rx - ready to serve pkts
1005 * @ks : The chip information
1006 *
1007 */
1008static void ks_start_rx(struct ks_net *ks)
1009{
1010 u16 cntl;
1011
1012 /* Enables QMU Receive (RXCR1). */
1013 cntl = ks_rdreg16(ks, KS_RXCR1);
1014 cntl |= RXCR1_RXE ;
1015 ks_wrreg16(ks, KS_RXCR1, cntl);
1016} /* ks_start_rx */
1017
1018/**
1019 * ks_stop_rx - stop to serve pkts
1020 * @ks : The chip information
1021 *
1022 */
1023static void ks_stop_rx(struct ks_net *ks)
1024{
1025 u16 cntl;
1026
1027 /* Disables QMU Receive (RXCR1). */
1028 cntl = ks_rdreg16(ks, KS_RXCR1);
1029 cntl &= ~RXCR1_RXE ;
1030 ks_wrreg16(ks, KS_RXCR1, cntl);
1031
1032} /* ks_stop_rx */
1033
static unsigned long const ethernet_polynomial = 0x04c11db7U;

/**
 * ether_gen_crc - ethernet CRC-32 over a buffer, for multicast hashing
 * @length: number of bytes in @data
 * @data: bytes to checksum
 *
 * MSB-first CRC (poly 0x04c11db7, preset all-ones) over bit-reversed input
 * bytes - the classic "ether_crc" form used for multicast hash filters.
 *
 * The accumulator is a fixed 32-bit unsigned value.  The original used a
 * plain 'long' and tested 'crc < 0' for the feedback bit: on 64-bit
 * machines that tests bit 63 instead of bit 31, producing wrong hashes,
 * and left-shifting a negative value is undefined behavior besides.
 */
static unsigned long ether_gen_crc(int length, u8 *data)
{
	unsigned int crc = 0xffffffffU;	/* preset to all ones */

	while (--length >= 0) {
		u8 current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			/* feedback = top bit of crc XOR next input bit */
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (current_octet & 1)) & 1 ?
			       ethernet_polynomial : 0);
		}
	}
	return (unsigned long)crc;
} /* ether_gen_crc */
1051
1052/**
1053* ks_set_grpaddr - set multicast information
1054* @ks : The chip information
1055*/
1056
1057static void ks_set_grpaddr(struct ks_net *ks)
1058{
1059 u8 i;
1060 u32 index, position, value;
1061
1062 memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1063
1064 for (i = 0; i < ks->mcast_lst_size; i++) {
1065 position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1066 index = position >> 3;
1067 value = 1 << (position & 7);
1068 ks->mcast_bits[index] |= (u8)value;
1069 }
1070
1071 for (i = 0; i < HW_MCAST_SIZE; i++) {
1072 if (i & 1) {
1073 ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1074 (ks->mcast_bits[i] << 8) |
1075 ks->mcast_bits[i - 1]);
1076 }
1077 }
1078} /* ks_set_grpaddr */
1079
1080/*
1081* ks_clear_mcast - clear multicast information
1082*
1083* @ks : The chip information
1084* This routine removes all mcast addresses set in the hardware.
1085*/
1086
1087static void ks_clear_mcast(struct ks_net *ks)
1088{
1089 u16 i, mcast_size;
1090 for (i = 0; i < HW_MCAST_SIZE; i++)
1091 ks->mcast_bits[i] = 0;
1092
1093 mcast_size = HW_MCAST_SIZE >> 2;
1094 for (i = 0; i < mcast_size; i++)
1095 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1096}
1097
1098static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1099{
1100 u16 cntl;
1101 ks->promiscuous = promiscuous_mode;
1102 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1103 cntl = ks_rdreg16(ks, KS_RXCR1);
1104
1105 cntl &= ~RXCR1_FILTER_MASK;
1106 if (promiscuous_mode)
1107 /* Enable Promiscuous mode */
1108 cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1109 else
1110 /* Disable Promiscuous mode (default normal mode) */
1111 cntl |= RXCR1_RXPAFMA;
1112
1113 ks_wrreg16(ks, KS_RXCR1, cntl);
1114
1115 if (ks->enabled)
1116 ks_start_rx(ks);
1117
1118} /* ks_set_promis */
1119
1120static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1121{
1122 u16 cntl;
1123
1124 ks->all_mcast = mcast;
1125 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1126 cntl = ks_rdreg16(ks, KS_RXCR1);
1127 cntl &= ~RXCR1_FILTER_MASK;
1128 if (mcast)
1129 /* Enable "Perfect with Multicast address passed mode" */
1130 cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1131 else
1132 /**
1133 * Disable "Perfect with Multicast address passed
1134 * mode" (normal mode).
1135 */
1136 cntl |= RXCR1_RXPAFMA;
1137
1138 ks_wrreg16(ks, KS_RXCR1, cntl);
1139
1140 if (ks->enabled)
1141 ks_start_rx(ks);
1142} /* ks_set_mcast */
1143
1144static void ks_set_rx_mode(struct net_device *netdev)
1145{
1146 struct ks_net *ks = netdev_priv(netdev);
1147 struct dev_mc_list *ptr;
1148
1149 /* Turn on/off promiscuous mode. */
1150 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1151 ks_set_promis(ks,
1152 (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1153 /* Turn on/off all mcast mode. */
1154 else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1155 ks_set_mcast(ks,
1156 (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1157 else
1158 ks_set_promis(ks, false);
1159
1160 if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
1161 if (netdev->mc_count <= MAX_MCAST_LST) {
1162 int i = 0;
1163 for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
1164 if (!(*ptr->dmi_addr & 1))
1165 continue;
1166 if (i >= MAX_MCAST_LST)
1167 break;
1168 memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
1169 MAC_ADDR_LEN);
1170 }
1171 ks->mcast_lst_size = (u8)i;
1172 ks_set_grpaddr(ks);
1173 } else {
1174 /**
1175 * List too big to support so
1176 * turn on all mcast mode.
1177 */
1178 ks->mcast_lst_size = MAX_MCAST_LST;
1179 ks_set_mcast(ks, true);
1180 }
1181 } else {
1182 ks->mcast_lst_size = 0;
1183 ks_clear_mcast(ks);
1184 }
1185} /* ks_set_rx_mode */
1186
1187static void ks_set_mac(struct ks_net *ks, u8 *data)
1188{
1189 u16 *pw = (u16 *)data;
1190 u16 w, u;
1191
1192 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1193
1194 u = *pw++;
1195 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1196 ks_wrreg16(ks, KS_MARH, w);
1197
1198 u = *pw++;
1199 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1200 ks_wrreg16(ks, KS_MARM, w);
1201
1202 u = *pw;
1203 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1204 ks_wrreg16(ks, KS_MARL, w);
1205
1206 memcpy(ks->mac_addr, data, 6);
1207
1208 if (ks->enabled)
1209 ks_start_rx(ks);
1210}
1211
1212static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1213{
1214 struct ks_net *ks = netdev_priv(netdev);
1215 struct sockaddr *addr = paddr;
1216 u8 *da;
1217
1218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1219
1220 da = (u8 *)netdev->dev_addr;
1221
1222 ks_set_mac(ks, da);
1223 return 0;
1224}
1225
/* ndo_do_ioctl: forward MII ioctls to the generic MII layer; only valid
 * while the interface is running */
static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
}
1235
/* net_device callbacks; all handlers are defined in this file */
static const struct net_device_ops ks_netdev_ops = {
	.ndo_open		= ks_net_open,
	.ndo_stop		= ks_net_stop,
	.ndo_do_ioctl		= ks_net_ioctl,
	.ndo_start_xmit		= ks_start_xmit,
	.ndo_set_mac_address	= ks_set_mac_address,
	.ndo_set_rx_mode	= ks_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
1246
1247/* ethtool support */
1248
/* ethtool: report driver name, version and bus location */
static void ks_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}
1257
/* ethtool: return the current NETIF_MSG_* verbosity bitmap */
static u32 ks_get_msglevel(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return ks->msg_enable;
}
1263
/* ethtool: set the NETIF_MSG_* verbosity bitmap */
static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
	struct ks_net *ks = netdev_priv(netdev);
	ks->msg_enable = to;
}
1269
/* ethtool: read link settings via the generic MII helper */
static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_gset(&ks->mii, cmd);
}
1275
/* ethtool: write link settings via the generic MII helper */
static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_sset(&ks->mii, cmd);
}
1281
/* ethtool: report link state from the MII status register */
static u32 ks_get_link(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_link_ok(&ks->mii);
}
1287
/* ethtool: restart PHY auto-negotiation */
static int ks_nway_reset(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_nway_restart(&ks->mii);
}
1293
/* ethtool entry points; link control is MII-based via ks->mii */
static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo	= ks_get_drvinfo,
	.get_msglevel	= ks_get_msglevel,
	.set_msglevel	= ks_set_msglevel,
	.get_settings	= ks_get_settings,
	.set_settings	= ks_set_settings,
	.get_link	= ks_get_link,
	.nway_reset	= ks_nway_reset,
};
1303
1304/* MII interface controls */
1305
1306/**
1307 * ks_phy_reg - convert MII register into a KS8851 register
1308 * @reg: MII register number.
1309 *
1310 * Return the KS8851 register number for the corresponding MII PHY register
1311 * if possible. Return zero if the MII register has no direct mapping to the
1312 * KS8851 register set.
1313 */
static int ks_phy_reg(int reg)
{
	switch (reg) {
	case MII_BMCR:
		return KS_P1MBCR;
	case MII_BMSR:
		return KS_P1MBSR;
	case MII_PHYSID1:
		return KS_PHY1ILR;
	case MII_PHYSID2:
		return KS_PHY1IHR;
	case MII_ADVERTISE:
		return KS_P1ANAR;
	case MII_LPA:
		return KS_P1ANLPR;
	}

	/* no KS8851 equivalent for this MII register */
	return 0x0;
}
1333
1334/**
1335 * ks_phy_read - MII interface PHY register read.
1336 * @netdev: The network device the PHY is on.
1337 * @phy_addr: Address of PHY (ignored as we only have one)
1338 * @reg: The register to read.
1339 *
1340 * This call reads data from the PHY register specified in @reg. Since the
1341 * device does not support all the MII registers, the non-existant values
1342 * are always returned as zero.
1343 *
1344 * We return zero for unsupported registers as the MII code does not check
1345 * the value returned for any error status, and simply returns it to the
1346 * caller. The mii-tool that the driver was tested with takes any -ve error
1347 * as real PHY capabilities, thus displaying incorrect data to the user.
1348 */
1349static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1350{
1351 struct ks_net *ks = netdev_priv(netdev);
1352 int ksreg;
1353 int result;
1354
1355 ksreg = ks_phy_reg(reg);
1356 if (!ksreg)
1357 return 0x0; /* no error return allowed, so use zero */
1358
1359 mutex_lock(&ks->lock);
1360 result = ks_rdreg16(ks, ksreg);
1361 mutex_unlock(&ks->lock);
1362
1363 return result;
1364}
1365
1366static void ks_phy_write(struct net_device *netdev,
1367 int phy, int reg, int value)
1368{
1369 struct ks_net *ks = netdev_priv(netdev);
1370 int ksreg;
1371
1372 ksreg = ks_phy_reg(reg);
1373 if (ksreg) {
1374 mutex_lock(&ks->lock);
1375 ks_wrreg16(ks, ksreg, value);
1376 mutex_unlock(&ks->lock);
1377 }
1378}
1379
1380/**
1381 * ks_read_selftest - read the selftest memory info.
1382 * @ks: The device state
1383 *
1384 * Read and check the TX/RX memory selftest information.
1385 */
1386static int ks_read_selftest(struct ks_net *ks)
1387{
1388 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1389 int ret = 0;
1390 unsigned rd;
1391
1392 rd = ks_rdreg16(ks, KS_MBIR);
1393
1394 if ((rd & both_done) != both_done) {
1395 ks_warn(ks, "Memory selftest not finished\n");
1396 return 0;
1397 }
1398
1399 if (rd & MBIR_TXMBFA) {
1400 ks_err(ks, "TX memory selftest fails\n");
1401 ret |= 1;
1402 }
1403
1404 if (rd & MBIR_RXMBFA) {
1405 ks_err(ks, "RX memory selftest fails\n");
1406 ret |= 2;
1407 }
1408
1409 ks_info(ks, "the selftest passes\n");
1410 return ret;
1411}
1412
1413static void ks_disable(struct ks_net *ks)
1414{
1415 u16 w;
1416
1417 w = ks_rdreg16(ks, KS_TXCR);
1418
1419 /* Disables QMU Transmit (TXCR). */
1420 w &= ~TXCR_TXE;
1421 ks_wrreg16(ks, KS_TXCR, w);
1422
1423 /* Disables QMU Receive (RXCR1). */
1424 w = ks_rdreg16(ks, KS_RXCR1);
1425 w &= ~RXCR1_RXE ;
1426 ks_wrreg16(ks, KS_RXCR1, w);
1427
1428 ks->enabled = false;
1429
1430} /* ks_disable */
1431
/* ks_setup - program the baseline QMU / PHY / filter configuration */
static void ks_setup(struct ks_net *ks)
{
	u16 w;

	/**
	 * Configure QMU Transmit
	 */

	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* Setup Receive Frame Data Pointer Auto-Increment */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);

	/* Setup RxQ Command Control (RXQCR); cached so the IRQ and TX
	 * paths can restore it after pseudo-DMA accesses */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/**
	 * Set the PHY's forced mode to half duplex (default is full
	 * duplex): if auto-negotiation fails, most switches fall back to
	 * half duplex, so this is the safer forced default.
	 */
	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~P1MBCR_FORCE_FDX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	/* TX: flow control, padding, CRC generation, plus TCGIP
	 * (presumably IP checksum generation - confirm against datasheet) */
	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	/* RX: flow control, broadcast and unicast enable, plus the filter
	 * mode matching the cached promiscuous/all-multicast state */
	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;

	if (ks->promiscuous) /* bPromiscuous */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast) /* Multicast address passed mode */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else /* Normal mode */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
} /*ks_setup */
1477
1478
1479static void ks_setup_int(struct ks_net *ks)
1480{
1481 ks->rc_ier = 0x00;
1482 /* Clear the interrupts status of the hardware. */
1483 ks_wrreg16(ks, KS_ISR, 0xffff);
1484
1485 /* Enables the interrupts of the hardware. */
1486 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1487} /* ks_setup_int */
1488
1489void ks_enable(struct ks_net *ks)
1490{
1491 u16 w;
1492
1493 w = ks_rdreg16(ks, KS_TXCR);
1494 /* Enables QMU Transmit (TXCR). */
1495 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
1496
1497 /*
1498 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
1499 * Enable
1500 */
1501
1502 w = ks_rdreg16(ks, KS_RXQCR);
1503 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
1504
1505 /* Enables QMU Receive (RXCR1). */
1506 w = ks_rdreg16(ks, KS_RXCR1);
1507 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
1508 ks->enabled = true;
1509} /* ks_enable */
1510
1511static int ks_hw_init(struct ks_net *ks)
1512{
1513#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1514 ks->promiscuous = 0;
1515 ks->all_mcast = 0;
1516 ks->mcast_lst_size = 0;
1517
1518 ks->frame_head_info = (struct type_frame_head *) \
1519 kmalloc(MHEADER_SIZE, GFP_KERNEL);
1520 if (!ks->frame_head_info) {
1521 printk(KERN_ERR "Error: Fail to allocate frame memory\n");
1522 return false;
1523 }
1524
1525 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1526 return true;
1527}
1528
1529
1530static int __devinit ks8851_probe(struct platform_device *pdev)
1531{
1532 int err = -ENOMEM;
1533 struct resource *io_d, *io_c;
1534 struct net_device *netdev;
1535 struct ks_net *ks;
1536 u16 id, data;
1537
1538 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1539 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1540
1541 if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1542 goto err_mem_region;
1543
1544 if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1545 goto err_mem_region1;
1546
1547 netdev = alloc_etherdev(sizeof(struct ks_net));
1548 if (!netdev)
1549 goto err_alloc_etherdev;
1550
1551 SET_NETDEV_DEV(netdev, &pdev->dev);
1552
1553 ks = netdev_priv(netdev);
1554 ks->netdev = netdev;
1555 ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1556
1557 if (!ks->hw_addr)
1558 goto err_ioremap;
1559
1560 ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1561 if (!ks->hw_addr_cmd)
1562 goto err_ioremap1;
1563
1564 ks->irq = platform_get_irq(pdev, 0);
1565
1566 if (ks->irq < 0) {
1567 err = ks->irq;
1568 goto err_get_irq;
1569 }
1570
1571 ks->pdev = pdev;
1572
1573 mutex_init(&ks->lock);
1574 spin_lock_init(&ks->statelock);
1575
1576 netdev->netdev_ops = &ks_netdev_ops;
1577 netdev->ethtool_ops = &ks_ethtool_ops;
1578
1579 /* setup mii state */
1580 ks->mii.dev = netdev;
1581 ks->mii.phy_id = 1,
1582 ks->mii.phy_id_mask = 1;
1583 ks->mii.reg_num_mask = 0xf;
1584 ks->mii.mdio_read = ks_phy_read;
1585 ks->mii.mdio_write = ks_phy_write;
1586
1587 ks_info(ks, "message enable is %d\n", msg_enable);
1588 /* set the default message enable */
1589 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1590 NETIF_MSG_PROBE |
1591 NETIF_MSG_LINK));
1592 ks_read_config(ks);
1593
1594 /* simple check for a valid chip being connected to the bus */
1595 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1596 ks_err(ks, "failed to read device ID\n");
1597 err = -ENODEV;
1598 goto err_register;
1599 }
1600
1601 if (ks_read_selftest(ks)) {
1602 ks_err(ks, "failed to read device ID\n");
1603 err = -ENODEV;
1604 goto err_register;
1605 }
1606
1607 err = register_netdev(netdev);
1608 if (err)
1609 goto err_register;
1610
1611 platform_set_drvdata(pdev, netdev);
1612
1613 ks_soft_reset(ks, GRR_GSR);
1614 ks_hw_init(ks);
1615 ks_disable(ks);
1616 ks_setup(ks);
1617 ks_setup_int(ks);
1618 ks_enable_int(ks);
1619 ks_enable(ks);
1620 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1621
1622 data = ks_rdreg16(ks, KS_OBCR);
1623 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1624
1625 /**
1626 * If you want to use the default MAC addr,
1627 * comment out the 2 functions below.
1628 */
1629
1630 random_ether_addr(netdev->dev_addr);
1631 ks_set_mac(ks, netdev->dev_addr);
1632
1633 id = ks_rdreg16(ks, KS_CIDER);
1634
1635 printk(KERN_INFO DRV_NAME
1636 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1637 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1638 return 0;
1639
1640err_register:
1641err_get_irq:
1642 iounmap(ks->hw_addr_cmd);
1643err_ioremap1:
1644 iounmap(ks->hw_addr);
1645err_ioremap:
1646 free_netdev(netdev);
1647err_alloc_etherdev:
1648 release_mem_region(io_c->start, resource_size(io_c));
1649err_mem_region1:
1650 release_mem_region(io_d->start, resource_size(io_d));
1651err_mem_region:
1652 return err;
1653}
1654
1655static int __devexit ks8851_remove(struct platform_device *pdev)
1656{
1657 struct net_device *netdev = platform_get_drvdata(pdev);
1658 struct ks_net *ks = netdev_priv(netdev);
1659 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1660
1661 unregister_netdev(netdev);
1662 iounmap(ks->hw_addr);
1663 free_netdev(netdev);
1664 release_mem_region(iomem->start, resource_size(iomem));
1665 platform_set_drvdata(pdev, NULL);
1666 return 0;
1667
1668}
1669
/* platform driver glue; binds devices named DRV_NAME */
static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ks8851_probe,
	.remove = __devexit_p(ks8851_remove),
};
1678
/* module load: register the platform driver */
static int __init ks8851_init(void)
{
	return platform_driver_register(&ks8851_platform_driver);
}
1683
/* module unload: unregister the platform driver */
static void __exit ks8851_exit(void)
{
	platform_driver_unregister(&ks8851_platform_driver);
}
1688
1689module_init(ks8851_init);
1690module_exit(ks8851_exit);
1691
1692MODULE_DESCRIPTION("KS8851 MLL Network driver");
1693MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1694MODULE_LICENSE("GPL");
1695module_param_named(message, msg_enable, int, 0);
1696MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1697
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 92ceb689b4d4..2af81735386b 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev)
828 828
829static struct platform_driver meth_driver = { 829static struct platform_driver meth_driver = {
830 .probe = meth_probe, 830 .probe = meth_probe,
831 .remove = __devexit_p(meth_remove), 831 .remove = __exit_p(meth_remove),
832 .driver = { 832 .driver = {
833 .name = "meth", 833 .name = "meth",
834 .owner = THIS_MODULE, 834 .owner = THIS_MODULE,
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index cee199ceba2f..3c16602172fc 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/mlx4/cmd.h> 35#include <linux/mlx4/cmd.h>
36#include <linux/cache.h>
36 37
37#include "fw.h" 38#include "fw.h"
38#include "icm.h" 39#include "icm.h"
@@ -698,6 +699,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
698#define INIT_HCA_IN_SIZE 0x200 699#define INIT_HCA_IN_SIZE 0x200
699#define INIT_HCA_VERSION_OFFSET 0x000 700#define INIT_HCA_VERSION_OFFSET 0x000
700#define INIT_HCA_VERSION 2 701#define INIT_HCA_VERSION 2
702#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
701#define INIT_HCA_FLAGS_OFFSET 0x014 703#define INIT_HCA_FLAGS_OFFSET 0x014
702#define INIT_HCA_QPC_OFFSET 0x020 704#define INIT_HCA_QPC_OFFSET 0x020
703#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 705#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
@@ -735,6 +737,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
735 737
736 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 738 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
737 739
740 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
741 (ilog2(cache_line_size()) - 4) << 5;
742
738#if defined(__LITTLE_ENDIAN) 743#if defined(__LITTLE_ENDIAN)
739 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); 744 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
740#elif defined(__BIG_ENDIAN) 745#elif defined(__BIG_ENDIAN)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3dd481e77f92..291a505fd4fc 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1282,6 +1282,8 @@ static struct pci_device_id mlx4_pci_table[] = {
1282 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 1282 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1283 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 1283 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1284 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1284 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1285 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1286 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1285 { 0, } 1287 { 0, }
1286}; 1288};
1287 1289
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 6930c87f362e..f3624517cb0e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.5.0-1.432" 78#define MYRI10GE_VERSION_STR "1.5.1-1.451"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1624,10 +1624,21 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1624 return 0; 1624 return 0;
1625 } 1625 }
1626 } 1626 }
1627 if (*ptr == 'R' || *ptr == 'Q') { 1627 if (*ptr == '2')
1628 /* We've found either an XFP or quad ribbon fiber */ 1628 ptr++;
1629 if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
1630 /* We've found either an XFP, quad ribbon fiber, or SFP+ */
1629 cmd->port = PORT_FIBRE; 1631 cmd->port = PORT_FIBRE;
1632 cmd->supported |= SUPPORTED_FIBRE;
1633 cmd->advertising |= ADVERTISED_FIBRE;
1634 } else {
1635 cmd->port = PORT_OTHER;
1630 } 1636 }
1637 if (*ptr == 'R' || *ptr == 'S')
1638 cmd->transceiver = XCVR_EXTERNAL;
1639 else
1640 cmd->transceiver = XCVR_INTERNAL;
1641
1631 return 0; 1642 return 0;
1632} 1643}
1633 1644
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 7a7177421d7c..1c46da632125 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -419,6 +419,7 @@ enum {
419#define NETXEN_CRB_ROMUSB \ 419#define NETXEN_CRB_ROMUSB \
420 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) 420 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
421#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) 421#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
422#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
422#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) 423#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
423#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) 424#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
424 425
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 32314000dfcd..3185a98b0917 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -1901,22 +1901,16 @@ netxen_setup_hwops(struct netxen_adapter *adapter)
1901 1901
1902int netxen_nic_get_board_info(struct netxen_adapter *adapter) 1902int netxen_nic_get_board_info(struct netxen_adapter *adapter)
1903{ 1903{
1904 int offset, board_type, magic, header_version; 1904 int offset, board_type, magic;
1905 struct pci_dev *pdev = adapter->pdev; 1905 struct pci_dev *pdev = adapter->pdev;
1906 1906
1907 offset = NX_FW_MAGIC_OFFSET; 1907 offset = NX_FW_MAGIC_OFFSET;
1908 if (netxen_rom_fast_read(adapter, offset, &magic)) 1908 if (netxen_rom_fast_read(adapter, offset, &magic))
1909 return -EIO; 1909 return -EIO;
1910 1910
1911 offset = NX_HDR_VERSION_OFFSET; 1911 if (magic != NETXEN_BDINFO_MAGIC) {
1912 if (netxen_rom_fast_read(adapter, offset, &header_version)) 1912 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1913 return -EIO; 1913 magic);
1914
1915 if (magic != NETXEN_BDINFO_MAGIC ||
1916 header_version != NETXEN_BDINFO_VERSION) {
1917 dev_err(&pdev->dev,
1918 "invalid board config, magic=%08x, version=%08x\n",
1919 magic, header_version);
1920 return -EIO; 1914 return -EIO;
1921 } 1915 }
1922 1916
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 91c2bc61c8eb..e40b914d6faf 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -531,6 +531,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
531 continue; 531 continue;
532 532
533 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 533 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
534 if (off == (NETXEN_CRB_I2C0 + 0x1c))
535 continue;
534 /* do not reset PCI */ 536 /* do not reset PCI */
535 if (off == (ROMUSB_GLB + 0xbc)) 537 if (off == (ROMUSB_GLB + 0xbc))
536 continue; 538 continue;
@@ -553,12 +555,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
553 continue; 555 continue;
554 } 556 }
555 557
556 if (off == NETXEN_ADDR_ERROR) {
557 printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
558 netxen_nic_driver_name, buf[i].addr);
559 continue;
560 }
561
562 init_delay = 1; 558 init_delay = 1;
563 /* After writing this register, HW needs time for CRB */ 559 /* After writing this register, HW needs time for CRB */
564 /* to quiet down (else crb_window returns 0xffffffff) */ 560 /* to quiet down (else crb_window returns 0xffffffff) */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index f7bdde111dfc..0b4a56a8c8d5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -595,7 +595,8 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
595 void __iomem *mem_ptr2 = NULL; 595 void __iomem *mem_ptr2 = NULL;
596 void __iomem *db_ptr = NULL; 596 void __iomem *db_ptr = NULL;
597 597
598 unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0; 598 resource_size_t mem_base, db_base;
599 unsigned long mem_len, db_len = 0, pci_len0 = 0;
599 600
600 struct pci_dev *pdev = adapter->pdev; 601 struct pci_dev *pdev = adapter->pdev;
601 int pci_func = adapter->ahw.pci_func; 602 int pci_func = adapter->ahw.pci_func;
@@ -1469,6 +1470,7 @@ netxen_nic_resume(struct pci_dev *pdev)
1469 } 1470 }
1470 1471
1471 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); 1472 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
1473 return 0;
1472 1474
1473err_out_detach: 1475err_out_detach:
1474 netxen_nic_detach(adapter); 1476 netxen_nic_detach(adapter);
@@ -1713,7 +1715,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1713 /* 4 fragments per cmd des */ 1715 /* 4 fragments per cmd des */
1714 no_of_desc = (frag_count + 3) >> 2; 1716 no_of_desc = (frag_count + 3) >> 2;
1715 1717
1716 if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { 1718 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
1717 netif_stop_queue(netdev); 1719 netif_stop_queue(netdev);
1718 return NETDEV_TX_BUSY; 1720 return NETDEV_TX_BUSY;
1719 } 1721 }
@@ -1903,12 +1905,13 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1903 1905
1904 netif_wake_queue(adapter->netdev); 1906 netif_wake_queue(adapter->netdev);
1905 1907
1906 goto done; 1908 clear_bit(__NX_RESETTING, &adapter->state);
1907 1909
1908 } else { 1910 } else {
1911 clear_bit(__NX_RESETTING, &adapter->state);
1909 if (!netxen_nic_reset_context(adapter)) { 1912 if (!netxen_nic_reset_context(adapter)) {
1910 adapter->netdev->trans_start = jiffies; 1913 adapter->netdev->trans_start = jiffies;
1911 goto done; 1914 return;
1912 } 1915 }
1913 1916
1914 /* context reset failed, fall through for fw reset */ 1917 /* context reset failed, fall through for fw reset */
@@ -1916,7 +1919,6 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1916 1919
1917request_reset: 1920request_reset:
1918 adapter->need_fw_reset = 1; 1921 adapter->need_fw_reset = 1;
1919done:
1920 clear_bit(__NX_RESETTING, &adapter->state); 1922 clear_bit(__NX_RESETTING, &adapter->state);
1921} 1923}
1922 1924
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 76cc2614f480..d6c7ac68f6ea 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3545 rp->rcr_index = index; 3545 rp->rcr_index = index;
3546 3546
3547 skb_reserve(skb, NET_IP_ALIGN); 3547 skb_reserve(skb, NET_IP_ALIGN);
3548 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); 3548 __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
3549 3549
3550 rp->rx_packets++; 3550 rp->rx_packets++;
3551 rp->rx_bytes += skb->len; 3551 rp->rx_bytes += skb->len;
@@ -5615,7 +5615,7 @@ static void niu_init_tx_mac(struct niu *np)
5615 /* The XMAC_MIN register only accepts values for TX min which 5615 /* The XMAC_MIN register only accepts values for TX min which
5616 * have the low 3 bits cleared. 5616 * have the low 3 bits cleared.
5617 */ 5617 */
5618 BUILD_BUG_ON(min & 0x7); 5618 BUG_ON(min & 0x7);
5619 5619
5620 if (np->flags & NIU_FLAGS_XMAC) 5620 if (np->flags & NIU_FLAGS_XMAC)
5621 niu_init_tx_xmac(np, min, max); 5621 niu_init_tx_xmac(np, min, max);
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index c594e1946476..57fd483dbb1f 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -111,6 +111,7 @@
111#include <linux/compiler.h> 111#include <linux/compiler.h>
112#include <linux/prefetch.h> 112#include <linux/prefetch.h>
113#include <linux/ethtool.h> 113#include <linux/ethtool.h>
114#include <linux/sched.h>
114#include <linux/timer.h> 115#include <linux/timer.h>
115#include <linux/if_vlan.h> 116#include <linux/if_vlan.h>
116#include <linux/rtnetlink.h> 117#include <linux/rtnetlink.h>
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 064a4fe1dd90..28a86224879d 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
71 struct pasemi_mac *mac = netdev_priv(netdev); 71 struct pasemi_mac *mac = netdev_priv(netdev);
72 struct phy_device *phydev = mac->phydev; 72 struct phy_device *phydev = mac->phydev;
73 73
74 if (!phydev)
75 return -EOPNOTSUPP;
76
74 return phy_ethtool_gset(phydev, cmd); 77 return phy_ethtool_gset(phydev, cmd);
75} 78}
76 79
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index ee8ad3e180dd..b58965a2b3ae 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -251,6 +251,7 @@ static void el3_tx_timeout(struct net_device *dev);
251static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 251static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
252static const struct ethtool_ops netdev_ethtool_ops; 252static const struct ethtool_ops netdev_ethtool_ops;
253static void set_rx_mode(struct net_device *dev); 253static void set_rx_mode(struct net_device *dev);
254static void set_multicast_list(struct net_device *dev);
254 255
255static void tc574_detach(struct pcmcia_device *p_dev); 256static void tc574_detach(struct pcmcia_device *p_dev);
256 257
@@ -266,7 +267,7 @@ static const struct net_device_ops el3_netdev_ops = {
266 .ndo_tx_timeout = el3_tx_timeout, 267 .ndo_tx_timeout = el3_tx_timeout,
267 .ndo_get_stats = el3_get_stats, 268 .ndo_get_stats = el3_get_stats,
268 .ndo_do_ioctl = el3_ioctl, 269 .ndo_do_ioctl = el3_ioctl,
269 .ndo_set_multicast_list = set_rx_mode, 270 .ndo_set_multicast_list = set_multicast_list,
270 .ndo_change_mtu = eth_change_mtu, 271 .ndo_change_mtu = eth_change_mtu,
271 .ndo_set_mac_address = eth_mac_addr, 272 .ndo_set_mac_address = eth_mac_addr,
272 .ndo_validate_addr = eth_validate_addr, 273 .ndo_validate_addr = eth_validate_addr,
@@ -1161,6 +1162,16 @@ static void set_rx_mode(struct net_device *dev)
1161 outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); 1162 outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
1162} 1163}
1163 1164
1165static void set_multicast_list(struct net_device *dev)
1166{
1167 struct el3_private *lp = netdev_priv(dev);
1168 unsigned long flags;
1169
1170 spin_lock_irqsave(&lp->window_lock, flags);
1171 set_rx_mode(dev);
1172 spin_unlock_irqrestore(&lp->window_lock, flags);
1173}
1174
1164static int el3_close(struct net_device *dev) 1175static int el3_close(struct net_device *dev)
1165{ 1176{
1166 unsigned int ioaddr = dev->base_addr; 1177 unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 97db1c732342..bd3447f04902 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -340,12 +340,11 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
340 base = &virt[hw_info[i].offset & (req.Size-1)]; 340 base = &virt[hw_info[i].offset & (req.Size-1)];
341 if ((readb(base+0) == hw_info[i].a0) && 341 if ((readb(base+0) == hw_info[i].a0) &&
342 (readb(base+2) == hw_info[i].a1) && 342 (readb(base+2) == hw_info[i].a1) &&
343 (readb(base+4) == hw_info[i].a2)) 343 (readb(base+4) == hw_info[i].a2)) {
344 break; 344 for (j = 0; j < 6; j++)
345 } 345 dev->dev_addr[j] = readb(base + (j<<1));
346 if (i < NR_INFO) { 346 break;
347 for (j = 0; j < 6; j++) 347 }
348 dev->dev_addr[j] = readb(base + (j<<1));
349 } 348 }
350 349
351 iounmap(virt); 350 iounmap(virt);
@@ -1755,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = {
1755 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), 1754 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
1756 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), 1755 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
1757 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), 1756 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
1758 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), 1757 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
1759 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), 1758 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
1760 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), 1759 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
1761 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
1762 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), 1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
1763 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), 1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1764 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), 1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
1765 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), 1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
1766 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), 1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
1767 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
1768 0xb4be14e3, 0x43ac239b, 0x0877b627), 1767 0xb4be14e3, 0x43ac239b, 0x0877b627),
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 6d28b18e7e28..c1b3f09f452c 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -31,6 +31,7 @@ static const char *const version =
31 31
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/sched.h>
34#include <linux/string.h> 35#include <linux/string.h>
35#include <linux/errno.h> 36#include <linux/errno.h>
36#include <linux/ioport.h> 37#include <linux/ioport.h>
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 250e10f2c35b..8659d341e769 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
238 }, 238 },
239 {}, 239 {},
240}; 240};
241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
241 242
242static struct of_platform_driver mdio_ofgpio_driver = { 243static struct of_platform_driver mdio_ofgpio_driver = {
243 .name = "mdio-gpio", 244 .name = "mdio-gpio",
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 7cbf6f9b51de..2559991eea6a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -111,9 +111,6 @@ struct pppoe_net {
111 rwlock_t hash_lock; 111 rwlock_t hash_lock;
112}; 112};
113 113
114/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
115static DEFINE_SPINLOCK(flush_lock);
116
117/* 114/*
118 * PPPoE could be in the following stages: 115 * PPPoE could be in the following stages:
119 * 1) Discovery stage (to obtain remote MAC and Session ID) 116 * 1) Discovery stage (to obtain remote MAC and Session ID)
@@ -303,45 +300,48 @@ static void pppoe_flush_dev(struct net_device *dev)
303 write_lock_bh(&pn->hash_lock); 300 write_lock_bh(&pn->hash_lock);
304 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 301 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
305 struct pppox_sock *po = pn->hash_table[i]; 302 struct pppox_sock *po = pn->hash_table[i];
303 struct sock *sk;
306 304
307 while (po != NULL) { 305 while (po) {
308 struct sock *sk; 306 while (po && po->pppoe_dev != dev) {
309 if (po->pppoe_dev != dev) {
310 po = po->next; 307 po = po->next;
311 continue;
312 } 308 }
309
310 if (!po)
311 break;
312
313 sk = sk_pppox(po); 313 sk = sk_pppox(po);
314 spin_lock(&flush_lock);
315 po->pppoe_dev = NULL;
316 spin_unlock(&flush_lock);
317 dev_put(dev);
318 314
319 /* We always grab the socket lock, followed by the 315 /* We always grab the socket lock, followed by the
320 * hash_lock, in that order. Since we should 316 * hash_lock, in that order. Since we should hold the
321 * hold the sock lock while doing any unbinding, 317 * sock lock while doing any unbinding, we need to
322 * we need to release the lock we're holding. 318 * release the lock we're holding. Hold a reference to
323 * Hold a reference to the sock so it doesn't disappear 319 * the sock so it doesn't disappear as we're jumping
324 * as we're jumping between locks. 320 * between locks.
325 */ 321 */
326 322
327 sock_hold(sk); 323 sock_hold(sk);
328
329 write_unlock_bh(&pn->hash_lock); 324 write_unlock_bh(&pn->hash_lock);
330 lock_sock(sk); 325 lock_sock(sk);
331 326
332 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 327 if (po->pppoe_dev == dev
328 && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
333 pppox_unbind_sock(sk); 329 pppox_unbind_sock(sk);
334 sk->sk_state = PPPOX_ZOMBIE; 330 sk->sk_state = PPPOX_ZOMBIE;
335 sk->sk_state_change(sk); 331 sk->sk_state_change(sk);
332 po->pppoe_dev = NULL;
333 dev_put(dev);
336 } 334 }
337 335
338 release_sock(sk); 336 release_sock(sk);
339 sock_put(sk); 337 sock_put(sk);
340 338
341 /* Restart scan at the beginning of this hash chain. 339 /* Restart the process from the start of the current
342 * While the lock was dropped the chain contents may 340 * hash chain. We dropped locks so the world may have
343 * have changed. 341 * change from underneath us.
344 */ 342 */
343
344 BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
345 write_lock_bh(&pn->hash_lock); 345 write_lock_bh(&pn->hash_lock);
346 po = pn->hash_table[i]; 346 po = pn->hash_table[i];
347 } 347 }
@@ -388,11 +388,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
388 struct pppox_sock *po = pppox_sk(sk); 388 struct pppox_sock *po = pppox_sk(sk);
389 struct pppox_sock *relay_po; 389 struct pppox_sock *relay_po;
390 390
391 /* Backlog receive. Semantics of backlog rcv preclude any code from
392 * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
393 * can't change.
394 */
395
391 if (sk->sk_state & PPPOX_BOUND) { 396 if (sk->sk_state & PPPOX_BOUND) {
392 ppp_input(&po->chan, skb); 397 ppp_input(&po->chan, skb);
393 } else if (sk->sk_state & PPPOX_RELAY) { 398 } else if (sk->sk_state & PPPOX_RELAY) {
394 relay_po = get_item_by_addr(dev_net(po->pppoe_dev), 399 relay_po = get_item_by_addr(sock_net(sk),
395 &po->pppoe_relay); 400 &po->pppoe_relay);
396 if (relay_po == NULL) 401 if (relay_po == NULL)
397 goto abort_kfree; 402 goto abort_kfree;
398 403
@@ -447,6 +452,10 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
447 goto drop; 452 goto drop;
448 453
449 pn = pppoe_pernet(dev_net(dev)); 454 pn = pppoe_pernet(dev_net(dev));
455
456 /* Note that get_item does a sock_hold(), so sk_pppox(po)
457 * is known to be safe.
458 */
450 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 459 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
451 if (!po) 460 if (!po)
452 goto drop; 461 goto drop;
@@ -561,6 +570,7 @@ static int pppoe_release(struct socket *sock)
561 struct sock *sk = sock->sk; 570 struct sock *sk = sock->sk;
562 struct pppox_sock *po; 571 struct pppox_sock *po;
563 struct pppoe_net *pn; 572 struct pppoe_net *pn;
573 struct net *net = NULL;
564 574
565 if (!sk) 575 if (!sk)
566 return 0; 576 return 0;
@@ -571,44 +581,28 @@ static int pppoe_release(struct socket *sock)
571 return -EBADF; 581 return -EBADF;
572 } 582 }
573 583
584 po = pppox_sk(sk);
585
586 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
587 dev_put(po->pppoe_dev);
588 po->pppoe_dev = NULL;
589 }
590
574 pppox_unbind_sock(sk); 591 pppox_unbind_sock(sk);
575 592
576 /* Signal the death of the socket. */ 593 /* Signal the death of the socket. */
577 sk->sk_state = PPPOX_DEAD; 594 sk->sk_state = PPPOX_DEAD;
578 595
579 /* 596 net = sock_net(sk);
580 * pppoe_flush_dev could lead to a race with 597 pn = pppoe_pernet(net);
581 * this routine so we use flush_lock to eliminate
582 * such a case (we only need per-net specific data)
583 */
584 spin_lock(&flush_lock);
585 po = pppox_sk(sk);
586 if (!po->pppoe_dev) {
587 spin_unlock(&flush_lock);
588 goto out;
589 }
590 pn = pppoe_pernet(dev_net(po->pppoe_dev));
591 spin_unlock(&flush_lock);
592 598
593 /* 599 /*
594 * protect "po" from concurrent updates 600 * protect "po" from concurrent updates
595 * on pppoe_flush_dev 601 * on pppoe_flush_dev
596 */ 602 */
597 write_lock_bh(&pn->hash_lock); 603 delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
604 po->pppoe_ifindex);
598 605
599 po = pppox_sk(sk);
600 if (stage_session(po->pppoe_pa.sid))
601 __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
602 po->pppoe_ifindex);
603
604 if (po->pppoe_dev) {
605 dev_put(po->pppoe_dev);
606 po->pppoe_dev = NULL;
607 }
608
609 write_unlock_bh(&pn->hash_lock);
610
611out:
612 sock_orphan(sk); 606 sock_orphan(sk);
613 sock->sk = NULL; 607 sock->sk = NULL;
614 608
@@ -625,8 +619,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
625 struct sock *sk = sock->sk; 619 struct sock *sk = sock->sk;
626 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; 620 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
627 struct pppox_sock *po = pppox_sk(sk); 621 struct pppox_sock *po = pppox_sk(sk);
628 struct net_device *dev; 622 struct net_device *dev = NULL;
629 struct pppoe_net *pn; 623 struct pppoe_net *pn;
624 struct net *net = NULL;
630 int error; 625 int error;
631 626
632 lock_sock(sk); 627 lock_sock(sk);
@@ -652,12 +647,14 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
652 /* Delete the old binding */ 647 /* Delete the old binding */
653 if (stage_session(po->pppoe_pa.sid)) { 648 if (stage_session(po->pppoe_pa.sid)) {
654 pppox_unbind_sock(sk); 649 pppox_unbind_sock(sk);
650 pn = pppoe_pernet(sock_net(sk));
651 delete_item(pn, po->pppoe_pa.sid,
652 po->pppoe_pa.remote, po->pppoe_ifindex);
655 if (po->pppoe_dev) { 653 if (po->pppoe_dev) {
656 pn = pppoe_pernet(dev_net(po->pppoe_dev));
657 delete_item(pn, po->pppoe_pa.sid,
658 po->pppoe_pa.remote, po->pppoe_ifindex);
659 dev_put(po->pppoe_dev); 654 dev_put(po->pppoe_dev);
655 po->pppoe_dev = NULL;
660 } 656 }
657
661 memset(sk_pppox(po) + 1, 0, 658 memset(sk_pppox(po) + 1, 0,
662 sizeof(struct pppox_sock) - sizeof(struct sock)); 659 sizeof(struct pppox_sock) - sizeof(struct sock));
663 sk->sk_state = PPPOX_NONE; 660 sk->sk_state = PPPOX_NONE;
@@ -666,16 +663,15 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
666 /* Re-bind in session stage only */ 663 /* Re-bind in session stage only */
667 if (stage_session(sp->sa_addr.pppoe.sid)) { 664 if (stage_session(sp->sa_addr.pppoe.sid)) {
668 error = -ENODEV; 665 error = -ENODEV;
669 dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev); 666 net = sock_net(sk);
667 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
670 if (!dev) 668 if (!dev)
671 goto end; 669 goto err_put;
672 670
673 po->pppoe_dev = dev; 671 po->pppoe_dev = dev;
674 po->pppoe_ifindex = dev->ifindex; 672 po->pppoe_ifindex = dev->ifindex;
675 pn = pppoe_pernet(dev_net(dev)); 673 pn = pppoe_pernet(net);
676 write_lock_bh(&pn->hash_lock);
677 if (!(dev->flags & IFF_UP)) { 674 if (!(dev->flags & IFF_UP)) {
678 write_unlock_bh(&pn->hash_lock);
679 goto err_put; 675 goto err_put;
680 } 676 }
681 677
@@ -683,6 +679,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
683 &sp->sa_addr.pppoe, 679 &sp->sa_addr.pppoe,
684 sizeof(struct pppoe_addr)); 680 sizeof(struct pppoe_addr));
685 681
682 write_lock_bh(&pn->hash_lock);
686 error = __set_item(pn, po); 683 error = __set_item(pn, po);
687 write_unlock_bh(&pn->hash_lock); 684 write_unlock_bh(&pn->hash_lock);
688 if (error < 0) 685 if (error < 0)
@@ -696,8 +693,11 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
696 po->chan.ops = &pppoe_chan_ops; 693 po->chan.ops = &pppoe_chan_ops;
697 694
698 error = ppp_register_net_channel(dev_net(dev), &po->chan); 695 error = ppp_register_net_channel(dev_net(dev), &po->chan);
699 if (error) 696 if (error) {
697 delete_item(pn, po->pppoe_pa.sid,
698 po->pppoe_pa.remote, po->pppoe_ifindex);
700 goto err_put; 699 goto err_put;
700 }
701 701
702 sk->sk_state = PPPOX_CONNECTED; 702 sk->sk_state = PPPOX_CONNECTED;
703 } 703 }
@@ -915,6 +915,14 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
915 struct pppoe_hdr *ph; 915 struct pppoe_hdr *ph;
916 int data_len = skb->len; 916 int data_len = skb->len;
917 917
918 /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
919 * xmit operations conclude prior to an unregistration call. Thus
920 * sk->sk_state cannot change, so we don't need to do lock_sock().
921 * But, we also can't do a lock_sock since that introduces a potential
922 * deadlock as we'd reverse the lock ordering used when calling
923 * ppp_unregister_channel().
924 */
925
918 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 926 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
919 goto abort; 927 goto abort;
920 928
@@ -944,7 +952,6 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
944 po->pppoe_pa.remote, NULL, data_len); 952 po->pppoe_pa.remote, NULL, data_len);
945 953
946 dev_queue_xmit(skb); 954 dev_queue_xmit(skb);
947
948 return 1; 955 return 1;
949 956
950abort: 957abort:
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index cc394d073755..5910df60c93e 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2179,7 +2179,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
2179 * session or the special tunnel type. 2179 * session or the special tunnel type.
2180 */ 2180 */
2181static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, 2181static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
2182 char __user *optval, int optlen) 2182 char __user *optval, unsigned int optlen)
2183{ 2183{
2184 struct sock *sk = sock->sk; 2184 struct sock *sk = sock->sk;
2185 struct pppol2tp_session *session = sk->sk_user_data; 2185 struct pppol2tp_session *session = sk->sk_user_data;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a9845a2f243f..c2383adcd527 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -9,6 +9,7 @@
9 9
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/rtnetlink.h>
12 13
13/* 14/*
14 * General definitions... 15 * General definitions...
@@ -94,6 +95,7 @@ enum {
94 95
95 /* Misc. stuff */ 96 /* Misc. stuff */
96 MAILBOX_COUNT = 16, 97 MAILBOX_COUNT = 16,
98 MAILBOX_TIMEOUT = 5,
97 99
98 PROC_ADDR_RDY = (1 << 31), 100 PROC_ADDR_RDY = (1 << 31),
99 PROC_ADDR_R = (1 << 30), 101 PROC_ADDR_R = (1 << 30),
@@ -135,9 +137,9 @@ enum {
135 RST_FO_TFO = (1 << 0), 137 RST_FO_TFO = (1 << 0),
136 RST_FO_RR_MASK = 0x00060000, 138 RST_FO_RR_MASK = 0x00060000,
137 RST_FO_RR_CQ_CAM = 0x00000000, 139 RST_FO_RR_CQ_CAM = 0x00000000,
138 RST_FO_RR_DROP = 0x00000001, 140 RST_FO_RR_DROP = 0x00000002,
139 RST_FO_RR_DQ = 0x00000002, 141 RST_FO_RR_DQ = 0x00000004,
140 RST_FO_RR_RCV_FUNC_CQ = 0x00000003, 142 RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
141 RST_FO_FRB = (1 << 12), 143 RST_FO_FRB = (1 << 12),
142 RST_FO_MOP = (1 << 13), 144 RST_FO_MOP = (1 << 13),
143 RST_FO_REG = (1 << 14), 145 RST_FO_REG = (1 << 14),
@@ -802,6 +804,12 @@ enum {
802 MB_CMD_SET_PORT_CFG = 0x00000122, 804 MB_CMD_SET_PORT_CFG = 0x00000122,
803 MB_CMD_GET_PORT_CFG = 0x00000123, 805 MB_CMD_GET_PORT_CFG = 0x00000123,
804 MB_CMD_GET_LINK_STS = 0x00000124, 806 MB_CMD_GET_LINK_STS = 0x00000124,
807 MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
808 MB_SET_MPI_TFK_STOP = (1 << 0),
809 MB_SET_MPI_TFK_RESUME = (1 << 1),
810 MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
811 MB_GET_MPI_TFK_STOPPED = (1 << 0),
812 MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
805 813
806 /* Mailbox Command Status. */ 814 /* Mailbox Command Status. */
807 MB_CMD_STS_GOOD = 0x00004000, /* Success. */ 815 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
@@ -1167,7 +1175,7 @@ struct ricb {
1167#define RSS_RI6 0x40 1175#define RSS_RI6 0x40
1168#define RSS_RT6 0x80 1176#define RSS_RT6 0x80
1169 __le16 mask; 1177 __le16 mask;
1170 __le32 hash_cq_id[256]; 1178 u8 hash_cq_id[1024];
1171 __le32 ipv6_hash_key[10]; 1179 __le32 ipv6_hash_key[10];
1172 __le32 ipv4_hash_key[4]; 1180 __le32 ipv4_hash_key[4];
1173} __attribute((packed)); 1181} __attribute((packed));
@@ -1381,15 +1389,15 @@ struct intr_context {
1381 1389
1382/* adapter flags definitions. */ 1390/* adapter flags definitions. */
1383enum { 1391enum {
1384 QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. */ 1392 QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
1385 QL_LEGACY_ENABLED = (1 << 3), 1393 QL_LEGACY_ENABLED = 1,
1386 QL_MSI_ENABLED = (1 << 3), 1394 QL_MSI_ENABLED = 2,
1387 QL_MSIX_ENABLED = (1 << 4), 1395 QL_MSIX_ENABLED = 3,
1388 QL_DMA64 = (1 << 5), 1396 QL_DMA64 = 4,
1389 QL_PROMISCUOUS = (1 << 6), 1397 QL_PROMISCUOUS = 5,
1390 QL_ALLMULTI = (1 << 7), 1398 QL_ALLMULTI = 6,
1391 QL_PORT_CFG = (1 << 8), 1399 QL_PORT_CFG = 7,
1392 QL_CAM_RT_SET = (1 << 9), 1400 QL_CAM_RT_SET = 8,
1393}; 1401};
1394 1402
1395/* link_status bit definitions */ 1403/* link_status bit definitions */
@@ -1477,7 +1485,6 @@ struct ql_adapter {
1477 u32 mailbox_in; 1485 u32 mailbox_in;
1478 u32 mailbox_out; 1486 u32 mailbox_out;
1479 struct mbox_params idc_mbc; 1487 struct mbox_params idc_mbc;
1480 struct mutex mpi_mutex;
1481 1488
1482 int tx_ring_size; 1489 int tx_ring_size;
1483 int rx_ring_size; 1490 int rx_ring_size;
@@ -1606,6 +1613,8 @@ int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1606int ql_mb_about_fw(struct ql_adapter *qdev); 1613int ql_mb_about_fw(struct ql_adapter *qdev);
1607void ql_link_on(struct ql_adapter *qdev); 1614void ql_link_on(struct ql_adapter *qdev);
1608void ql_link_off(struct ql_adapter *qdev); 1615void ql_link_off(struct ql_adapter *qdev);
1616int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
1617int ql_wait_fifo_empty(struct ql_adapter *qdev);
1609 1618
1610#if 1 1619#if 1
1611#define QL_ALL_DUMP 1620#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 68f9bd280f86..52073946bce3 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
45 if (!netif_running(qdev->ndev)) 45 if (!netif_running(qdev->ndev))
46 return status; 46 return status;
47 47
48 spin_lock(&qdev->hw_lock);
49 /* Skip the default queue, and update the outbound handler 48 /* Skip the default queue, and update the outbound handler
50 * queues if they changed. 49 * queues if they changed.
51 */ 50 */
@@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
92 } 91 }
93 } 92 }
94exit: 93exit:
95 spin_unlock(&qdev->hw_lock);
96 return status; 94 return status;
97} 95}
98 96
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 7783c5db81dc..cea7531f4f40 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -34,7 +34,6 @@
34#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
35#include <linux/ethtool.h> 35#include <linux/ethtool.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
39#include <linux/delay.h> 38#include <linux/delay.h>
40#include <linux/mm.h> 39#include <linux/mm.h>
@@ -321,6 +320,37 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
321 320
322 switch (type) { 321 switch (type) {
323 case MAC_ADDR_TYPE_MULTI_MAC: 322 case MAC_ADDR_TYPE_MULTI_MAC:
323 {
324 u32 upper = (addr[0] << 8) | addr[1];
325 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
326 (addr[4] << 8) | (addr[5]);
327
328 status =
329 ql_wait_reg_rdy(qdev,
330 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
331 if (status)
332 goto exit;
333 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
334 (index << MAC_ADDR_IDX_SHIFT) |
335 type | MAC_ADDR_E);
336 ql_write32(qdev, MAC_ADDR_DATA, lower);
337 status =
338 ql_wait_reg_rdy(qdev,
339 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
340 if (status)
341 goto exit;
342 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
343 (index << MAC_ADDR_IDX_SHIFT) |
344 type | MAC_ADDR_E);
345
346 ql_write32(qdev, MAC_ADDR_DATA, upper);
347 status =
348 ql_wait_reg_rdy(qdev,
349 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 if (status)
351 goto exit;
352 break;
353 }
324 case MAC_ADDR_TYPE_CAM_MAC: 354 case MAC_ADDR_TYPE_CAM_MAC:
325 { 355 {
326 u32 cam_output; 356 u32 cam_output;
@@ -366,16 +396,14 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
366 and possibly the function id. Right now we hardcode 396 and possibly the function id. Right now we hardcode
367 the route field to NIC core. 397 the route field to NIC core.
368 */ 398 */
369 if (type == MAC_ADDR_TYPE_CAM_MAC) { 399 cam_output = (CAM_OUT_ROUTE_NIC |
370 cam_output = (CAM_OUT_ROUTE_NIC | 400 (qdev->
371 (qdev-> 401 func << CAM_OUT_FUNC_SHIFT) |
372 func << CAM_OUT_FUNC_SHIFT) | 402 (0 << CAM_OUT_CQ_ID_SHIFT));
373 (0 << CAM_OUT_CQ_ID_SHIFT)); 403 if (qdev->vlgrp)
374 if (qdev->vlgrp) 404 cam_output |= CAM_OUT_RV;
375 cam_output |= CAM_OUT_RV; 405 /* route to NIC core */
376 /* route to NIC core */ 406 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
377 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
378 }
379 break; 407 break;
380 } 408 }
381 case MAC_ADDR_TYPE_VLAN: 409 case MAC_ADDR_TYPE_VLAN:
@@ -547,14 +575,14 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
547 } 575 }
548 case RT_IDX_MCAST: /* Pass up All Multicast frames. */ 576 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
549 { 577 {
550 value = RT_IDX_DST_CAM_Q | /* dest */ 578 value = RT_IDX_DST_DFLT_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */ 579 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ 580 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
553 break; 581 break;
554 } 582 }
555 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ 583 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
556 { 584 {
557 value = RT_IDX_DST_CAM_Q | /* dest */ 585 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */ 586 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ 587 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
560 break; 588 break;
@@ -1926,12 +1954,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1926 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 1954 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1927 if (status) 1955 if (status)
1928 return; 1956 return;
1929 spin_lock(&qdev->hw_lock);
1930 if (ql_set_mac_addr_reg 1957 if (ql_set_mac_addr_reg
1931 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1958 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1932 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); 1959 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1933 } 1960 }
1934 spin_unlock(&qdev->hw_lock);
1935 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1961 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1936} 1962}
1937 1963
@@ -1945,12 +1971,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1945 if (status) 1971 if (status)
1946 return; 1972 return;
1947 1973
1948 spin_lock(&qdev->hw_lock);
1949 if (ql_set_mac_addr_reg 1974 if (ql_set_mac_addr_reg
1950 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1975 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1951 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); 1976 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1952 } 1977 }
1953 spin_unlock(&qdev->hw_lock);
1954 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1978 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1955 1979
1956} 1980}
@@ -2001,15 +2025,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2001 /* 2025 /*
2002 * Check MPI processor activity. 2026 * Check MPI processor activity.
2003 */ 2027 */
2004 if (var & STS_PI) { 2028 if ((var & STS_PI) &&
2029 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2005 /* 2030 /*
2006 * We've got an async event or mailbox completion. 2031 * We've got an async event or mailbox completion.
2007 * Handle it and clear the source of the interrupt. 2032 * Handle it and clear the source of the interrupt.
2008 */ 2033 */
2009 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); 2034 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2010 ql_disable_completion_interrupt(qdev, intr_context->intr); 2035 ql_disable_completion_interrupt(qdev, intr_context->intr);
2011 queue_delayed_work_on(smp_processor_id(), qdev->workqueue, 2036 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2012 &qdev->mpi_work, 0); 2037 queue_delayed_work_on(smp_processor_id(),
2038 qdev->workqueue, &qdev->mpi_work, 0);
2013 work_done++; 2039 work_done++;
2014 } 2040 }
2015 2041
@@ -3080,6 +3106,12 @@ err_irq:
3080 3106
3081static int ql_start_rss(struct ql_adapter *qdev) 3107static int ql_start_rss(struct ql_adapter *qdev)
3082{ 3108{
3109 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3110 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3111 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3112 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3113 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3114 0xbe, 0xac, 0x01, 0xfa};
3083 struct ricb *ricb = &qdev->ricb; 3115 struct ricb *ricb = &qdev->ricb;
3084 int status = 0; 3116 int status = 0;
3085 int i; 3117 int i;
@@ -3089,21 +3121,17 @@ static int ql_start_rss(struct ql_adapter *qdev)
3089 3121
3090 ricb->base_cq = RSS_L4K; 3122 ricb->base_cq = RSS_L4K;
3091 ricb->flags = 3123 ricb->flags =
3092 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 | 3124 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3093 RSS_RT6); 3125 ricb->mask = cpu_to_le16((u16)(0x3ff));
3094 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
3095 3126
3096 /* 3127 /*
3097 * Fill out the Indirection Table. 3128 * Fill out the Indirection Table.
3098 */ 3129 */
3099 for (i = 0; i < 256; i++) 3130 for (i = 0; i < 1024; i++)
3100 hash_id[i] = i & (qdev->rss_ring_count - 1); 3131 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3101 3132
3102 /* 3133 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3103 * Random values for the IPv6 and IPv4 Hash Keys. 3134 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3104 */
3105 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
3106 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
3107 3135
3108 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); 3136 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
3109 3137
@@ -3142,14 +3170,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3142{ 3170{
3143 int status = 0; 3171 int status = 0;
3144 3172
3145 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3173 /* Clear all the entries in the routing table. */
3174 status = ql_clear_routing_entries(qdev);
3146 if (status) 3175 if (status)
3147 return status; 3176 return status;
3148 3177
3149 /* Clear all the entries in the routing table. */ 3178 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3150 status = ql_clear_routing_entries(qdev);
3151 if (status) 3179 if (status)
3152 goto exit; 3180 return status;
3153 3181
3154 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); 3182 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3155 if (status) { 3183 if (status) {
@@ -3242,6 +3270,13 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3242 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | 3270 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3243 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); 3271 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3244 3272
3273 /* Set RX packet routing to use port/pci function on which the
3274 * packet arrived on in addition to usual frame routing.
3275 * This is helpful on bonding where both interfaces can have
3276 * the same MAC address.
3277 */
3278 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3279
3245 /* Start up the rx queues. */ 3280 /* Start up the rx queues. */
3246 for (i = 0; i < qdev->rx_ring_count; i++) { 3281 for (i = 0; i < qdev->rx_ring_count; i++) {
3247 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); 3282 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
@@ -3314,6 +3349,13 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3314 3349
3315 end_jiffies = jiffies + 3350 end_jiffies = jiffies +
3316 max((unsigned long)1, usecs_to_jiffies(30)); 3351 max((unsigned long)1, usecs_to_jiffies(30));
3352
3353 /* Stop management traffic. */
3354 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3355
3356 /* Wait for the NIC and MGMNT FIFOs to empty. */
3357 ql_wait_fifo_empty(qdev);
3358
3317 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); 3359 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3318 3360
3319 do { 3361 do {
@@ -3329,6 +3371,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3329 status = -ETIMEDOUT; 3371 status = -ETIMEDOUT;
3330 } 3372 }
3331 3373
3374 /* Resume management traffic. */
3375 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3332 return status; 3376 return status;
3333} 3377}
3334 3378
@@ -3380,12 +3424,10 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3380 3424
3381 ql_free_rx_buffers(qdev); 3425 ql_free_rx_buffers(qdev);
3382 3426
3383 spin_lock(&qdev->hw_lock);
3384 status = ql_adapter_reset(qdev); 3427 status = ql_adapter_reset(qdev);
3385 if (status) 3428 if (status)
3386 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", 3429 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3387 qdev->func); 3430 qdev->func);
3388 spin_unlock(&qdev->hw_lock);
3389 return status; 3431 return status;
3390} 3432}
3391 3433
@@ -3587,7 +3629,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3587 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3629 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3588 if (status) 3630 if (status)
3589 return; 3631 return;
3590 spin_lock(&qdev->hw_lock);
3591 /* 3632 /*
3592 * Set or clear promiscuous mode if a 3633 * Set or clear promiscuous mode if a
3593 * transition is taking place. 3634 * transition is taking place.
@@ -3664,7 +3705,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3664 } 3705 }
3665 } 3706 }
3666exit: 3707exit:
3667 spin_unlock(&qdev->hw_lock);
3668 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 3708 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3669} 3709}
3670 3710
@@ -3684,10 +3724,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3684 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 3724 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3685 if (status) 3725 if (status)
3686 return status; 3726 return status;
3687 spin_lock(&qdev->hw_lock);
3688 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, 3727 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3689 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); 3728 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3690 spin_unlock(&qdev->hw_lock);
3691 if (status) 3729 if (status)
3692 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); 3730 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3693 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 3731 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -3705,7 +3743,7 @@ static void ql_asic_reset_work(struct work_struct *work)
3705 struct ql_adapter *qdev = 3743 struct ql_adapter *qdev =
3706 container_of(work, struct ql_adapter, asic_reset_work.work); 3744 container_of(work, struct ql_adapter, asic_reset_work.work);
3707 int status; 3745 int status;
3708 3746 rtnl_lock();
3709 status = ql_adapter_down(qdev); 3747 status = ql_adapter_down(qdev);
3710 if (status) 3748 if (status)
3711 goto error; 3749 goto error;
@@ -3714,11 +3752,17 @@ static void ql_asic_reset_work(struct work_struct *work)
3714 if (status) 3752 if (status)
3715 goto error; 3753 goto error;
3716 3754
3755 /* Restore rx mode. */
3756 clear_bit(QL_ALLMULTI, &qdev->flags);
3757 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3758 qlge_set_multicast_list(qdev->ndev);
3759
3760 rtnl_unlock();
3717 return; 3761 return;
3718error: 3762error:
3719 QPRINTK(qdev, IFUP, ALERT, 3763 QPRINTK(qdev, IFUP, ALERT,
3720 "Driver up/down cycle failed, closing device\n"); 3764 "Driver up/down cycle failed, closing device\n");
3721 rtnl_lock(); 3765
3722 set_bit(QL_ADAPTER_UP, &qdev->flags); 3766 set_bit(QL_ADAPTER_UP, &qdev->flags);
3723 dev_close(qdev->ndev); 3767 dev_close(qdev->ndev);
3724 rtnl_unlock(); 3768 rtnl_unlock();
@@ -3834,11 +3878,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3834 return err; 3878 return err;
3835 } 3879 }
3836 3880
3881 qdev->ndev = ndev;
3882 qdev->pdev = pdev;
3883 pci_set_drvdata(pdev, ndev);
3837 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3884 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3838 if (pos <= 0) { 3885 if (pos <= 0) {
3839 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 3886 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3840 "aborting.\n"); 3887 "aborting.\n");
3841 goto err_out; 3888 return pos;
3842 } else { 3889 } else {
3843 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 3890 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3844 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 3891 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
@@ -3851,7 +3898,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3851 err = pci_request_regions(pdev, DRV_NAME); 3898 err = pci_request_regions(pdev, DRV_NAME);
3852 if (err) { 3899 if (err) {
3853 dev_err(&pdev->dev, "PCI region request failed.\n"); 3900 dev_err(&pdev->dev, "PCI region request failed.\n");
3854 goto err_out; 3901 return err;
3855 } 3902 }
3856 3903
3857 pci_set_master(pdev); 3904 pci_set_master(pdev);
@@ -3869,7 +3916,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3869 goto err_out; 3916 goto err_out;
3870 } 3917 }
3871 3918
3872 pci_set_drvdata(pdev, ndev); 3919 pci_save_state(pdev);
3873 qdev->reg_base = 3920 qdev->reg_base =
3874 ioremap_nocache(pci_resource_start(pdev, 1), 3921 ioremap_nocache(pci_resource_start(pdev, 1),
3875 pci_resource_len(pdev, 1)); 3922 pci_resource_len(pdev, 1));
@@ -3889,8 +3936,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3889 goto err_out; 3936 goto err_out;
3890 } 3937 }
3891 3938
3892 qdev->ndev = ndev;
3893 qdev->pdev = pdev;
3894 err = ql_get_board_info(qdev); 3939 err = ql_get_board_info(qdev);
3895 if (err) { 3940 if (err) {
3896 dev_err(&pdev->dev, "Register access failed.\n"); 3941 dev_err(&pdev->dev, "Register access failed.\n");
@@ -3930,7 +3975,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3930 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 3975 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3931 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 3976 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
3932 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 3977 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
3933 mutex_init(&qdev->mpi_mutex);
3934 init_completion(&qdev->ide_completion); 3978 init_completion(&qdev->ide_completion);
3935 3979
3936 if (!cards_found) { 3980 if (!cards_found) {
@@ -4027,6 +4071,33 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
4027 free_netdev(ndev); 4071 free_netdev(ndev);
4028} 4072}
4029 4073
4074/* Clean up resources without touching hardware. */
4075static void ql_eeh_close(struct net_device *ndev)
4076{
4077 int i;
4078 struct ql_adapter *qdev = netdev_priv(ndev);
4079
4080 if (netif_carrier_ok(ndev)) {
4081 netif_carrier_off(ndev);
4082 netif_stop_queue(ndev);
4083 }
4084
4085 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4086 cancel_delayed_work_sync(&qdev->asic_reset_work);
4087 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4088 cancel_delayed_work_sync(&qdev->mpi_work);
4089 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4090 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4091
4092 for (i = 0; i < qdev->rss_ring_count; i++)
4093 netif_napi_del(&qdev->rx_ring[i].napi);
4094
4095 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4096 ql_tx_ring_clean(qdev);
4097 ql_free_rx_buffers(qdev);
4098 ql_release_adapter_resources(qdev);
4099}
4100
4030/* 4101/*
4031 * This callback is called by the PCI subsystem whenever 4102 * This callback is called by the PCI subsystem whenever
4032 * a PCI bus error is detected. 4103 * a PCI bus error is detected.
@@ -4035,17 +4106,21 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4035 enum pci_channel_state state) 4106 enum pci_channel_state state)
4036{ 4107{
4037 struct net_device *ndev = pci_get_drvdata(pdev); 4108 struct net_device *ndev = pci_get_drvdata(pdev);
4038 struct ql_adapter *qdev = netdev_priv(ndev);
4039
4040 netif_device_detach(ndev);
4041 4109
4042 if (state == pci_channel_io_perm_failure) 4110 switch (state) {
4111 case pci_channel_io_normal:
4112 return PCI_ERS_RESULT_CAN_RECOVER;
4113 case pci_channel_io_frozen:
4114 netif_device_detach(ndev);
4115 if (netif_running(ndev))
4116 ql_eeh_close(ndev);
4117 pci_disable_device(pdev);
4118 return PCI_ERS_RESULT_NEED_RESET;
4119 case pci_channel_io_perm_failure:
4120 dev_err(&pdev->dev,
4121 "%s: pci_channel_io_perm_failure.\n", __func__);
4043 return PCI_ERS_RESULT_DISCONNECT; 4122 return PCI_ERS_RESULT_DISCONNECT;
4044 4123 }
4045 if (netif_running(ndev))
4046 ql_adapter_down(qdev);
4047
4048 pci_disable_device(pdev);
4049 4124
4050 /* Request a slot reset. */ 4125 /* Request a slot reset. */
4051 return PCI_ERS_RESULT_NEED_RESET; 4126 return PCI_ERS_RESULT_NEED_RESET;
@@ -4062,25 +4137,15 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4062 struct net_device *ndev = pci_get_drvdata(pdev); 4137 struct net_device *ndev = pci_get_drvdata(pdev);
4063 struct ql_adapter *qdev = netdev_priv(ndev); 4138 struct ql_adapter *qdev = netdev_priv(ndev);
4064 4139
4140 pdev->error_state = pci_channel_io_normal;
4141
4142 pci_restore_state(pdev);
4065 if (pci_enable_device(pdev)) { 4143 if (pci_enable_device(pdev)) {
4066 QPRINTK(qdev, IFUP, ERR, 4144 QPRINTK(qdev, IFUP, ERR,
4067 "Cannot re-enable PCI device after reset.\n"); 4145 "Cannot re-enable PCI device after reset.\n");
4068 return PCI_ERS_RESULT_DISCONNECT; 4146 return PCI_ERS_RESULT_DISCONNECT;
4069 } 4147 }
4070
4071 pci_set_master(pdev); 4148 pci_set_master(pdev);
4072
4073 netif_carrier_off(ndev);
4074 ql_adapter_reset(qdev);
4075
4076 /* Make sure the EEPROM is good */
4077 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4078
4079 if (!is_valid_ether_addr(ndev->perm_addr)) {
4080 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4081 return PCI_ERS_RESULT_DISCONNECT;
4082 }
4083
4084 return PCI_ERS_RESULT_RECOVERED; 4149 return PCI_ERS_RESULT_RECOVERED;
4085} 4150}
4086 4151
@@ -4088,17 +4153,21 @@ static void qlge_io_resume(struct pci_dev *pdev)
4088{ 4153{
4089 struct net_device *ndev = pci_get_drvdata(pdev); 4154 struct net_device *ndev = pci_get_drvdata(pdev);
4090 struct ql_adapter *qdev = netdev_priv(ndev); 4155 struct ql_adapter *qdev = netdev_priv(ndev);
4156 int err = 0;
4091 4157
4092 pci_set_master(pdev); 4158 if (ql_adapter_reset(qdev))
4093 4159 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
4094 if (netif_running(ndev)) { 4160 if (netif_running(ndev)) {
4095 if (ql_adapter_up(qdev)) { 4161 err = qlge_open(ndev);
4162 if (err) {
4096 QPRINTK(qdev, IFUP, ERR, 4163 QPRINTK(qdev, IFUP, ERR,
4097 "Device initialization failed after reset.\n"); 4164 "Device initialization failed after reset.\n");
4098 return; 4165 return;
4099 } 4166 }
4167 } else {
4168 QPRINTK(qdev, IFUP, ERR,
4169 "Device was not running prior to EEH.\n");
4100 } 4170 }
4101
4102 netif_device_attach(ndev); 4171 netif_device_attach(ndev);
4103} 4172}
4104 4173
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 6685bd97da91..bcf13c96f73f 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -470,9 +470,9 @@ end:
470 */ 470 */
471static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) 471static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
472{ 472{
473 int status, count; 473 int status;
474 unsigned long count;
474 475
475 mutex_lock(&qdev->mpi_mutex);
476 476
477 /* Begin polled mode for MPI */ 477 /* Begin polled mode for MPI */
478 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 478 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -492,9 +492,9 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
492 /* Wait for the command to complete. We loop 492 /* Wait for the command to complete. We loop
493 * here because some AEN might arrive while 493 * here because some AEN might arrive while
494 * we're waiting for the mailbox command to 494 * we're waiting for the mailbox command to
495 * complete. If more than 5 arrive then we can 495 * complete. If more than 5 seconds expire we can
496 * assume something is wrong. */ 496 * assume something is wrong. */
497 count = 5; 497 count = jiffies + HZ * MAILBOX_TIMEOUT;
498 do { 498 do {
499 /* Wait for the interrupt to come in. */ 499 /* Wait for the interrupt to come in. */
500 status = ql_wait_mbx_cmd_cmplt(qdev); 500 status = ql_wait_mbx_cmd_cmplt(qdev);
@@ -518,15 +518,15 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
518 MB_CMD_STS_GOOD) || 518 MB_CMD_STS_GOOD) ||
519 ((mbcp->mbox_out[0] & 0x0000f000) == 519 ((mbcp->mbox_out[0] & 0x0000f000) ==
520 MB_CMD_STS_INTRMDT)) 520 MB_CMD_STS_INTRMDT))
521 break; 521 goto done;
522 } while (--count); 522 } while (time_before(jiffies, count));
523 523
524 if (!count) { 524 QPRINTK(qdev, DRV, ERR,
525 QPRINTK(qdev, DRV, ERR, 525 "Timed out waiting for mailbox complete.\n");
526 "Timed out waiting for mailbox complete.\n"); 526 status = -ETIMEDOUT;
527 status = -ETIMEDOUT; 527 goto end;
528 goto end; 528
529 } 529done:
530 530
531 /* Now we can clear the interrupt condition 531 /* Now we can clear the interrupt condition
532 * and look at our status. 532 * and look at our status.
@@ -541,7 +541,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
541 status = -EIO; 541 status = -EIO;
542 } 542 }
543end: 543end:
544 mutex_unlock(&qdev->mpi_mutex);
545 /* End polled mode for MPI */ 544 /* End polled mode for MPI */
546 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 545 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
547 return status; 546 return status;
@@ -770,13 +769,104 @@ static int ql_idc_wait(struct ql_adapter *qdev)
770 return status; 769 return status;
771} 770}
772 771
772int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
773{
774 struct mbox_params mbc;
775 struct mbox_params *mbcp = &mbc;
776 int status;
777
778 memset(mbcp, 0, sizeof(struct mbox_params));
779
780 mbcp->in_count = 1;
781 mbcp->out_count = 2;
782
783 mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
784 mbcp->mbox_in[1] = control;
785
786 status = ql_mailbox_command(qdev, mbcp);
787 if (status)
788 return status;
789
790 if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
791 return status;
792
793 if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
794 QPRINTK(qdev, DRV, ERR,
795 "Command not supported by firmware.\n");
796 status = -EINVAL;
797 } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
798 /* This indicates that the firmware is
799 * already in the state we are trying to
800 * change it to.
801 */
802 QPRINTK(qdev, DRV, ERR,
803 "Command parameters make no change.\n");
804 }
805 return status;
806}
807
808/* Returns a negative error code or the mailbox command status. */
809static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
810{
811 struct mbox_params mbc;
812 struct mbox_params *mbcp = &mbc;
813 int status;
814
815 memset(mbcp, 0, sizeof(struct mbox_params));
816 *control = 0;
817
818 mbcp->in_count = 1;
819 mbcp->out_count = 1;
820
821 mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
822
823 status = ql_mailbox_command(qdev, mbcp);
824 if (status)
825 return status;
826
827 if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
828 *control = mbcp->mbox_in[1];
829 return status;
830 }
831
832 if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
833 QPRINTK(qdev, DRV, ERR,
834 "Command not supported by firmware.\n");
835 status = -EINVAL;
836 } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
837 QPRINTK(qdev, DRV, ERR,
838 "Failed to get MPI traffic control.\n");
839 status = -EIO;
840 }
841 return status;
842}
843
844int ql_wait_fifo_empty(struct ql_adapter *qdev)
845{
846 int count = 5;
847 u32 mgmnt_fifo_empty;
848 u32 nic_fifo_empty;
849
850 do {
851 nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
852 ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
853 mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
854 if (nic_fifo_empty && mgmnt_fifo_empty)
855 return 0;
856 msleep(100);
857 } while (count-- > 0);
858 return -ETIMEDOUT;
859}
860
773/* API called in work thread context to set new TX/RX 861/* API called in work thread context to set new TX/RX
774 * maximum frame size values to match MTU. 862 * maximum frame size values to match MTU.
775 */ 863 */
776static int ql_set_port_cfg(struct ql_adapter *qdev) 864static int ql_set_port_cfg(struct ql_adapter *qdev)
777{ 865{
778 int status; 866 int status;
867 rtnl_lock();
779 status = ql_mb_set_port_cfg(qdev); 868 status = ql_mb_set_port_cfg(qdev);
869 rtnl_unlock();
780 if (status) 870 if (status)
781 return status; 871 return status;
782 status = ql_idc_wait(qdev); 872 status = ql_idc_wait(qdev);
@@ -797,7 +887,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
797 container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 887 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
798 int status; 888 int status;
799 889
890 rtnl_lock();
800 status = ql_mb_get_port_cfg(qdev); 891 status = ql_mb_get_port_cfg(qdev);
892 rtnl_unlock();
801 if (status) { 893 if (status) {
802 QPRINTK(qdev, DRV, ERR, 894 QPRINTK(qdev, DRV, ERR,
803 "Bug: Failed to get port config data.\n"); 895 "Bug: Failed to get port config data.\n");
@@ -855,7 +947,9 @@ void ql_mpi_idc_work(struct work_struct *work)
855 * needs to be set. 947 * needs to be set.
856 * */ 948 * */
857 set_bit(QL_CAM_RT_SET, &qdev->flags); 949 set_bit(QL_CAM_RT_SET, &qdev->flags);
950 rtnl_lock();
858 status = ql_mb_idc_ack(qdev); 951 status = ql_mb_idc_ack(qdev);
952 rtnl_unlock();
859 if (status) { 953 if (status) {
860 QPRINTK(qdev, DRV, ERR, 954 QPRINTK(qdev, DRV, ERR,
861 "Bug: No pending IDC!\n"); 955 "Bug: No pending IDC!\n");
@@ -871,7 +965,9 @@ void ql_mpi_work(struct work_struct *work)
871 struct mbox_params *mbcp = &mbc; 965 struct mbox_params *mbcp = &mbc;
872 int err = 0; 966 int err = 0;
873 967
874 mutex_lock(&qdev->mpi_mutex); 968 rtnl_lock();
969 /* Begin polled mode for MPI */
970 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
875 971
876 while (ql_read32(qdev, STS) & STS_PI) { 972 while (ql_read32(qdev, STS) & STS_PI) {
877 memset(mbcp, 0, sizeof(struct mbox_params)); 973 memset(mbcp, 0, sizeof(struct mbox_params));
@@ -884,7 +980,9 @@ void ql_mpi_work(struct work_struct *work)
884 break; 980 break;
885 } 981 }
886 982
887 mutex_unlock(&qdev->mpi_mutex); 983 /* End polled mode for MPI */
984 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
985 rtnl_unlock();
888 ql_enable_completion_interrupt(qdev, 0); 986 ql_enable_completion_interrupt(qdev, 0);
889} 987}
890 988
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 50c6a3cfe439..f98ef523f525 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -115,7 +115,9 @@ enum mac_version {
115 RTL_GIGA_MAC_VER_22 = 0x16, // 8168C 115 RTL_GIGA_MAC_VER_22 = 0x16, // 8168C
116 RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP 116 RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP
117 RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP 117 RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
118 RTL_GIGA_MAC_VER_25 = 0x19 // 8168D 118 RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
119 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
120 RTL_GIGA_MAC_VER_27 = 0x1b // 8168DP
119}; 121};
120 122
121#define _R(NAME,MAC,MASK) \ 123#define _R(NAME,MAC,MASK) \
@@ -150,7 +152,9 @@ static const struct {
150 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E 152 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E
151 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E 153 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E
152 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E 154 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
153 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880) // PCI-E 155 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
156 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
157 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E
154}; 158};
155#undef _R 159#undef _R
156 160
@@ -253,6 +257,13 @@ enum rtl8168_8101_registers {
253 DBG_REG = 0xd1, 257 DBG_REG = 0xd1,
254#define FIX_NAK_1 (1 << 4) 258#define FIX_NAK_1 (1 << 4)
255#define FIX_NAK_2 (1 << 3) 259#define FIX_NAK_2 (1 << 3)
260 EFUSEAR = 0xdc,
261#define EFUSEAR_FLAG 0x80000000
262#define EFUSEAR_WRITE_CMD 0x80000000
263#define EFUSEAR_READ_CMD 0x00000000
264#define EFUSEAR_REG_MASK 0x03ff
265#define EFUSEAR_REG_SHIFT 8
266#define EFUSEAR_DATA_MASK 0xff
256}; 267};
257 268
258enum rtl_register_content { 269enum rtl_register_content {
@@ -568,6 +579,14 @@ static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
568 mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value); 579 mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
569} 580}
570 581
582static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m)
583{
584 int val;
585
586 val = mdio_read(ioaddr, reg_addr);
587 mdio_write(ioaddr, reg_addr, (val | p) & ~m);
588}
589
571static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, 590static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
572 int val) 591 int val)
573{ 592{
@@ -651,6 +670,24 @@ static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
651 return value; 670 return value;
652} 671}
653 672
673static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
674{
675 u8 value = 0xff;
676 unsigned int i;
677
678 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
679
680 for (i = 0; i < 300; i++) {
681 if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
682 value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
683 break;
684 }
685 udelay(100);
686 }
687
688 return value;
689}
690
654static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) 691static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
655{ 692{
656 RTL_W16(IntrMask, 0x0000); 693 RTL_W16(IntrMask, 0x0000);
@@ -992,7 +1029,10 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
992 1029
993 spin_lock_irqsave(&tp->lock, flags); 1030 spin_lock_irqsave(&tp->lock, flags);
994 tp->vlgrp = grp; 1031 tp->vlgrp = grp;
995 if (tp->vlgrp) 1032 /*
1033 * Do not disable RxVlan on 8110SCd.
1034 */
1035 if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
996 tp->cp_cmd |= RxVlan; 1036 tp->cp_cmd |= RxVlan;
997 else 1037 else
998 tp->cp_cmd &= ~RxVlan; 1038 tp->cp_cmd &= ~RxVlan;
@@ -1243,7 +1283,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1243 int mac_version; 1283 int mac_version;
1244 } mac_info[] = { 1284 } mac_info[] = {
1245 /* 8168D family. */ 1285 /* 8168D family. */
1246 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_25 }, 1286 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
1287 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
1288 { 0x7c800000, 0x28800000, RTL_GIGA_MAC_VER_27 },
1289 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
1247 1290
1248 /* 8168C family. */ 1291 /* 8168C family. */
1249 { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 }, 1292 { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 },
@@ -1648,74 +1691,903 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr)
1648 rtl8168c_3_hw_phy_config(ioaddr); 1691 rtl8168c_3_hw_phy_config(ioaddr);
1649} 1692}
1650 1693
1651static void rtl8168d_hw_phy_config(void __iomem *ioaddr) 1694static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
1652{ 1695{
1653 struct phy_reg phy_reg_init_0[] = { 1696 static struct phy_reg phy_reg_init_0[] = {
1654 { 0x1f, 0x0001 }, 1697 { 0x1f, 0x0001 },
1655 { 0x09, 0x2770 }, 1698 { 0x06, 0x4064 },
1656 { 0x08, 0x04d0 }, 1699 { 0x07, 0x2863 },
1657 { 0x0b, 0xad15 }, 1700 { 0x08, 0x059c },
1658 { 0x0c, 0x5bf0 }, 1701 { 0x09, 0x26b4 },
1659 { 0x1c, 0xf101 }, 1702 { 0x0a, 0x6a19 },
1703 { 0x0b, 0xdcc8 },
1704 { 0x10, 0xf06d },
1705 { 0x14, 0x7f68 },
1706 { 0x18, 0x7fd9 },
1707 { 0x1c, 0xf0ff },
1708 { 0x1d, 0x3d9c },
1660 { 0x1f, 0x0003 }, 1709 { 0x1f, 0x0003 },
1661 { 0x14, 0x94d7 }, 1710 { 0x12, 0xf49f },
1662 { 0x12, 0xf4d6 }, 1711 { 0x13, 0x070b },
1663 { 0x09, 0xca0f }, 1712 { 0x1a, 0x05ad },
1664 { 0x1f, 0x0002 }, 1713 { 0x14, 0x94c0 }
1665 { 0x0b, 0x0b10 }, 1714 };
1666 { 0x0c, 0xd1f7 }, 1715 static struct phy_reg phy_reg_init_1[] = {
1667 { 0x1f, 0x0002 },
1668 { 0x06, 0x5461 },
1669 { 0x1f, 0x0002 }, 1716 { 0x1f, 0x0002 },
1670 { 0x05, 0x6662 }, 1717 { 0x06, 0x5561 },
1718 { 0x1f, 0x0005 },
1719 { 0x05, 0x8332 },
1720 { 0x06, 0x5561 }
1721 };
1722 static struct phy_reg phy_reg_init_2[] = {
1723 { 0x1f, 0x0005 },
1724 { 0x05, 0xffc2 },
1725 { 0x1f, 0x0005 },
1726 { 0x05, 0x8000 },
1727 { 0x06, 0xf8f9 },
1728 { 0x06, 0xfaef },
1729 { 0x06, 0x59ee },
1730 { 0x06, 0xf8ea },
1731 { 0x06, 0x00ee },
1732 { 0x06, 0xf8eb },
1733 { 0x06, 0x00e0 },
1734 { 0x06, 0xf87c },
1735 { 0x06, 0xe1f8 },
1736 { 0x06, 0x7d59 },
1737 { 0x06, 0x0fef },
1738 { 0x06, 0x0139 },
1739 { 0x06, 0x029e },
1740 { 0x06, 0x06ef },
1741 { 0x06, 0x1039 },
1742 { 0x06, 0x089f },
1743 { 0x06, 0x2aee },
1744 { 0x06, 0xf8ea },
1745 { 0x06, 0x00ee },
1746 { 0x06, 0xf8eb },
1747 { 0x06, 0x01e0 },
1748 { 0x06, 0xf87c },
1749 { 0x06, 0xe1f8 },
1750 { 0x06, 0x7d58 },
1751 { 0x06, 0x409e },
1752 { 0x06, 0x0f39 },
1753 { 0x06, 0x46aa },
1754 { 0x06, 0x0bbf },
1755 { 0x06, 0x8290 },
1756 { 0x06, 0xd682 },
1757 { 0x06, 0x9802 },
1758 { 0x06, 0x014f },
1759 { 0x06, 0xae09 },
1760 { 0x06, 0xbf82 },
1761 { 0x06, 0x98d6 },
1762 { 0x06, 0x82a0 },
1763 { 0x06, 0x0201 },
1764 { 0x06, 0x4fef },
1765 { 0x06, 0x95fe },
1766 { 0x06, 0xfdfc },
1767 { 0x06, 0x05f8 },
1768 { 0x06, 0xf9fa },
1769 { 0x06, 0xeef8 },
1770 { 0x06, 0xea00 },
1771 { 0x06, 0xeef8 },
1772 { 0x06, 0xeb00 },
1773 { 0x06, 0xe2f8 },
1774 { 0x06, 0x7ce3 },
1775 { 0x06, 0xf87d },
1776 { 0x06, 0xa511 },
1777 { 0x06, 0x1112 },
1778 { 0x06, 0xd240 },
1779 { 0x06, 0xd644 },
1780 { 0x06, 0x4402 },
1781 { 0x06, 0x8217 },
1782 { 0x06, 0xd2a0 },
1783 { 0x06, 0xd6aa },
1784 { 0x06, 0xaa02 },
1785 { 0x06, 0x8217 },
1786 { 0x06, 0xae0f },
1787 { 0x06, 0xa544 },
1788 { 0x06, 0x4402 },
1789 { 0x06, 0xae4d },
1790 { 0x06, 0xa5aa },
1791 { 0x06, 0xaa02 },
1792 { 0x06, 0xae47 },
1793 { 0x06, 0xaf82 },
1794 { 0x06, 0x13ee },
1795 { 0x06, 0x834e },
1796 { 0x06, 0x00ee },
1797 { 0x06, 0x834d },
1798 { 0x06, 0x0fee },
1799 { 0x06, 0x834c },
1800 { 0x06, 0x0fee },
1801 { 0x06, 0x834f },
1802 { 0x06, 0x00ee },
1803 { 0x06, 0x8351 },
1804 { 0x06, 0x00ee },
1805 { 0x06, 0x834a },
1806 { 0x06, 0xffee },
1807 { 0x06, 0x834b },
1808 { 0x06, 0xffe0 },
1809 { 0x06, 0x8330 },
1810 { 0x06, 0xe183 },
1811 { 0x06, 0x3158 },
1812 { 0x06, 0xfee4 },
1813 { 0x06, 0xf88a },
1814 { 0x06, 0xe5f8 },
1815 { 0x06, 0x8be0 },
1816 { 0x06, 0x8332 },
1817 { 0x06, 0xe183 },
1818 { 0x06, 0x3359 },
1819 { 0x06, 0x0fe2 },
1820 { 0x06, 0x834d },
1821 { 0x06, 0x0c24 },
1822 { 0x06, 0x5af0 },
1823 { 0x06, 0x1e12 },
1824 { 0x06, 0xe4f8 },
1825 { 0x06, 0x8ce5 },
1826 { 0x06, 0xf88d },
1827 { 0x06, 0xaf82 },
1828 { 0x06, 0x13e0 },
1829 { 0x06, 0x834f },
1830 { 0x06, 0x10e4 },
1831 { 0x06, 0x834f },
1832 { 0x06, 0xe083 },
1833 { 0x06, 0x4e78 },
1834 { 0x06, 0x009f },
1835 { 0x06, 0x0ae0 },
1836 { 0x06, 0x834f },
1837 { 0x06, 0xa010 },
1838 { 0x06, 0xa5ee },
1839 { 0x06, 0x834e },
1840 { 0x06, 0x01e0 },
1841 { 0x06, 0x834e },
1842 { 0x06, 0x7805 },
1843 { 0x06, 0x9e9a },
1844 { 0x06, 0xe083 },
1845 { 0x06, 0x4e78 },
1846 { 0x06, 0x049e },
1847 { 0x06, 0x10e0 },
1848 { 0x06, 0x834e },
1849 { 0x06, 0x7803 },
1850 { 0x06, 0x9e0f },
1851 { 0x06, 0xe083 },
1852 { 0x06, 0x4e78 },
1853 { 0x06, 0x019e },
1854 { 0x06, 0x05ae },
1855 { 0x06, 0x0caf },
1856 { 0x06, 0x81f8 },
1857 { 0x06, 0xaf81 },
1858 { 0x06, 0xa3af },
1859 { 0x06, 0x81dc },
1860 { 0x06, 0xaf82 },
1861 { 0x06, 0x13ee },
1862 { 0x06, 0x8348 },
1863 { 0x06, 0x00ee },
1864 { 0x06, 0x8349 },
1865 { 0x06, 0x00e0 },
1866 { 0x06, 0x8351 },
1867 { 0x06, 0x10e4 },
1868 { 0x06, 0x8351 },
1869 { 0x06, 0x5801 },
1870 { 0x06, 0x9fea },
1871 { 0x06, 0xd000 },
1872 { 0x06, 0xd180 },
1873 { 0x06, 0x1f66 },
1874 { 0x06, 0xe2f8 },
1875 { 0x06, 0xeae3 },
1876 { 0x06, 0xf8eb },
1877 { 0x06, 0x5af8 },
1878 { 0x06, 0x1e20 },
1879 { 0x06, 0xe6f8 },
1880 { 0x06, 0xeae5 },
1881 { 0x06, 0xf8eb },
1882 { 0x06, 0xd302 },
1883 { 0x06, 0xb3fe },
1884 { 0x06, 0xe2f8 },
1885 { 0x06, 0x7cef },
1886 { 0x06, 0x325b },
1887 { 0x06, 0x80e3 },
1888 { 0x06, 0xf87d },
1889 { 0x06, 0x9e03 },
1890 { 0x06, 0x7dff },
1891 { 0x06, 0xff0d },
1892 { 0x06, 0x581c },
1893 { 0x06, 0x551a },
1894 { 0x06, 0x6511 },
1895 { 0x06, 0xa190 },
1896 { 0x06, 0xd3e2 },
1897 { 0x06, 0x8348 },
1898 { 0x06, 0xe383 },
1899 { 0x06, 0x491b },
1900 { 0x06, 0x56ab },
1901 { 0x06, 0x08ef },
1902 { 0x06, 0x56e6 },
1903 { 0x06, 0x8348 },
1904 { 0x06, 0xe783 },
1905 { 0x06, 0x4910 },
1906 { 0x06, 0xd180 },
1907 { 0x06, 0x1f66 },
1908 { 0x06, 0xa004 },
1909 { 0x06, 0xb9e2 },
1910 { 0x06, 0x8348 },
1911 { 0x06, 0xe383 },
1912 { 0x06, 0x49ef },
1913 { 0x06, 0x65e2 },
1914 { 0x06, 0x834a },
1915 { 0x06, 0xe383 },
1916 { 0x06, 0x4b1b },
1917 { 0x06, 0x56aa },
1918 { 0x06, 0x0eef },
1919 { 0x06, 0x56e6 },
1920 { 0x06, 0x834a },
1921 { 0x06, 0xe783 },
1922 { 0x06, 0x4be2 },
1923 { 0x06, 0x834d },
1924 { 0x06, 0xe683 },
1925 { 0x06, 0x4ce0 },
1926 { 0x06, 0x834d },
1927 { 0x06, 0xa000 },
1928 { 0x06, 0x0caf },
1929 { 0x06, 0x81dc },
1930 { 0x06, 0xe083 },
1931 { 0x06, 0x4d10 },
1932 { 0x06, 0xe483 },
1933 { 0x06, 0x4dae },
1934 { 0x06, 0x0480 },
1935 { 0x06, 0xe483 },
1936 { 0x06, 0x4de0 },
1937 { 0x06, 0x834e },
1938 { 0x06, 0x7803 },
1939 { 0x06, 0x9e0b },
1940 { 0x06, 0xe083 },
1941 { 0x06, 0x4e78 },
1942 { 0x06, 0x049e },
1943 { 0x06, 0x04ee },
1944 { 0x06, 0x834e },
1945 { 0x06, 0x02e0 },
1946 { 0x06, 0x8332 },
1947 { 0x06, 0xe183 },
1948 { 0x06, 0x3359 },
1949 { 0x06, 0x0fe2 },
1950 { 0x06, 0x834d },
1951 { 0x06, 0x0c24 },
1952 { 0x06, 0x5af0 },
1953 { 0x06, 0x1e12 },
1954 { 0x06, 0xe4f8 },
1955 { 0x06, 0x8ce5 },
1956 { 0x06, 0xf88d },
1957 { 0x06, 0xe083 },
1958 { 0x06, 0x30e1 },
1959 { 0x06, 0x8331 },
1960 { 0x06, 0x6801 },
1961 { 0x06, 0xe4f8 },
1962 { 0x06, 0x8ae5 },
1963 { 0x06, 0xf88b },
1964 { 0x06, 0xae37 },
1965 { 0x06, 0xee83 },
1966 { 0x06, 0x4e03 },
1967 { 0x06, 0xe083 },
1968 { 0x06, 0x4ce1 },
1969 { 0x06, 0x834d },
1970 { 0x06, 0x1b01 },
1971 { 0x06, 0x9e04 },
1972 { 0x06, 0xaaa1 },
1973 { 0x06, 0xaea8 },
1974 { 0x06, 0xee83 },
1975 { 0x06, 0x4e04 },
1976 { 0x06, 0xee83 },
1977 { 0x06, 0x4f00 },
1978 { 0x06, 0xaeab },
1979 { 0x06, 0xe083 },
1980 { 0x06, 0x4f78 },
1981 { 0x06, 0x039f },
1982 { 0x06, 0x14ee },
1983 { 0x06, 0x834e },
1984 { 0x06, 0x05d2 },
1985 { 0x06, 0x40d6 },
1986 { 0x06, 0x5554 },
1987 { 0x06, 0x0282 },
1988 { 0x06, 0x17d2 },
1989 { 0x06, 0xa0d6 },
1990 { 0x06, 0xba00 },
1991 { 0x06, 0x0282 },
1992 { 0x06, 0x17fe },
1993 { 0x06, 0xfdfc },
1994 { 0x06, 0x05f8 },
1995 { 0x06, 0xe0f8 },
1996 { 0x06, 0x60e1 },
1997 { 0x06, 0xf861 },
1998 { 0x06, 0x6802 },
1999 { 0x06, 0xe4f8 },
2000 { 0x06, 0x60e5 },
2001 { 0x06, 0xf861 },
2002 { 0x06, 0xe0f8 },
2003 { 0x06, 0x48e1 },
2004 { 0x06, 0xf849 },
2005 { 0x06, 0x580f },
2006 { 0x06, 0x1e02 },
2007 { 0x06, 0xe4f8 },
2008 { 0x06, 0x48e5 },
2009 { 0x06, 0xf849 },
2010 { 0x06, 0xd000 },
2011 { 0x06, 0x0282 },
2012 { 0x06, 0x5bbf },
2013 { 0x06, 0x8350 },
2014 { 0x06, 0xef46 },
2015 { 0x06, 0xdc19 },
2016 { 0x06, 0xddd0 },
2017 { 0x06, 0x0102 },
2018 { 0x06, 0x825b },
2019 { 0x06, 0x0282 },
2020 { 0x06, 0x77e0 },
2021 { 0x06, 0xf860 },
2022 { 0x06, 0xe1f8 },
2023 { 0x06, 0x6158 },
2024 { 0x06, 0xfde4 },
2025 { 0x06, 0xf860 },
2026 { 0x06, 0xe5f8 },
2027 { 0x06, 0x61fc },
2028 { 0x06, 0x04f9 },
2029 { 0x06, 0xfafb },
2030 { 0x06, 0xc6bf },
2031 { 0x06, 0xf840 },
2032 { 0x06, 0xbe83 },
2033 { 0x06, 0x50a0 },
2034 { 0x06, 0x0101 },
2035 { 0x06, 0x071b },
2036 { 0x06, 0x89cf },
2037 { 0x06, 0xd208 },
2038 { 0x06, 0xebdb },
2039 { 0x06, 0x19b2 },
2040 { 0x06, 0xfbff },
2041 { 0x06, 0xfefd },
2042 { 0x06, 0x04f8 },
2043 { 0x06, 0xe0f8 },
2044 { 0x06, 0x48e1 },
2045 { 0x06, 0xf849 },
2046 { 0x06, 0x6808 },
2047 { 0x06, 0xe4f8 },
2048 { 0x06, 0x48e5 },
2049 { 0x06, 0xf849 },
2050 { 0x06, 0x58f7 },
2051 { 0x06, 0xe4f8 },
2052 { 0x06, 0x48e5 },
2053 { 0x06, 0xf849 },
2054 { 0x06, 0xfc04 },
2055 { 0x06, 0x4d20 },
2056 { 0x06, 0x0002 },
2057 { 0x06, 0x4e22 },
2058 { 0x06, 0x0002 },
2059 { 0x06, 0x4ddf },
2060 { 0x06, 0xff01 },
2061 { 0x06, 0x4edd },
2062 { 0x06, 0xff01 },
2063 { 0x05, 0x83d4 },
2064 { 0x06, 0x8000 },
2065 { 0x05, 0x83d8 },
2066 { 0x06, 0x8051 },
2067 { 0x02, 0x6010 },
2068 { 0x03, 0xdc00 },
2069 { 0x05, 0xfff6 },
2070 { 0x06, 0x00fc },
1671 { 0x1f, 0x0000 }, 2071 { 0x1f, 0x0000 },
1672 { 0x14, 0x0060 }, 2072
1673 { 0x1f, 0x0000 }, 2073 { 0x1f, 0x0000 },
1674 { 0x0d, 0xf8a0 }, 2074 { 0x0d, 0xf880 },
2075 { 0x1f, 0x0000 }
2076 };
2077
2078 rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2079
2080 mdio_write(ioaddr, 0x1f, 0x0002);
2081 mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef);
2082 mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00);
2083
2084 rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
2085
2086 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
2087 struct phy_reg phy_reg_init[] = {
2088 { 0x1f, 0x0002 },
2089 { 0x05, 0x669a },
2090 { 0x1f, 0x0005 },
2091 { 0x05, 0x8330 },
2092 { 0x06, 0x669a },
2093 { 0x1f, 0x0002 }
2094 };
2095 int val;
2096
2097 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2098
2099 val = mdio_read(ioaddr, 0x0d);
2100
2101 if ((val & 0x00ff) != 0x006c) {
2102 u32 set[] = {
2103 0x0065, 0x0066, 0x0067, 0x0068,
2104 0x0069, 0x006a, 0x006b, 0x006c
2105 };
2106 int i;
2107
2108 mdio_write(ioaddr, 0x1f, 0x0002);
2109
2110 val &= 0xff00;
2111 for (i = 0; i < ARRAY_SIZE(set); i++)
2112 mdio_write(ioaddr, 0x0d, val | set[i]);
2113 }
2114 } else {
2115 struct phy_reg phy_reg_init[] = {
2116 { 0x1f, 0x0002 },
2117 { 0x05, 0x6662 },
2118 { 0x1f, 0x0005 },
2119 { 0x05, 0x8330 },
2120 { 0x06, 0x6662 }
2121 };
2122
2123 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2124 }
2125
2126 mdio_write(ioaddr, 0x1f, 0x0002);
2127 mdio_patch(ioaddr, 0x0d, 0x0300);
2128 mdio_patch(ioaddr, 0x0f, 0x0010);
2129
2130 mdio_write(ioaddr, 0x1f, 0x0002);
2131 mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
2132 mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
2133
2134 rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2));
2135}
2136
2137static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2138{
2139 static struct phy_reg phy_reg_init_0[] = {
2140 { 0x1f, 0x0001 },
2141 { 0x06, 0x4064 },
2142 { 0x07, 0x2863 },
2143 { 0x08, 0x059c },
2144 { 0x09, 0x26b4 },
2145 { 0x0a, 0x6a19 },
2146 { 0x0b, 0xdcc8 },
2147 { 0x10, 0xf06d },
2148 { 0x14, 0x7f68 },
2149 { 0x18, 0x7fd9 },
2150 { 0x1c, 0xf0ff },
2151 { 0x1d, 0x3d9c },
2152 { 0x1f, 0x0003 },
2153 { 0x12, 0xf49f },
2154 { 0x13, 0x070b },
2155 { 0x1a, 0x05ad },
2156 { 0x14, 0x94c0 },
2157
2158 { 0x1f, 0x0002 },
2159 { 0x06, 0x5561 },
1675 { 0x1f, 0x0005 }, 2160 { 0x1f, 0x0005 },
1676 { 0x05, 0xffc2 } 2161 { 0x05, 0x8332 },
2162 { 0x06, 0x5561 }
2163 };
2164 static struct phy_reg phy_reg_init_1[] = {
2165 { 0x1f, 0x0005 },
2166 { 0x05, 0xffc2 },
2167 { 0x1f, 0x0005 },
2168 { 0x05, 0x8000 },
2169 { 0x06, 0xf8f9 },
2170 { 0x06, 0xfaee },
2171 { 0x06, 0xf8ea },
2172 { 0x06, 0x00ee },
2173 { 0x06, 0xf8eb },
2174 { 0x06, 0x00e2 },
2175 { 0x06, 0xf87c },
2176 { 0x06, 0xe3f8 },
2177 { 0x06, 0x7da5 },
2178 { 0x06, 0x1111 },
2179 { 0x06, 0x12d2 },
2180 { 0x06, 0x40d6 },
2181 { 0x06, 0x4444 },
2182 { 0x06, 0x0281 },
2183 { 0x06, 0xc6d2 },
2184 { 0x06, 0xa0d6 },
2185 { 0x06, 0xaaaa },
2186 { 0x06, 0x0281 },
2187 { 0x06, 0xc6ae },
2188 { 0x06, 0x0fa5 },
2189 { 0x06, 0x4444 },
2190 { 0x06, 0x02ae },
2191 { 0x06, 0x4da5 },
2192 { 0x06, 0xaaaa },
2193 { 0x06, 0x02ae },
2194 { 0x06, 0x47af },
2195 { 0x06, 0x81c2 },
2196 { 0x06, 0xee83 },
2197 { 0x06, 0x4e00 },
2198 { 0x06, 0xee83 },
2199 { 0x06, 0x4d0f },
2200 { 0x06, 0xee83 },
2201 { 0x06, 0x4c0f },
2202 { 0x06, 0xee83 },
2203 { 0x06, 0x4f00 },
2204 { 0x06, 0xee83 },
2205 { 0x06, 0x5100 },
2206 { 0x06, 0xee83 },
2207 { 0x06, 0x4aff },
2208 { 0x06, 0xee83 },
2209 { 0x06, 0x4bff },
2210 { 0x06, 0xe083 },
2211 { 0x06, 0x30e1 },
2212 { 0x06, 0x8331 },
2213 { 0x06, 0x58fe },
2214 { 0x06, 0xe4f8 },
2215 { 0x06, 0x8ae5 },
2216 { 0x06, 0xf88b },
2217 { 0x06, 0xe083 },
2218 { 0x06, 0x32e1 },
2219 { 0x06, 0x8333 },
2220 { 0x06, 0x590f },
2221 { 0x06, 0xe283 },
2222 { 0x06, 0x4d0c },
2223 { 0x06, 0x245a },
2224 { 0x06, 0xf01e },
2225 { 0x06, 0x12e4 },
2226 { 0x06, 0xf88c },
2227 { 0x06, 0xe5f8 },
2228 { 0x06, 0x8daf },
2229 { 0x06, 0x81c2 },
2230 { 0x06, 0xe083 },
2231 { 0x06, 0x4f10 },
2232 { 0x06, 0xe483 },
2233 { 0x06, 0x4fe0 },
2234 { 0x06, 0x834e },
2235 { 0x06, 0x7800 },
2236 { 0x06, 0x9f0a },
2237 { 0x06, 0xe083 },
2238 { 0x06, 0x4fa0 },
2239 { 0x06, 0x10a5 },
2240 { 0x06, 0xee83 },
2241 { 0x06, 0x4e01 },
2242 { 0x06, 0xe083 },
2243 { 0x06, 0x4e78 },
2244 { 0x06, 0x059e },
2245 { 0x06, 0x9ae0 },
2246 { 0x06, 0x834e },
2247 { 0x06, 0x7804 },
2248 { 0x06, 0x9e10 },
2249 { 0x06, 0xe083 },
2250 { 0x06, 0x4e78 },
2251 { 0x06, 0x039e },
2252 { 0x06, 0x0fe0 },
2253 { 0x06, 0x834e },
2254 { 0x06, 0x7801 },
2255 { 0x06, 0x9e05 },
2256 { 0x06, 0xae0c },
2257 { 0x06, 0xaf81 },
2258 { 0x06, 0xa7af },
2259 { 0x06, 0x8152 },
2260 { 0x06, 0xaf81 },
2261 { 0x06, 0x8baf },
2262 { 0x06, 0x81c2 },
2263 { 0x06, 0xee83 },
2264 { 0x06, 0x4800 },
2265 { 0x06, 0xee83 },
2266 { 0x06, 0x4900 },
2267 { 0x06, 0xe083 },
2268 { 0x06, 0x5110 },
2269 { 0x06, 0xe483 },
2270 { 0x06, 0x5158 },
2271 { 0x06, 0x019f },
2272 { 0x06, 0xead0 },
2273 { 0x06, 0x00d1 },
2274 { 0x06, 0x801f },
2275 { 0x06, 0x66e2 },
2276 { 0x06, 0xf8ea },
2277 { 0x06, 0xe3f8 },
2278 { 0x06, 0xeb5a },
2279 { 0x06, 0xf81e },
2280 { 0x06, 0x20e6 },
2281 { 0x06, 0xf8ea },
2282 { 0x06, 0xe5f8 },
2283 { 0x06, 0xebd3 },
2284 { 0x06, 0x02b3 },
2285 { 0x06, 0xfee2 },
2286 { 0x06, 0xf87c },
2287 { 0x06, 0xef32 },
2288 { 0x06, 0x5b80 },
2289 { 0x06, 0xe3f8 },
2290 { 0x06, 0x7d9e },
2291 { 0x06, 0x037d },
2292 { 0x06, 0xffff },
2293 { 0x06, 0x0d58 },
2294 { 0x06, 0x1c55 },
2295 { 0x06, 0x1a65 },
2296 { 0x06, 0x11a1 },
2297 { 0x06, 0x90d3 },
2298 { 0x06, 0xe283 },
2299 { 0x06, 0x48e3 },
2300 { 0x06, 0x8349 },
2301 { 0x06, 0x1b56 },
2302 { 0x06, 0xab08 },
2303 { 0x06, 0xef56 },
2304 { 0x06, 0xe683 },
2305 { 0x06, 0x48e7 },
2306 { 0x06, 0x8349 },
2307 { 0x06, 0x10d1 },
2308 { 0x06, 0x801f },
2309 { 0x06, 0x66a0 },
2310 { 0x06, 0x04b9 },
2311 { 0x06, 0xe283 },
2312 { 0x06, 0x48e3 },
2313 { 0x06, 0x8349 },
2314 { 0x06, 0xef65 },
2315 { 0x06, 0xe283 },
2316 { 0x06, 0x4ae3 },
2317 { 0x06, 0x834b },
2318 { 0x06, 0x1b56 },
2319 { 0x06, 0xaa0e },
2320 { 0x06, 0xef56 },
2321 { 0x06, 0xe683 },
2322 { 0x06, 0x4ae7 },
2323 { 0x06, 0x834b },
2324 { 0x06, 0xe283 },
2325 { 0x06, 0x4de6 },
2326 { 0x06, 0x834c },
2327 { 0x06, 0xe083 },
2328 { 0x06, 0x4da0 },
2329 { 0x06, 0x000c },
2330 { 0x06, 0xaf81 },
2331 { 0x06, 0x8be0 },
2332 { 0x06, 0x834d },
2333 { 0x06, 0x10e4 },
2334 { 0x06, 0x834d },
2335 { 0x06, 0xae04 },
2336 { 0x06, 0x80e4 },
2337 { 0x06, 0x834d },
2338 { 0x06, 0xe083 },
2339 { 0x06, 0x4e78 },
2340 { 0x06, 0x039e },
2341 { 0x06, 0x0be0 },
2342 { 0x06, 0x834e },
2343 { 0x06, 0x7804 },
2344 { 0x06, 0x9e04 },
2345 { 0x06, 0xee83 },
2346 { 0x06, 0x4e02 },
2347 { 0x06, 0xe083 },
2348 { 0x06, 0x32e1 },
2349 { 0x06, 0x8333 },
2350 { 0x06, 0x590f },
2351 { 0x06, 0xe283 },
2352 { 0x06, 0x4d0c },
2353 { 0x06, 0x245a },
2354 { 0x06, 0xf01e },
2355 { 0x06, 0x12e4 },
2356 { 0x06, 0xf88c },
2357 { 0x06, 0xe5f8 },
2358 { 0x06, 0x8de0 },
2359 { 0x06, 0x8330 },
2360 { 0x06, 0xe183 },
2361 { 0x06, 0x3168 },
2362 { 0x06, 0x01e4 },
2363 { 0x06, 0xf88a },
2364 { 0x06, 0xe5f8 },
2365 { 0x06, 0x8bae },
2366 { 0x06, 0x37ee },
2367 { 0x06, 0x834e },
2368 { 0x06, 0x03e0 },
2369 { 0x06, 0x834c },
2370 { 0x06, 0xe183 },
2371 { 0x06, 0x4d1b },
2372 { 0x06, 0x019e },
2373 { 0x06, 0x04aa },
2374 { 0x06, 0xa1ae },
2375 { 0x06, 0xa8ee },
2376 { 0x06, 0x834e },
2377 { 0x06, 0x04ee },
2378 { 0x06, 0x834f },
2379 { 0x06, 0x00ae },
2380 { 0x06, 0xabe0 },
2381 { 0x06, 0x834f },
2382 { 0x06, 0x7803 },
2383 { 0x06, 0x9f14 },
2384 { 0x06, 0xee83 },
2385 { 0x06, 0x4e05 },
2386 { 0x06, 0xd240 },
2387 { 0x06, 0xd655 },
2388 { 0x06, 0x5402 },
2389 { 0x06, 0x81c6 },
2390 { 0x06, 0xd2a0 },
2391 { 0x06, 0xd6ba },
2392 { 0x06, 0x0002 },
2393 { 0x06, 0x81c6 },
2394 { 0x06, 0xfefd },
2395 { 0x06, 0xfc05 },
2396 { 0x06, 0xf8e0 },
2397 { 0x06, 0xf860 },
2398 { 0x06, 0xe1f8 },
2399 { 0x06, 0x6168 },
2400 { 0x06, 0x02e4 },
2401 { 0x06, 0xf860 },
2402 { 0x06, 0xe5f8 },
2403 { 0x06, 0x61e0 },
2404 { 0x06, 0xf848 },
2405 { 0x06, 0xe1f8 },
2406 { 0x06, 0x4958 },
2407 { 0x06, 0x0f1e },
2408 { 0x06, 0x02e4 },
2409 { 0x06, 0xf848 },
2410 { 0x06, 0xe5f8 },
2411 { 0x06, 0x49d0 },
2412 { 0x06, 0x0002 },
2413 { 0x06, 0x820a },
2414 { 0x06, 0xbf83 },
2415 { 0x06, 0x50ef },
2416 { 0x06, 0x46dc },
2417 { 0x06, 0x19dd },
2418 { 0x06, 0xd001 },
2419 { 0x06, 0x0282 },
2420 { 0x06, 0x0a02 },
2421 { 0x06, 0x8226 },
2422 { 0x06, 0xe0f8 },
2423 { 0x06, 0x60e1 },
2424 { 0x06, 0xf861 },
2425 { 0x06, 0x58fd },
2426 { 0x06, 0xe4f8 },
2427 { 0x06, 0x60e5 },
2428 { 0x06, 0xf861 },
2429 { 0x06, 0xfc04 },
2430 { 0x06, 0xf9fa },
2431 { 0x06, 0xfbc6 },
2432 { 0x06, 0xbff8 },
2433 { 0x06, 0x40be },
2434 { 0x06, 0x8350 },
2435 { 0x06, 0xa001 },
2436 { 0x06, 0x0107 },
2437 { 0x06, 0x1b89 },
2438 { 0x06, 0xcfd2 },
2439 { 0x06, 0x08eb },
2440 { 0x06, 0xdb19 },
2441 { 0x06, 0xb2fb },
2442 { 0x06, 0xfffe },
2443 { 0x06, 0xfd04 },
2444 { 0x06, 0xf8e0 },
2445 { 0x06, 0xf848 },
2446 { 0x06, 0xe1f8 },
2447 { 0x06, 0x4968 },
2448 { 0x06, 0x08e4 },
2449 { 0x06, 0xf848 },
2450 { 0x06, 0xe5f8 },
2451 { 0x06, 0x4958 },
2452 { 0x06, 0xf7e4 },
2453 { 0x06, 0xf848 },
2454 { 0x06, 0xe5f8 },
2455 { 0x06, 0x49fc },
2456 { 0x06, 0x044d },
2457 { 0x06, 0x2000 },
2458 { 0x06, 0x024e },
2459 { 0x06, 0x2200 },
2460 { 0x06, 0x024d },
2461 { 0x06, 0xdfff },
2462 { 0x06, 0x014e },
2463 { 0x06, 0xddff },
2464 { 0x06, 0x0100 },
2465 { 0x05, 0x83d8 },
2466 { 0x06, 0x8000 },
2467 { 0x03, 0xdc00 },
2468 { 0x05, 0xfff6 },
2469 { 0x06, 0x00fc },
2470 { 0x1f, 0x0000 },
2471
2472 { 0x1f, 0x0000 },
2473 { 0x0d, 0xf880 },
2474 { 0x1f, 0x0000 }
1677 }; 2475 };
1678 2476
1679 rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2477 rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
1680 2478
1681 if (mdio_read(ioaddr, 0x06) == 0xc400) { 2479 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
1682 struct phy_reg phy_reg_init_1[] = { 2480 struct phy_reg phy_reg_init[] = {
2481 { 0x1f, 0x0002 },
2482 { 0x05, 0x669a },
1683 { 0x1f, 0x0005 }, 2483 { 0x1f, 0x0005 },
1684 { 0x01, 0x0300 }, 2484 { 0x05, 0x8330 },
1685 { 0x1f, 0x0000 }, 2485 { 0x06, 0x669a },
1686 { 0x11, 0x401c }, 2486
1687 { 0x16, 0x4100 }, 2487 { 0x1f, 0x0002 }
2488 };
2489 int val;
2490
2491 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2492
2493 val = mdio_read(ioaddr, 0x0d);
2494 if ((val & 0x00ff) != 0x006c) {
2495 u32 set[] = {
2496 0x0065, 0x0066, 0x0067, 0x0068,
2497 0x0069, 0x006a, 0x006b, 0x006c
2498 };
2499 int i;
2500
2501 mdio_write(ioaddr, 0x1f, 0x0002);
2502
2503 val &= 0xff00;
2504 for (i = 0; i < ARRAY_SIZE(set); i++)
2505 mdio_write(ioaddr, 0x0d, val | set[i]);
2506 }
2507 } else {
2508 struct phy_reg phy_reg_init[] = {
2509 { 0x1f, 0x0002 },
2510 { 0x05, 0x2642 },
1688 { 0x1f, 0x0005 }, 2511 { 0x1f, 0x0005 },
1689 { 0x07, 0x0010 }, 2512 { 0x05, 0x8330 },
1690 { 0x05, 0x83dc }, 2513 { 0x06, 0x2642 }
1691 { 0x06, 0x087d },
1692 { 0x05, 0x8300 },
1693 { 0x06, 0x0101 },
1694 { 0x06, 0x05f8 },
1695 { 0x06, 0xf9fa },
1696 { 0x06, 0xfbef },
1697 { 0x06, 0x79e2 },
1698 { 0x06, 0x835f },
1699 { 0x06, 0xe0f8 },
1700 { 0x06, 0x9ae1 },
1701 { 0x06, 0xf89b },
1702 { 0x06, 0xef31 },
1703 { 0x06, 0x3b65 },
1704 { 0x06, 0xaa07 },
1705 { 0x06, 0x81e4 },
1706 { 0x06, 0xf89a },
1707 { 0x06, 0xe5f8 },
1708 { 0x06, 0x9baf },
1709 { 0x06, 0x06ae },
1710 { 0x05, 0x83dc },
1711 { 0x06, 0x8300 },
1712 }; 2514 };
1713 2515
1714 rtl_phy_write(ioaddr, phy_reg_init_1, 2516 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1715 ARRAY_SIZE(phy_reg_init_1));
1716 } 2517 }
1717 2518
1718 mdio_write(ioaddr, 0x1f, 0x0000); 2519 mdio_write(ioaddr, 0x1f, 0x0002);
2520 mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
2521 mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
2522
2523 mdio_write(ioaddr, 0x1f, 0x0001);
2524 mdio_write(ioaddr, 0x17, 0x0cc0);
2525
2526 mdio_write(ioaddr, 0x1f, 0x0002);
2527 mdio_patch(ioaddr, 0x0f, 0x0017);
2528
2529 rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
2530}
2531
2532static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
2533{
2534 struct phy_reg phy_reg_init[] = {
2535 { 0x1f, 0x0002 },
2536 { 0x10, 0x0008 },
2537 { 0x0d, 0x006c },
2538
2539 { 0x1f, 0x0000 },
2540 { 0x0d, 0xf880 },
2541
2542 { 0x1f, 0x0001 },
2543 { 0x17, 0x0cc0 },
2544
2545 { 0x1f, 0x0001 },
2546 { 0x0b, 0xa4d8 },
2547 { 0x09, 0x281c },
2548 { 0x07, 0x2883 },
2549 { 0x0a, 0x6b35 },
2550 { 0x1d, 0x3da4 },
2551 { 0x1c, 0xeffd },
2552 { 0x14, 0x7f52 },
2553 { 0x18, 0x7fc6 },
2554 { 0x08, 0x0601 },
2555 { 0x06, 0x4063 },
2556 { 0x10, 0xf074 },
2557 { 0x1f, 0x0003 },
2558 { 0x13, 0x0789 },
2559 { 0x12, 0xf4bd },
2560 { 0x1a, 0x04fd },
2561 { 0x14, 0x84b0 },
2562 { 0x1f, 0x0000 },
2563 { 0x00, 0x9200 },
2564
2565 { 0x1f, 0x0005 },
2566 { 0x01, 0x0340 },
2567 { 0x1f, 0x0001 },
2568 { 0x04, 0x4000 },
2569 { 0x03, 0x1d21 },
2570 { 0x02, 0x0c32 },
2571 { 0x01, 0x0200 },
2572 { 0x00, 0x5554 },
2573 { 0x04, 0x4800 },
2574 { 0x04, 0x4000 },
2575 { 0x04, 0xf000 },
2576 { 0x03, 0xdf01 },
2577 { 0x02, 0xdf20 },
2578 { 0x01, 0x101a },
2579 { 0x00, 0xa0ff },
2580 { 0x04, 0xf800 },
2581 { 0x04, 0xf000 },
2582 { 0x1f, 0x0000 },
2583
2584 { 0x1f, 0x0007 },
2585 { 0x1e, 0x0023 },
2586 { 0x16, 0x0000 },
2587 { 0x1f, 0x0000 }
2588 };
2589
2590 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1719} 2591}
1720 2592
1721static void rtl8102e_hw_phy_config(void __iomem *ioaddr) 2593static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
@@ -1792,7 +2664,13 @@ static void rtl_hw_phy_config(struct net_device *dev)
1792 rtl8168cp_2_hw_phy_config(ioaddr); 2664 rtl8168cp_2_hw_phy_config(ioaddr);
1793 break; 2665 break;
1794 case RTL_GIGA_MAC_VER_25: 2666 case RTL_GIGA_MAC_VER_25:
1795 rtl8168d_hw_phy_config(ioaddr); 2667 rtl8168d_1_hw_phy_config(ioaddr);
2668 break;
2669 case RTL_GIGA_MAC_VER_26:
2670 rtl8168d_2_hw_phy_config(ioaddr);
2671 break;
2672 case RTL_GIGA_MAC_VER_27:
2673 rtl8168d_3_hw_phy_config(ioaddr);
1796 break; 2674 break;
1797 2675
1798 default: 2676 default:
@@ -2322,6 +3200,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2322 } 3200 }
2323 3201
2324 rtl8169_init_phy(dev, tp); 3202 rtl8169_init_phy(dev, tp);
3203
3204 /*
3205 * Pretend we are using VLANs; This bypasses a nasty bug where
3206 * Interrupts stop flowing on high load on 8110SCd controllers.
3207 */
3208 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3209 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3210
2325 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3211 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
2326 3212
2327out: 3213out:
@@ -2863,6 +3749,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
2863 break; 3749 break;
2864 3750
2865 case RTL_GIGA_MAC_VER_25: 3751 case RTL_GIGA_MAC_VER_25:
3752 case RTL_GIGA_MAC_VER_26:
3753 case RTL_GIGA_MAC_VER_27:
2866 rtl_hw_start_8168d(ioaddr, pdev); 3754 rtl_hw_start_8168d(ioaddr, pdev);
2867 break; 3755 break;
2868 3756
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index ee366c5a8fa3..c9c70ab0cce0 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -36,6 +36,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
36 36
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <linux/sched.h>
39#include <linux/string.h> 40#include <linux/string.h>
40#include <linux/interrupt.h> 41#include <linux/interrupt.h>
41#include <linux/errno.h> 42#include <linux/errno.h>
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 07a7e4b8f8fc..cc4b2f99989d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
884 int count; 884 int count;
885 int cpu; 885 int cpu;
886 886
887 if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) { 887 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
888 printk(KERN_WARNING 888 printk(KERN_WARNING
889 "sfc: RSS disabled due to allocation failure\n"); 889 "sfc: RSS disabled due to allocation failure\n");
890 return 1; 890 return 1;
891 } 891 }
892 892
893 cpumask_clear(core_mask);
894 count = 0; 893 count = 0;
895 for_each_online_cpu(cpu) { 894 for_each_online_cpu(cpu) {
896 if (!cpumask_test_cpu(cpu, core_mask)) { 895 if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 01f9432c31ef..98bff5ada09a 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -444,7 +444,8 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
444 * the appropriate LRO method 444 * the appropriate LRO method
445 */ 445 */
446static void efx_rx_packet_lro(struct efx_channel *channel, 446static void efx_rx_packet_lro(struct efx_channel *channel,
447 struct efx_rx_buffer *rx_buf) 447 struct efx_rx_buffer *rx_buf,
448 bool checksummed)
448{ 449{
449 struct napi_struct *napi = &channel->napi_str; 450 struct napi_struct *napi = &channel->napi_str;
450 451
@@ -466,7 +467,8 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
466 skb->len = rx_buf->len; 467 skb->len = rx_buf->len;
467 skb->data_len = rx_buf->len; 468 skb->data_len = rx_buf->len;
468 skb->truesize += rx_buf->len; 469 skb->truesize += rx_buf->len;
469 skb->ip_summed = CHECKSUM_UNNECESSARY; 470 skb->ip_summed =
471 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
470 472
471 napi_gro_frags(napi); 473 napi_gro_frags(napi);
472 474
@@ -475,6 +477,7 @@ out:
475 rx_buf->page = NULL; 477 rx_buf->page = NULL;
476 } else { 478 } else {
477 EFX_BUG_ON_PARANOID(!rx_buf->skb); 479 EFX_BUG_ON_PARANOID(!rx_buf->skb);
480 EFX_BUG_ON_PARANOID(!checksummed);
478 481
479 napi_gro_receive(napi, rx_buf->skb); 482 napi_gro_receive(napi, rx_buf->skb);
480 rx_buf->skb = NULL; 483 rx_buf->skb = NULL;
@@ -570,7 +573,7 @@ void __efx_rx_packet(struct efx_channel *channel,
570 } 573 }
571 574
572 if (likely(checksummed || rx_buf->page)) { 575 if (likely(checksummed || rx_buf->page)) {
573 efx_rx_packet_lro(channel, rx_buf); 576 efx_rx_packet_lro(channel, rx_buf, checksummed);
574 goto done; 577 goto done;
575 } 578 }
576 579
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ecf3279fbef5..f4dfd1f679a9 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
826 826
827static struct platform_driver sgiseeq_driver = { 827static struct platform_driver sgiseeq_driver = {
828 .probe = sgiseeq_probe, 828 .probe = sgiseeq_probe,
829 .remove = __devexit_p(sgiseeq_remove), 829 .remove = __exit_p(sgiseeq_remove),
830 .driver = { 830 .driver = {
831 .name = "sgiseeq", 831 .name = "sgiseeq",
832 .owner = THIS_MODULE, 832 .owner = THIS_MODULE,
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index f49d0800c1d1..528b912a4b0d 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -30,6 +30,7 @@
30#include <linux/phy.h> 30#include <linux/phy.h>
31#include <linux/cache.h> 31#include <linux/cache.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <asm/cacheflush.h>
33 34
34#include "sh_eth.h" 35#include "sh_eth.h"
35 36
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 97949d0a699b..c072f7f36acf 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -52,6 +52,7 @@
52#include <linux/module.h> 52#include <linux/module.h>
53#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
54#include <linux/kernel.h> 54#include <linux/kernel.h>
55#include <linux/sched.h>
55#include <linux/string.h> 56#include <linux/string.h>
56#include <linux/timer.h> 57#include <linux/timer.h>
57#include <linux/errno.h> 58#include <linux/errno.h>
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 38a508b4aad9..b27156eaf267 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -73,6 +73,7 @@ static const char * const boot_msg =
73 73
74/* Include files */ 74/* Include files */
75 75
76#include <linux/capability.h>
76#include <linux/module.h> 77#include <linux/module.h>
77#include <linux/kernel.h> 78#include <linux/kernel.h>
78#include <linux/errno.h> 79#include <linux/errno.h>
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 55bad4081966..8f5414348e86 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -37,6 +37,7 @@
37#include <linux/crc32.h> 37#include <linux/crc32.h>
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/debugfs.h> 39#include <linux/debugfs.h>
40#include <linux/sched.h>
40#include <linux/seq_file.h> 41#include <linux/seq_file.h>
41#include <linux/mii.h> 42#include <linux/mii.h>
42#include <asm/irq.h> 43#include <asm/irq.h>
@@ -3935,11 +3936,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3935#endif 3936#endif
3936 3937
3937 err = -ENOMEM; 3938 err = -ENOMEM;
3938 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3939 /* space for skge@pci:0000:04:00.0 */
3940 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
3941 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
3939 if (!hw) { 3942 if (!hw) {
3940 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 3943 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3941 goto err_out_free_regions; 3944 goto err_out_free_regions;
3942 } 3945 }
3946 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
3943 3947
3944 hw->pdev = pdev; 3948 hw->pdev = pdev;
3945 spin_lock_init(&hw->hw_lock); 3949 spin_lock_init(&hw->hw_lock);
@@ -3974,7 +3978,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3974 goto err_out_free_netdev; 3978 goto err_out_free_netdev;
3975 } 3979 }
3976 3980
3977 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3981 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
3978 if (err) { 3982 if (err) {
3979 dev_err(&pdev->dev, "%s: cannot assign irq %d\n", 3983 dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
3980 dev->name, pdev->irq); 3984 dev->name, pdev->irq);
@@ -3982,14 +3986,17 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3982 } 3986 }
3983 skge_show_addr(dev); 3987 skge_show_addr(dev);
3984 3988
3985 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { 3989 if (hw->ports > 1) {
3986 if (register_netdev(dev1) == 0) 3990 dev1 = skge_devinit(hw, 1, using_dac);
3991 if (dev1 && register_netdev(dev1) == 0)
3987 skge_show_addr(dev1); 3992 skge_show_addr(dev1);
3988 else { 3993 else {
3989 /* Failure to register second port need not be fatal */ 3994 /* Failure to register second port need not be fatal */
3990 dev_warn(&pdev->dev, "register of second port failed\n"); 3995 dev_warn(&pdev->dev, "register of second port failed\n");
3991 hw->dev[1] = NULL; 3996 hw->dev[1] = NULL;
3992 free_netdev(dev1); 3997 hw->ports = 1;
3998 if (dev1)
3999 free_netdev(dev1);
3993 } 4000 }
3994 } 4001 }
3995 pci_set_drvdata(pdev, hw); 4002 pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 17caccbb7685..831de1b6e96e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2423,6 +2423,8 @@ struct skge_hw {
2423 u16 phy_addr; 2423 u16 phy_addr;
2424 spinlock_t phy_lock; 2424 spinlock_t phy_lock;
2425 struct tasklet_struct phy_task; 2425 struct tasklet_struct phy_task;
2426
2427 char irq_name[0]; /* skge@pci:000:04:00.0 */
2426}; 2428};
2427 2429
2428enum pause_control { 2430enum pause_control {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 15140f9f2e92..2ab5c39f33ca 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1497,7 +1497,6 @@ static int sky2_up(struct net_device *dev)
1497 if (ramsize > 0) { 1497 if (ramsize > 0) {
1498 u32 rxspace; 1498 u32 rxspace;
1499 1499
1500 hw->flags |= SKY2_HW_RAM_BUFFER;
1501 pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize); 1500 pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
1502 if (ramsize < 16) 1501 if (ramsize < 16)
1503 rxspace = ramsize / 2; 1502 rxspace = ramsize / 2;
@@ -2926,6 +2925,9 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2926 ++hw->ports; 2925 ++hw->ports;
2927 } 2926 }
2928 2927
2928 if (sky2_read8(hw, B2_E_0))
2929 hw->flags |= SKY2_HW_RAM_BUFFER;
2930
2929 return 0; 2931 return 0;
2930} 2932}
2931 2933
@@ -4485,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4485 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; 4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4486 4488
4487 err = -ENOMEM; 4489 err = -ENOMEM;
4488 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 4490
4491 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
4492 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
4489 if (!hw) { 4493 if (!hw) {
4490 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 4494 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4491 goto err_out_free_regions; 4495 goto err_out_free_regions;
4492 } 4496 }
4493 4497
4494 hw->pdev = pdev; 4498 hw->pdev = pdev;
4499 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
4495 4500
4496 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 4501 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4497 if (!hw->regs) { 4502 if (!hw->regs) {
@@ -4537,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4537 4542
4538 err = request_irq(pdev->irq, sky2_intr, 4543 err = request_irq(pdev->irq, sky2_intr,
4539 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, 4544 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
4540 dev->name, hw); 4545 hw->irq_name, hw);
4541 if (err) { 4546 if (err) {
4542 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 4547 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4543 goto err_out_unregister; 4548 goto err_out_unregister;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index e0f23a101043..ed54129698b4 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2085,6 +2085,8 @@ struct sky2_hw {
2085 struct timer_list watchdog_timer; 2085 struct timer_list watchdog_timer;
2086 struct work_struct restart_work; 2086 struct work_struct restart_work;
2087 wait_queue_head_t msi_wait; 2087 wait_queue_head_t msi_wait;
2088
2089 char irq_name[0];
2088}; 2090};
2089 2091
2090static inline int sky2_is_copper(const struct sky2_hw *hw) 2092static inline int sky2_is_copper(const struct sky2_hw *hw)
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index e17c535a577e..fe3cebb984de 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -67,6 +67,7 @@
67#include <asm/system.h> 67#include <asm/system.h>
68#include <asm/uaccess.h> 68#include <asm/uaccess.h>
69#include <linux/bitops.h> 69#include <linux/bitops.h>
70#include <linux/sched.h>
70#include <linux/string.h> 71#include <linux/string.h>
71#include <linux/mm.h> 72#include <linux/mm.h>
72#include <linux/interrupt.h> 73#include <linux/interrupt.h>
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
new file mode 100644
index 000000000000..35eaa5251d7f
--- /dev/null
+++ b/drivers/net/stmmac/Kconfig
@@ -0,0 +1,53 @@
1config STMMAC_ETH
2 tristate "STMicroelectronics 10/100/1000 Ethernet driver"
3 select MII
4 select PHYLIB
5 depends on NETDEVICES && CPU_SUBTYPE_ST40
6 help
7 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
8 controllers. ST Ethernet IPs are built around a Synopsys IP Core.
9
10if STMMAC_ETH
11
12config STMMAC_DA
13 bool "STMMAC DMA arbitration scheme"
14 default n
15 help
16 Selecting this option, rx has priority over Tx (only for Giga
17 Ethernet device).
18 By default, the DMA arbitration scheme is based on Round-robin
19 (rx:tx priority is 1:1).
20
21config STMMAC_DUAL_MAC
22 bool "STMMAC: dual mac support (EXPERIMENTAL)"
23 default n
24 depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
25 help
26 Some ST SoCs (for example the stx7141 and stx7200c2) have two
27 Ethernet Controllers. This option turns on the second Ethernet
28 device on this kind of platforms.
29
30config STMMAC_TIMER
31 bool "STMMAC Timer optimisation"
32 default n
33 help
34 Use an external timer for mitigating the number of network
35 interrupts.
36
37choice
38 prompt "Select Timer device"
39 depends on STMMAC_TIMER
40
41config STMMAC_TMU_TIMER
42 bool "TMU channel 2"
43 depends on CPU_SH4
44 help
45
46config STMMAC_RTC_TIMER
47 bool "Real time clock"
48 depends on RTC_CLASS
49 help
50
51endchoice
52
53endif
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
new file mode 100644
index 000000000000..b2d7a5564dfa
--- /dev/null
+++ b/drivers/net/stmmac/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
4 mac100.o gmac.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
new file mode 100644
index 000000000000..e49e5188e887
--- /dev/null
+++ b/drivers/net/stmmac/common.h
@@ -0,0 +1,330 @@
1/*******************************************************************************
2 STMMAC Common Header File
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include "descs.h"
26#include <linux/io.h>
27
28/* *********************************************
29 DMA CRS Control and Status Register Mapping
30 * *********************************************/
31#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
32#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
33#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
34#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
35#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
36#define DMA_STATUS 0x00001014 /* Status Register */
37#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
38#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
39#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
40#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
41#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
42
43/* ********************************
44 DMA Control register defines
45 * ********************************/
46#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
47#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
48
49/* **************************************
50 DMA Interrupt Enable register defines
51 * **************************************/
52/**** NORMAL INTERRUPT ****/
53#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
54#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
55#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
56#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
57#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
58
59#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
60 DMA_INTR_ENA_TIE)
61
62/**** ABNORMAL INTERRUPT ****/
63#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
64#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
65#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
66#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
67#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
68#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
69#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
70#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
71#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
72#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
73
74#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
75 DMA_INTR_ENA_UNE)
76
77/* DMA default interrupt mask */
78#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
79
80/* ****************************
81 * DMA Status register defines
82 * ****************************/
83#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
84#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
85#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
86#define DMA_STATUS_GMI 0x08000000
87#define DMA_STATUS_GLI 0x04000000
88#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
89#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
90#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
91#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
92#define DMA_STATUS_TS_SHIFT 20
93#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
94#define DMA_STATUS_RS_SHIFT 17
95#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
96#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
97#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
98#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
99#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
100#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
101#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
102#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
103#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
104#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
105#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
106#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
107#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
108#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
109#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
110
111/* Other defines */
112#define HASH_TABLE_SIZE 64
113#define PAUSE_TIME 0x200
114
115/* Flow Control defines */
116#define FLOW_OFF 0
117#define FLOW_RX 1
118#define FLOW_TX 2
119#define FLOW_AUTO (FLOW_TX | FLOW_RX)
120
121/* DMA STORE-AND-FORWARD Operation Mode */
122#define SF_DMA_MODE 1
123
124#define HW_CSUM 1
125#define NO_HW_CSUM 0
126
127/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
128#define BUF_SIZE_16KiB 16384
129#define BUF_SIZE_8KiB 8192
130#define BUF_SIZE_4KiB 4096
131#define BUF_SIZE_2KiB 2048
132
133/* Power Down and WOL */
134#define PMT_NOT_SUPPORTED 0
135#define PMT_SUPPORTED 1
136
137/* Common MAC defines */
138#define MAC_CTRL_REG 0x00000000 /* MAC Control */
139#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
140#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
141
142/* MAC Management Counters register */
143#define MMC_CONTROL 0x00000100 /* MMC Control */
144#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
145#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
146#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
147#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
148
149#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
150#define MMC_CONTROL_MAX_FRM_SHIFT 3
151#define MMC_CONTROL_MAX_FRAME 0x7FF
152
153struct stmmac_extra_stats {
154 /* Transmit errors */
155 unsigned long tx_underflow ____cacheline_aligned;
156 unsigned long tx_carrier;
157 unsigned long tx_losscarrier;
158 unsigned long tx_heartbeat;
159 unsigned long tx_deferred;
160 unsigned long tx_vlan;
161 unsigned long tx_jabber;
162 unsigned long tx_frame_flushed;
163 unsigned long tx_payload_error;
164 unsigned long tx_ip_header_error;
165 /* Receive errors */
166 unsigned long rx_desc;
167 unsigned long rx_partial;
168 unsigned long rx_runt;
169 unsigned long rx_toolong;
170 unsigned long rx_collision;
171 unsigned long rx_crc;
172 unsigned long rx_lenght;
173 unsigned long rx_mii;
174 unsigned long rx_multicast;
175 unsigned long rx_gmac_overflow;
176 unsigned long rx_watchdog;
177 unsigned long da_rx_filter_fail;
178 unsigned long sa_rx_filter_fail;
179 unsigned long rx_missed_cntr;
180 unsigned long rx_overflow_cntr;
181 unsigned long rx_vlan;
182 /* Tx/Rx IRQ errors */
183 unsigned long tx_undeflow_irq;
184 unsigned long tx_process_stopped_irq;
185 unsigned long tx_jabber_irq;
186 unsigned long rx_overflow_irq;
187 unsigned long rx_buf_unav_irq;
188 unsigned long rx_process_stopped_irq;
189 unsigned long rx_watchdog_irq;
190 unsigned long tx_early_irq;
191 unsigned long fatal_bus_error_irq;
192 /* Extra info */
193 unsigned long threshold;
194 unsigned long tx_pkt_n;
195 unsigned long rx_pkt_n;
196 unsigned long poll_n;
197 unsigned long sched_timer_n;
198 unsigned long normal_irq_n;
199};
200
201/* GMAC core can compute the checksums in HW. */
202enum rx_frame_status {
203 good_frame = 0,
204 discard_frame = 1,
205 csum_none = 2,
206};
207
208static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
209 unsigned int high, unsigned int low)
210{
211 unsigned long data;
212
213 data = (addr[5] << 8) | addr[4];
214 writel(data, ioaddr + high);
215 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
216 writel(data, ioaddr + low);
217
218 return;
219}
220
221static inline void stmmac_get_mac_addr(unsigned long ioaddr,
222 unsigned char *addr, unsigned int high,
223 unsigned int low)
224{
225 unsigned int hi_addr, lo_addr;
226
227 /* Read the MAC address from the hardware */
228 hi_addr = readl(ioaddr + high);
229 lo_addr = readl(ioaddr + low);
230
231 /* Extract the MAC address from the high and low words */
232 addr[0] = lo_addr & 0xff;
233 addr[1] = (lo_addr >> 8) & 0xff;
234 addr[2] = (lo_addr >> 16) & 0xff;
235 addr[3] = (lo_addr >> 24) & 0xff;
236 addr[4] = hi_addr & 0xff;
237 addr[5] = (hi_addr >> 8) & 0xff;
238
239 return;
240}
241
242struct stmmac_ops {
243 /* MAC core initialization */
244 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
245 /* DMA core initialization */
246 int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
247 /* Dump MAC registers */
248 void (*dump_mac_regs) (unsigned long ioaddr);
249 /* Dump DMA registers */
250 void (*dump_dma_regs) (unsigned long ioaddr);
251 /* Set tx/rx threshold in the csr6 register
252 * An invalid value enables the store-and-forward mode */
253 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
254 /* To track extra statistic (if supported) */
255 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
256 unsigned long ioaddr);
257 /* RX descriptor ring initialization */
258 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
259 int disable_rx_ic);
260 /* TX descriptor ring initialization */
261 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
262
263 /* Invoked by the xmit function to prepare the tx descriptor */
264 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
265 int csum_flag);
266 /* Set/get the owner of the descriptor */
267 void (*set_tx_owner) (struct dma_desc *p);
268 int (*get_tx_owner) (struct dma_desc *p);
269 /* Invoked by the xmit function to close the tx descriptor */
270 void (*close_tx_desc) (struct dma_desc *p);
271 /* Clean the tx descriptor as soon as the tx irq is received */
272 void (*release_tx_desc) (struct dma_desc *p);
273 /* Clear interrupt on tx frame completion. When this bit is
274 * set an interrupt happens as soon as the frame is transmitted */
275 void (*clear_tx_ic) (struct dma_desc *p);
276 /* Last tx segment reports the transmit status */
277 int (*get_tx_ls) (struct dma_desc *p);
278 /* Return the transmit status looking at the TDES1 */
279 int (*tx_status) (void *data, struct stmmac_extra_stats *x,
280 struct dma_desc *p, unsigned long ioaddr);
281 /* Get the buffer size from the descriptor */
282 int (*get_tx_len) (struct dma_desc *p);
283 /* Handle extra events on specific interrupts hw dependent */
284 void (*host_irq_status) (unsigned long ioaddr);
285 int (*get_rx_owner) (struct dma_desc *p);
286 void (*set_rx_owner) (struct dma_desc *p);
287 /* Get the receive frame size */
288 int (*get_rx_frame_len) (struct dma_desc *p);
289 /* Return the reception status looking at the RDES1 */
290 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
291 struct dma_desc *p);
292 /* Multicast filter setting */
293 void (*set_filter) (struct net_device *dev);
294 /* Flow control setting */
295 void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
296 unsigned int fc, unsigned int pause_time);
297 /* Set power management mode (e.g. magic frame) */
298 void (*pmt) (unsigned long ioaddr, unsigned long mode);
299 /* Set/Get Unicast MAC addresses */
300 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
301 unsigned int reg_n);
302 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
303 unsigned int reg_n);
304};
305
306struct mac_link {
307 int port;
308 int duplex;
309 int speed;
310};
311
312struct mii_regs {
313 unsigned int addr; /* MII Address */
314 unsigned int data; /* MII Data */
315};
316
317struct hw_cap {
318 unsigned int version; /* Core Version register (GMAC) */
319 unsigned int pmt; /* Power-Down mode (GMAC) */
320 struct mac_link link;
321 struct mii_regs mii;
322};
323
324struct mac_device_info {
325 struct hw_cap hw;
326 struct stmmac_ops *ops;
327};
328
329struct mac_device_info *gmac_setup(unsigned long addr);
330struct mac_device_info *mac100_setup(unsigned long addr);
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
new file mode 100644
index 000000000000..6d2a0b2f5e57
--- /dev/null
+++ b/drivers/net/stmmac/descs.h
@@ -0,0 +1,163 @@
1/*******************************************************************************
2 Header File to describe the DMA descriptors
3 Use enhanced descriptors in case of GMAC Cores.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms and conditions of the GNU General Public License,
7 version 2, as published by the Free Software Foundation.
8
9 This program is distributed in the hope it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc.,
16 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
22*******************************************************************************/
23struct dma_desc {
24 /* Receive descriptor */
25 union {
26 struct {
27 /* RDES0 */
28 u32 reserved1:1;
29 u32 crc_error:1;
30 u32 dribbling:1;
31 u32 mii_error:1;
32 u32 receive_watchdog:1;
33 u32 frame_type:1;
34 u32 collision:1;
35 u32 frame_too_long:1;
36 u32 last_descriptor:1;
37 u32 first_descriptor:1;
38 u32 multicast_frame:1;
39 u32 run_frame:1;
40 u32 length_error:1;
41 u32 partial_frame_error:1;
42 u32 descriptor_error:1;
43 u32 error_summary:1;
44 u32 frame_length:14;
45 u32 filtering_fail:1;
46 u32 own:1;
47 /* RDES1 */
48 u32 buffer1_size:11;
49 u32 buffer2_size:11;
50 u32 reserved2:2;
51 u32 second_address_chained:1;
52 u32 end_ring:1;
53 u32 reserved3:5;
54 u32 disable_ic:1;
55 } rx;
56 struct {
57 /* RDES0 */
58 u32 payload_csum_error:1;
59 u32 crc_error:1;
60 u32 dribbling:1;
61 u32 error_gmii:1;
62 u32 receive_watchdog:1;
63 u32 frame_type:1;
64 u32 late_collision:1;
65 u32 ipc_csum_error:1;
66 u32 last_descriptor:1;
67 u32 first_descriptor:1;
68 u32 vlan_tag:1;
69 u32 overflow_error:1;
70 u32 length_error:1;
71 u32 sa_filter_fail:1;
72 u32 descriptor_error:1;
73 u32 error_summary:1;
74 u32 frame_length:14;
75 u32 da_filter_fail:1;
76 u32 own:1;
77 /* RDES1 */
78 u32 buffer1_size:13;
79 u32 reserved1:1;
80 u32 second_address_chained:1;
81 u32 end_ring:1;
82 u32 buffer2_size:13;
83 u32 reserved2:2;
84 u32 disable_ic:1;
85 } erx; /* -- enhanced -- */
86
87 /* Transmit descriptor */
88 struct {
89 /* TDES0 */
90 u32 deferred:1;
91 u32 underflow_error:1;
92 u32 excessive_deferral:1;
93 u32 collision_count:4;
94 u32 heartbeat_fail:1;
95 u32 excessive_collisions:1;
96 u32 late_collision:1;
97 u32 no_carrier:1;
98 u32 loss_carrier:1;
99 u32 reserved1:3;
100 u32 error_summary:1;
101 u32 reserved2:15;
102 u32 own:1;
103 /* TDES1 */
104 u32 buffer1_size:11;
105 u32 buffer2_size:11;
106 u32 reserved3:1;
107 u32 disable_padding:1;
108 u32 second_address_chained:1;
109 u32 end_ring:1;
110 u32 crc_disable:1;
111 u32 reserved4:2;
112 u32 first_segment:1;
113 u32 last_segment:1;
114 u32 interrupt:1;
115 } tx;
116 struct {
117 /* TDES0 */
118 u32 deferred:1;
119 u32 underflow_error:1;
120 u32 excessive_deferral:1;
121 u32 collision_count:4;
122 u32 vlan_frame:1;
123 u32 excessive_collisions:1;
124 u32 late_collision:1;
125 u32 no_carrier:1;
126 u32 loss_carrier:1;
127 u32 payload_error:1;
128 u32 frame_flushed:1;
129 u32 jabber_timeout:1;
130 u32 error_summary:1;
131 u32 ip_header_error:1;
132 u32 time_stamp_status:1;
133 u32 reserved1:2;
134 u32 second_address_chained:1;
135 u32 end_ring:1;
136 u32 checksum_insertion:2;
137 u32 reserved2:1;
138 u32 time_stamp_enable:1;
139 u32 disable_padding:1;
140 u32 crc_disable:1;
141 u32 first_segment:1;
142 u32 last_segment:1;
143 u32 interrupt:1;
144 u32 own:1;
145 /* TDES1 */
146 u32 buffer1_size:13;
147 u32 reserved3:3;
148 u32 buffer2_size:13;
149 u32 reserved4:3;
150 } etx; /* -- enhanced -- */
151 } des01;
152 unsigned int des2;
153 unsigned int des3;
154};
155
156/* Transmit checksum insertion control */
157enum tdes_csum_insertion {
158 cic_disabled = 0, /* Checksum Insertion Control */
159 cic_only_ip = 1, /* Only IP header */
160 cic_no_pseudoheader = 2, /* IP header but pseudoheader
161 * is not calculated */
162 cic_full = 3, /* IP header and pseudoheader */
163};
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c
new file mode 100644
index 000000000000..b624bb5bae0a
--- /dev/null
+++ b/drivers/net/stmmac/gmac.c
@@ -0,0 +1,693 @@
1/*******************************************************************************
2 This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code.
5
6 Copyright (C) 2007-2009 STMicroelectronics Ltd
7
8 This program is free software; you can redistribute it and/or modify it
9 under the terms and conditions of the GNU General Public License,
10 version 2, as published by the Free Software Foundation.
11
12 This program is distributed in the hope it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 more details.
16
17 You should have received a copy of the GNU General Public License along with
18 this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20
21 The full GNU General Public License is included in this distribution in
22 the file called "COPYING".
23
24 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/
26
27#include <linux/netdevice.h>
28#include <linux/crc32.h>
29#include <linux/mii.h>
30#include <linux/phy.h>
31
32#include "stmmac.h"
33#include "gmac.h"
34
35#undef GMAC_DEBUG
36/*#define GMAC_DEBUG*/
37#undef FRAME_FILTER_DEBUG
38/*#define FRAME_FILTER_DEBUG*/
39#ifdef GMAC_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args)
41#else
42#define DBG(fmt, args...) do { } while (0)
43#endif
44
45static void gmac_dump_regs(unsigned long ioaddr)
46{
47 int i;
48 pr_info("\t----------------------------------------------\n"
49 "\t GMAC registers (base addr = 0x%8x)\n"
50 "\t----------------------------------------------\n",
51 (unsigned int)ioaddr);
52
53 for (i = 0; i < 55; i++) {
54 int offset = i * 4;
55 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
56 offset, readl(ioaddr + offset));
57 }
58 return;
59}
60
61static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
62{
63 u32 value = readl(ioaddr + DMA_BUS_MODE);
64 /* DMA SW reset */
65 value |= DMA_BUS_MODE_SFT_RESET;
66 writel(value, ioaddr + DMA_BUS_MODE);
67 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
68
69 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
70 ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
71 (pbl << DMA_BUS_MODE_RPBL_SHIFT));
72
73#ifdef CONFIG_STMMAC_DA
74 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
75#endif
76 writel(value, ioaddr + DMA_BUS_MODE);
77
78 /* Mask interrupts by writing to CSR7 */
79 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
80
81 /* The base address of the RX/TX descriptor lists must be written into
82 * DMA CSR3 and CSR4, respectively. */
83 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
84 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
85
86 return 0;
87}
88
89/* Transmit FIFO flush operation */
90static void gmac_flush_tx_fifo(unsigned long ioaddr)
91{
92 u32 csr6 = readl(ioaddr + DMA_CONTROL);
93 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
94
95 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
96}
97
98static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
99 int rxmode)
100{
101 u32 csr6 = readl(ioaddr + DMA_CONTROL);
102
103 if (txmode == SF_DMA_MODE) {
104 DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
105 /* Transmit COE type 2 cannot be done in cut-through mode. */
106 csr6 |= DMA_CONTROL_TSF;
107 /* Operating on second frame increase the performance
108 * especially when transmit store-and-forward is used.*/
109 csr6 |= DMA_CONTROL_OSF;
110 } else {
111 DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
112 " (threshold = %d)\n", txmode);
113 csr6 &= ~DMA_CONTROL_TSF;
114 csr6 &= DMA_CONTROL_TC_TX_MASK;
115 /* Set the transmit threashold */
116 if (txmode <= 32)
117 csr6 |= DMA_CONTROL_TTC_32;
118 else if (txmode <= 64)
119 csr6 |= DMA_CONTROL_TTC_64;
120 else if (txmode <= 128)
121 csr6 |= DMA_CONTROL_TTC_128;
122 else if (txmode <= 192)
123 csr6 |= DMA_CONTROL_TTC_192;
124 else
125 csr6 |= DMA_CONTROL_TTC_256;
126 }
127
128 if (rxmode == SF_DMA_MODE) {
129 DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
130 csr6 |= DMA_CONTROL_RSF;
131 } else {
132 DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
133 " (threshold = %d)\n", rxmode);
134 csr6 &= ~DMA_CONTROL_RSF;
135 csr6 &= DMA_CONTROL_TC_RX_MASK;
136 if (rxmode <= 32)
137 csr6 |= DMA_CONTROL_RTC_32;
138 else if (rxmode <= 64)
139 csr6 |= DMA_CONTROL_RTC_64;
140 else if (rxmode <= 96)
141 csr6 |= DMA_CONTROL_RTC_96;
142 else
143 csr6 |= DMA_CONTROL_RTC_128;
144 }
145
146 writel(csr6, ioaddr + DMA_CONTROL);
147 return;
148}
149
150/* Not yet implemented --- no RMON module */
151static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
152 unsigned long ioaddr)
153{
154 return;
155}
156
157static void gmac_dump_dma_regs(unsigned long ioaddr)
158{
159 int i;
160 pr_info(" DMA registers\n");
161 for (i = 0; i < 22; i++) {
162 if ((i < 9) || (i > 17)) {
163 int offset = i * 4;
164 pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
165 (DMA_BUS_MODE + offset),
166 readl(ioaddr + DMA_BUS_MODE + offset));
167 }
168 }
169 return;
170}
171
172static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
173 struct dma_desc *p, unsigned long ioaddr)
174{
175 int ret = 0;
176 struct net_device_stats *stats = (struct net_device_stats *)data;
177
178 if (unlikely(p->des01.etx.error_summary)) {
179 DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
180 if (unlikely(p->des01.etx.jabber_timeout)) {
181 DBG(KERN_ERR "\tjabber_timeout error\n");
182 x->tx_jabber++;
183 }
184
185 if (unlikely(p->des01.etx.frame_flushed)) {
186 DBG(KERN_ERR "\tframe_flushed error\n");
187 x->tx_frame_flushed++;
188 gmac_flush_tx_fifo(ioaddr);
189 }
190
191 if (unlikely(p->des01.etx.loss_carrier)) {
192 DBG(KERN_ERR "\tloss_carrier error\n");
193 x->tx_losscarrier++;
194 stats->tx_carrier_errors++;
195 }
196 if (unlikely(p->des01.etx.no_carrier)) {
197 DBG(KERN_ERR "\tno_carrier error\n");
198 x->tx_carrier++;
199 stats->tx_carrier_errors++;
200 }
201 if (unlikely(p->des01.etx.late_collision)) {
202 DBG(KERN_ERR "\tlate_collision error\n");
203 stats->collisions += p->des01.etx.collision_count;
204 }
205 if (unlikely(p->des01.etx.excessive_collisions)) {
206 DBG(KERN_ERR "\texcessive_collisions\n");
207 stats->collisions += p->des01.etx.collision_count;
208 }
209 if (unlikely(p->des01.etx.excessive_deferral)) {
210 DBG(KERN_INFO "\texcessive tx_deferral\n");
211 x->tx_deferred++;
212 }
213
214 if (unlikely(p->des01.etx.underflow_error)) {
215 DBG(KERN_ERR "\tunderflow error\n");
216 gmac_flush_tx_fifo(ioaddr);
217 x->tx_underflow++;
218 }
219
220 if (unlikely(p->des01.etx.ip_header_error)) {
221 DBG(KERN_ERR "\tTX IP header csum error\n");
222 x->tx_ip_header_error++;
223 }
224
225 if (unlikely(p->des01.etx.payload_error)) {
226 DBG(KERN_ERR "\tAddr/Payload csum error\n");
227 x->tx_payload_error++;
228 gmac_flush_tx_fifo(ioaddr);
229 }
230
231 ret = -1;
232 }
233
234 if (unlikely(p->des01.etx.deferred)) {
235 DBG(KERN_INFO "GMAC TX status: tx deferred\n");
236 x->tx_deferred++;
237 }
238#ifdef STMMAC_VLAN_TAG_USED
239 if (p->des01.etx.vlan_frame) {
240 DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
241 x->tx_vlan++;
242 }
243#endif
244
245 return ret;
246}
247
248static int gmac_get_tx_len(struct dma_desc *p)
249{
250 return p->des01.etx.buffer1_size;
251}
252
253static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
254{
255 int ret = good_frame;
256 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
257
258 /* bits 5 7 0 | Frame status
259 * ----------------------------------------------------------
260 * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects)
261 * 1 0 0 | IPv4/6 No CSUM errorS.
262 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
263 * 1 1 0 | IPv4/6 CSUM IP HR error
264 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
265 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
266 * 0 1 1 | COE bypassed.. no IPv4/6 frame
267 * 0 1 0 | Reserved.
268 */
269 if (status == 0x0) {
270 DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
271 ret = good_frame;
272 } else if (status == 0x4) {
273 DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
274 ret = good_frame;
275 } else if (status == 0x5) {
276 DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
277 ret = csum_none;
278 } else if (status == 0x6) {
279 DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
280 ret = csum_none;
281 } else if (status == 0x7) {
282 DBG(KERN_ERR
283 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
284 ret = csum_none;
285 } else if (status == 0x1) {
286 DBG(KERN_ERR
287 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
288 ret = discard_frame;
289 } else if (status == 0x3) {
290 DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
291 ret = discard_frame;
292 }
293 return ret;
294}
295
296static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
297 struct dma_desc *p)
298{
299 int ret = good_frame;
300 struct net_device_stats *stats = (struct net_device_stats *)data;
301
302 if (unlikely(p->des01.erx.error_summary)) {
303 DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
304 if (unlikely(p->des01.erx.descriptor_error)) {
305 DBG(KERN_ERR "\tdescriptor error\n");
306 x->rx_desc++;
307 stats->rx_length_errors++;
308 }
309 if (unlikely(p->des01.erx.overflow_error)) {
310 DBG(KERN_ERR "\toverflow error\n");
311 x->rx_gmac_overflow++;
312 }
313
314 if (unlikely(p->des01.erx.ipc_csum_error))
315 DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
316
317 if (unlikely(p->des01.erx.late_collision)) {
318 DBG(KERN_ERR "\tlate_collision error\n");
319 stats->collisions++;
320 stats->collisions++;
321 }
322 if (unlikely(p->des01.erx.receive_watchdog)) {
323 DBG(KERN_ERR "\treceive_watchdog error\n");
324 x->rx_watchdog++;
325 }
326 if (unlikely(p->des01.erx.error_gmii)) {
327 DBG(KERN_ERR "\tReceive Error\n");
328 x->rx_mii++;
329 }
330 if (unlikely(p->des01.erx.crc_error)) {
331 DBG(KERN_ERR "\tCRC error\n");
332 x->rx_crc++;
333 stats->rx_crc_errors++;
334 }
335 ret = discard_frame;
336 }
337
338 /* After a payload csum error, the ES bit is set.
339 * It doesn't match with the information reported into the databook.
340 * At any rate, we need to understand if the CSUM hw computation is ok
341 * and report this info to the upper layers. */
342 ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
343 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
344
345 if (unlikely(p->des01.erx.dribbling)) {
346 DBG(KERN_ERR "GMAC RX: dribbling error\n");
347 ret = discard_frame;
348 }
349 if (unlikely(p->des01.erx.sa_filter_fail)) {
350 DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
351 x->sa_rx_filter_fail++;
352 ret = discard_frame;
353 }
354 if (unlikely(p->des01.erx.da_filter_fail)) {
355 DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
356 x->da_rx_filter_fail++;
357 ret = discard_frame;
358 }
359 if (unlikely(p->des01.erx.length_error)) {
360 DBG(KERN_ERR "GMAC RX: length_error error\n");
361 x->rx_lenght++;
362 ret = discard_frame;
363 }
364#ifdef STMMAC_VLAN_TAG_USED
365 if (p->des01.erx.vlan_tag) {
366 DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
367 x->rx_vlan++;
368 }
369#endif
370 return ret;
371}
372
373static void gmac_irq_status(unsigned long ioaddr)
374{
375 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
376
377 /* Not used events (e.g. MMC interrupts) are not handled. */
378 if ((intr_status & mmc_tx_irq))
379 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
380 readl(ioaddr + GMAC_MMC_TX_INTR));
381 if (unlikely(intr_status & mmc_rx_irq))
382 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
383 readl(ioaddr + GMAC_MMC_RX_INTR));
384 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
385 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
386 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
387 if (unlikely(intr_status & pmt_irq)) {
388 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
389 /* clear the PMT bits 5 and 6 by reading the PMT
390 * status register. */
391 readl(ioaddr + GMAC_PMT);
392 }
393
394 return;
395}
396
397static void gmac_core_init(unsigned long ioaddr)
398{
399 u32 value = readl(ioaddr + GMAC_CONTROL);
400 value |= GMAC_CORE_INIT;
401 writel(value, ioaddr + GMAC_CONTROL);
402
403 /* STBus Bridge Configuration */
404 /*writel(0xc5608, ioaddr + 0x00007000);*/
405
406 /* Freeze MMC counters */
407 writel(0x8, ioaddr + GMAC_MMC_CTRL);
408 /* Mask GMAC interrupts */
409 writel(0x207, ioaddr + GMAC_INT_MASK);
410
411#ifdef STMMAC_VLAN_TAG_USED
412 /* Tag detection without filtering */
413 writel(0x0, ioaddr + GMAC_VLAN_TAG);
414#endif
415 return;
416}
417
/* Program unicast MAC address @addr into perfect-filter slot @reg_n
 * (slot 0 is the station address). */
static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
				unsigned int reg_n)
{
	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				GMAC_ADDR_LOW(reg_n));
}
424
/* Read back the unicast MAC address stored in perfect-filter slot
 * @reg_n into @addr. */
static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
				unsigned int reg_n)
{
	stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				GMAC_ADDR_LOW(reg_n));
}
431
432static void gmac_set_filter(struct net_device *dev)
433{
434 unsigned long ioaddr = dev->base_addr;
435 unsigned int value = 0;
436
437 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
438 __func__, dev->mc_count, dev->uc_count);
439
440 if (dev->flags & IFF_PROMISC)
441 value = GMAC_FRAME_FILTER_PR;
442 else if ((dev->mc_count > HASH_TABLE_SIZE)
443 || (dev->flags & IFF_ALLMULTI)) {
444 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
445 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
446 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
447 } else if (dev->mc_count > 0) {
448 int i;
449 u32 mc_filter[2];
450 struct dev_mc_list *mclist;
451
452 /* Hash filter for multicast */
453 value = GMAC_FRAME_FILTER_HMC;
454
455 memset(mc_filter, 0, sizeof(mc_filter));
456 for (i = 0, mclist = dev->mc_list;
457 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
458 /* The upper 6 bits of the calculated CRC are used to
459 index the contens of the hash table */
460 int bit_nr =
461 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
462 /* The most significant bit determines the register to
463 * use (H/L) while the other 5 bits determine the bit
464 * within the register. */
465 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
466 }
467 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
468 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
469 }
470
471 /* Handle multiple unicast addresses (perfect filtering)*/
472 if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
473 /* Switch to promiscuous mode is more than 16 addrs
474 are required */
475 value |= GMAC_FRAME_FILTER_PR;
476 else {
477 int i;
478 struct dev_addr_list *uc_ptr = dev->uc_list;
479
480 for (i = 0; i < dev->uc_count; i++) {
481 gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
482 i + 1);
483
484 DBG(KERN_INFO "\t%d "
485 "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
486 "%02x\n", i + 1,
487 uc_ptr->da_addr[0], uc_ptr->da_addr[1],
488 uc_ptr->da_addr[2], uc_ptr->da_addr[3],
489 uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
490 uc_ptr = uc_ptr->next;
491 }
492 }
493
494#ifdef FRAME_FILTER_DEBUG
495 /* Enable Receive all mode (to debug filtering_fail errors) */
496 value |= GMAC_FRAME_FILTER_RA;
497#endif
498 writel(value, ioaddr + GMAC_FRAME_FILTER);
499
500 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
501 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
502 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
503
504 return;
505}
506
507static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
508 unsigned int fc, unsigned int pause_time)
509{
510 unsigned int flow = 0;
511
512 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
513 if (fc & FLOW_RX) {
514 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
515 flow |= GMAC_FLOW_CTRL_RFE;
516 }
517 if (fc & FLOW_TX) {
518 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
519 flow |= GMAC_FLOW_CTRL_TFE;
520 }
521
522 if (duplex) {
523 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
524 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
525 }
526
527 writel(flow, ioaddr + GMAC_FLOW_CTRL);
528 return;
529}
530
531static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
532{
533 unsigned int pmt = 0;
534
535 if (mode == WAKE_MAGIC) {
536 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
537 pmt |= power_down | magic_pkt_en;
538 } else if (mode == WAKE_UCAST) {
539 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
540 pmt |= global_unicast;
541 }
542
543 writel(pmt, ioaddr + GMAC_PMT);
544 return;
545}
546
547static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
548 int disable_rx_ic)
549{
550 int i;
551 for (i = 0; i < ring_size; i++) {
552 p->des01.erx.own = 1;
553 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
554 /* To support jumbo frames */
555 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
556 if (i == ring_size - 1)
557 p->des01.erx.end_ring = 1;
558 if (disable_rx_ic)
559 p->des01.erx.disable_ic = 1;
560 p++;
561 }
562 return;
563}
564
565static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
566{
567 int i;
568
569 for (i = 0; i < ring_size; i++) {
570 p->des01.etx.own = 0;
571 if (i == ring_size - 1)
572 p->des01.etx.end_ring = 1;
573 p++;
574 }
575
576 return;
577}
578
/* Return the ownership (OWN) bit of a TX descriptor. */
static int gmac_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}
583
/* Return the ownership (OWN) bit of an RX descriptor. */
static int gmac_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}
588
/* Hand a TX descriptor over to the DMA engine by setting its OWN bit. */
static void gmac_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}
593
/* Hand an RX descriptor over to the DMA engine by setting its OWN bit. */
static void gmac_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}
598
/* Return non-zero when this TX descriptor carries the last segment of
 * a frame. */
static int gmac_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}
603
604static void gmac_release_tx_desc(struct dma_desc *p)
605{
606 int ter = p->des01.etx.end_ring;
607
608 memset(p, 0, sizeof(struct dma_desc));
609 p->des01.etx.end_ring = ter;
610
611 return;
612}
613
614static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
615 int csum_flag)
616{
617 p->des01.etx.first_segment = is_fs;
618 if (unlikely(len > BUF_SIZE_4KiB)) {
619 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
620 p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
621 } else {
622 p->des01.etx.buffer1_size = len;
623 }
624 if (likely(csum_flag))
625 p->des01.etx.checksum_insertion = cic_full;
626}
627
/* Clear the interrupt-on-completion bit of a TX descriptor. */
static void gmac_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}
632
/* Close a TX frame: mark this descriptor as the last segment and
 * request an interrupt on completion. */
static void gmac_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}
638
/* Return the frame length reported by the RX descriptor. */
static int gmac_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.erx.frame_length;
}
643
/* Hook table exported to the common stmmac core: MAC/DMA setup, filter
 * and flow-control programming, TX/RX descriptor management and status
 * parsing for the GMAC core. */
struct stmmac_ops gmac_driver = {
	.core_init = gmac_core_init,
	.dump_mac_regs = gmac_dump_regs,
	.dma_init = gmac_dma_init,
	.dump_dma_regs = gmac_dump_dma_regs,
	.dma_mode = gmac_dma_operation_mode,
	.dma_diagnostic_fr = gmac_dma_diagnostic_fr,
	.tx_status = gmac_get_tx_frame_status,
	.rx_status = gmac_get_rx_frame_status,
	.get_tx_len = gmac_get_tx_len,
	.set_filter = gmac_set_filter,
	.flow_ctrl = gmac_flow_ctrl,
	.pmt = gmac_pmt,
	.init_rx_desc = gmac_init_rx_desc,
	.init_tx_desc = gmac_init_tx_desc,
	.get_tx_owner = gmac_get_tx_owner,
	.get_rx_owner = gmac_get_rx_owner,
	.release_tx_desc = gmac_release_tx_desc,
	.prepare_tx_desc = gmac_prepare_tx_desc,
	.clear_tx_ic = gmac_clear_tx_ic,
	.close_tx_desc = gmac_close_tx_desc,
	.get_tx_ls = gmac_get_tx_ls,
	.set_tx_owner = gmac_set_tx_owner,
	.set_rx_owner = gmac_set_rx_owner,
	.get_rx_frame_len = gmac_get_rx_frame_len,
	.host_irq_status = gmac_irq_status,
	.set_umac_addr = gmac_set_umac_addr,
	.get_umac_addr = gmac_get_umac_addr,
};
673
674struct mac_device_info *gmac_setup(unsigned long ioaddr)
675{
676 struct mac_device_info *mac;
677 u32 uid = readl(ioaddr + GMAC_VERSION);
678
679 pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
680 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
681
682 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
683
684 mac->ops = &gmac_driver;
685 mac->hw.pmt = PMT_SUPPORTED;
686 mac->hw.link.port = GMAC_CONTROL_PS;
687 mac->hw.link.duplex = GMAC_CONTROL_DM;
688 mac->hw.link.speed = GMAC_CONTROL_FES;
689 mac->hw.mii.addr = GMAC_MII_ADDR;
690 mac->hw.mii.data = GMAC_MII_DATA;
691
692 return mac;
693}
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h
new file mode 100644
index 000000000000..684a363120a9
--- /dev/null
+++ b/drivers/net/stmmac/gmac.h
@@ -0,0 +1,204 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#define GMAC_CONTROL 0x00000000 /* Configuration */
24#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
25#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
26#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
27#define GMAC_MII_ADDR 0x00000010 /* MII Address */
28#define GMAC_MII_DATA 0x00000014 /* MII Data */
29#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
30#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
31#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
32#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
33
34#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
35enum gmac_irq_status {
36 time_stamp_irq = 0x0200,
37 mmc_rx_csum_offload_irq = 0x0080,
38 mmc_tx_irq = 0x0040,
39 mmc_rx_irq = 0x0020,
40 mmc_irq = 0x0010,
41 pmt_irq = 0x0008,
42 pcs_ane_irq = 0x0004,
43 pcs_link_irq = 0x0002,
44 rgmii_irq = 0x0001,
45};
46#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
47
48/* PMT Control and Status */
49#define GMAC_PMT 0x0000002c
50enum power_event {
51 pointer_reset = 0x80000000,
52 global_unicast = 0x00000200,
53 wake_up_rx_frame = 0x00000040,
54 magic_frame = 0x00000020,
55 wake_up_frame_en = 0x00000004,
56 magic_pkt_en = 0x00000002,
57 power_down = 0x00000001,
58};
59
60/* GMAC HW ADDR regs */
61#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8))
62#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8))
63#define GMAC_MAX_UNICAST_ADDRESSES 16
64
#define GMAC_AN_CTRL	0x000000c0	/* AN control */
#define GMAC_AN_STATUS	0x000000c4	/* AN status */
#define GMAC_ANE_ADV	0x000000c8	/* Auto-Neg. Advertisement */
#define GMAC_ANE_LINK	0x000000cc	/* Auto-Neg. link partner ability */
#define GMAC_ANE_EXP	0x000000d0	/* ANE expansion */
#define GMAC_TBI	0x000000d4	/* TBI extend status */
#define GMAC_GMII_STATUS 0x000000d8	/* S/R-GMII status */
72
73/* GMAC Configuration defines */
74#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
75#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
76#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
77#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
78#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
79enum inter_frame_gap {
80 GMAC_CONTROL_IFG_88 = 0x00040000,
81 GMAC_CONTROL_IFG_80 = 0x00020000,
82 GMAC_CONTROL_IFG_40 = 0x000e0000,
83};
84#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */
85#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
86#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
87#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
88#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
89#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
90#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
91#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
92#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
93#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */
94#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
95#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
96#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
97
98#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
99 GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
100
101/* GMAC Frame Filter defines */
102#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
103#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
104#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
105#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
106#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
107#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
108#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
109#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
110#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
111#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
112/* GMII ADDR defines */
113#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
114#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
115/* GMAC FLOW CTRL defines */
116#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
117#define GMAC_FLOW_CTRL_PT_SHIFT 16
118#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
119#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
120#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
121
/*--- DMA BLOCK defines ---*/
/* DMA Bus Mode register defines */
#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
#define DMA_BUS_MODE_DA		0x00000002	/* Arbitration scheme */
#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT	2	/* (in DWORDS) */
/* Programmable burst length (passed through platform) */
#define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT	8
131
132enum rx_tx_priority_ratio {
133 double_ratio = 0x00004000, /*2:1 */
134 triple_ratio = 0x00008000, /*3:1 */
135 quadruple_ratio = 0x0000c000, /*4:1 */
136};
137
138#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
139#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
140#define DMA_BUS_MODE_RPBL_SHIFT 17
141#define DMA_BUS_MODE_USP 0x00800000
142#define DMA_BUS_MODE_4PBL 0x01000000
143#define DMA_BUS_MODE_AAL 0x02000000
144
145/* DMA CRS Control and Status Register Mapping */
146#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
147#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
148/* DMA Bus Mode register defines */
149#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
150#define DMA_BUS_PR_RATIO_SHIFT 14
151#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
152
/* DMA operation mode defines (start/stop tx/rx are placed in common header) */
#define DMA_CONTROL_DT		0x04000000	/* Disable Drop TCP/IP csum error */
#define DMA_CONTROL_RSF		0x02000000	/* Receive Store and Forward */
#define DMA_CONTROL_DFF		0x01000000	/* Disable flushing */
/* Threshold for Activating the FC */
enum rfa {
	act_full_minus_1 = 0x00800000,
	act_full_minus_2 = 0x00800200,
	act_full_minus_3 = 0x00800400,
	act_full_minus_4 = 0x00800600,
};
/* Threshold for Deactivating the FC */
enum rfd {
	deac_full_minus_1 = 0x00400000,
	deac_full_minus_2 = 0x00400800,
	deac_full_minus_3 = 0x00401000,
	deac_full_minus_4 = 0x00401800,
};
#define DMA_CONTROL_TSF		0x00200000	/* Transmit Store and Forward */
#define DMA_CONTROL_FTF		0x00100000	/* Flush transmit FIFO */
173
174enum ttc_control {
175 DMA_CONTROL_TTC_64 = 0x00000000,
176 DMA_CONTROL_TTC_128 = 0x00004000,
177 DMA_CONTROL_TTC_192 = 0x00008000,
178 DMA_CONTROL_TTC_256 = 0x0000c000,
179 DMA_CONTROL_TTC_40 = 0x00010000,
180 DMA_CONTROL_TTC_32 = 0x00014000,
181 DMA_CONTROL_TTC_24 = 0x00018000,
182 DMA_CONTROL_TTC_16 = 0x0001c000,
183};
184#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
185
186#define DMA_CONTROL_EFC 0x00000100
187#define DMA_CONTROL_FEF 0x00000080
188#define DMA_CONTROL_FUF 0x00000040
189
190enum rtc_control {
191 DMA_CONTROL_RTC_64 = 0x00000000,
192 DMA_CONTROL_RTC_32 = 0x00000008,
193 DMA_CONTROL_RTC_96 = 0x00000010,
194 DMA_CONTROL_RTC_128 = 0x00000018,
195};
196#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
197
198#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
199
200/* MMC registers offset */
201#define GMAC_MMC_CTRL 0x100
202#define GMAC_MMC_RX_INTR 0x104
203#define GMAC_MMC_TX_INTR 0x108
204#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c
new file mode 100644
index 000000000000..625171b6062b
--- /dev/null
+++ b/drivers/net/stmmac/mac100.c
@@ -0,0 +1,517 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9
10 This program is free software; you can redistribute it and/or modify it
11 under the terms and conditions of the GNU General Public License,
12 version 2, as published by the Free Software Foundation.
13
14 This program is distributed in the hope it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22
23 The full GNU General Public License is included in this distribution in
24 the file called "COPYING".
25
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/
28
29#include <linux/netdevice.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/phy.h>
33
34#include "common.h"
35#include "mac100.h"
36
/* Local debug trace: expands to printk only when MAC100_DEBUG is
 * defined (uncomment the line below); otherwise compiled to a no-op. */
#undef MAC100_DEBUG
/*#define MAC100_DEBUG*/
#ifdef MAC100_DEBUG
#define DBG(fmt, args...)  printk(fmt, ## args)
#else
#define DBG(fmt, args...)  do { } while (0)
#endif
44
45static void mac100_core_init(unsigned long ioaddr)
46{
47 u32 value = readl(ioaddr + MAC_CONTROL);
48
49 writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
50
51#ifdef STMMAC_VLAN_TAG_USED
52 writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
53#endif
54 return;
55}
56
57static void mac100_dump_mac_regs(unsigned long ioaddr)
58{
59 pr_info("\t----------------------------------------------\n"
60 "\t MAC100 CSR (base addr = 0x%8x)\n"
61 "\t----------------------------------------------\n",
62 (unsigned int)ioaddr);
63 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
64 readl(ioaddr + MAC_CONTROL));
65 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
66 readl(ioaddr + MAC_ADDR_HIGH));
67 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
68 readl(ioaddr + MAC_ADDR_LOW));
69 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
70 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
71 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
72 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
73 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
74 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
75 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
76 readl(ioaddr + MAC_VLAN1));
77 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
78 readl(ioaddr + MAC_VLAN2));
79 pr_info("\n\tMAC management counter registers\n");
80 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
81 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
82 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
83 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
84 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
85 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
86 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
87 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
88 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
89 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
90 return;
91}
92
/* Reset the DMA engine and program its base configuration: bus mode
 * (CSR0) with the platform-provided programmable burst length, the
 * interrupt mask (CSR7) and the TX/RX descriptor list base addresses
 * (CSR3/CSR4).  Always returns 0.
 * NOTE(review): the software-reset poll below has no timeout -- it
 * spins forever if the hardware never clears the reset bit. */
static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
			   u32 dma_rx)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);
	/* DMA SW reset */
	value |= DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + DMA_BUS_MODE);
	do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));

	/* Enable Application Access by writing to DMA CSR0 */
	writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
	       ioaddr + DMA_BUS_MODE);

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

	/* The base address of the RX/TX descriptor lists must be written into
	 * DMA CSR3 and CSR4, respectively. */
	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

	return 0;
}
116
117/* Store and Forward capability is not used at all..
118 * The transmit threshold can be programmed by
119 * setting the TTC bits in the DMA control register.*/
120static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
121 int rxmode)
122{
123 u32 csr6 = readl(ioaddr + DMA_CONTROL);
124
125 if (txmode <= 32)
126 csr6 |= DMA_CONTROL_TTC_32;
127 else if (txmode <= 64)
128 csr6 |= DMA_CONTROL_TTC_64;
129 else
130 csr6 |= DMA_CONTROL_TTC_128;
131
132 writel(csr6, ioaddr + DMA_CONTROL);
133
134 return;
135}
136
137static void mac100_dump_dma_regs(unsigned long ioaddr)
138{
139 int i;
140
141 DBG(KERN_DEBUG "MAC100 DMA CSR \n");
142 for (i = 0; i < 9; i++)
143 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
144 (DMA_BUS_MODE + i * 4),
145 readl(ioaddr + DMA_BUS_MODE + i * 4));
146 DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
147 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
148 DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
149 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
150 return;
151}
152
153/* DMA controller has two counters to track the number of
154 the receive missed frames. */
155static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
156 unsigned long ioaddr)
157{
158 struct net_device_stats *stats = (struct net_device_stats *)data;
159 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
160
161 if (unlikely(csr8)) {
162 if (csr8 & DMA_MISSED_FRAME_OVE) {
163 stats->rx_over_errors += 0x800;
164 x->rx_overflow_cntr += 0x800;
165 } else {
166 unsigned int ove_cntr;
167 ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
168 stats->rx_over_errors += ove_cntr;
169 x->rx_overflow_cntr += ove_cntr;
170 }
171
172 if (csr8 & DMA_MISSED_FRAME_OVE_M) {
173 stats->rx_missed_errors += 0xffff;
174 x->rx_missed_cntr += 0xffff;
175 } else {
176 unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
177 stats->rx_missed_errors += miss_f;
178 x->rx_missed_cntr += miss_f;
179 }
180 }
181 return;
182}
183
184static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
185 struct dma_desc *p, unsigned long ioaddr)
186{
187 int ret = 0;
188 struct net_device_stats *stats = (struct net_device_stats *)data;
189
190 if (unlikely(p->des01.tx.error_summary)) {
191 if (unlikely(p->des01.tx.underflow_error)) {
192 x->tx_underflow++;
193 stats->tx_fifo_errors++;
194 }
195 if (unlikely(p->des01.tx.no_carrier)) {
196 x->tx_carrier++;
197 stats->tx_carrier_errors++;
198 }
199 if (unlikely(p->des01.tx.loss_carrier)) {
200 x->tx_losscarrier++;
201 stats->tx_carrier_errors++;
202 }
203 if (unlikely((p->des01.tx.excessive_deferral) ||
204 (p->des01.tx.excessive_collisions) ||
205 (p->des01.tx.late_collision)))
206 stats->collisions += p->des01.tx.collision_count;
207 ret = -1;
208 }
209 if (unlikely(p->des01.tx.heartbeat_fail)) {
210 x->tx_heartbeat++;
211 stats->tx_heartbeat_errors++;
212 ret = -1;
213 }
214 if (unlikely(p->des01.tx.deferred))
215 x->tx_deferred++;
216
217 return ret;
218}
219
/* Return the buffer1 size programmed into a TX descriptor. */
static int mac100_get_tx_len(struct dma_desc *p)
{
	return p->des01.tx.buffer1_size;
}
224
225/* This function verifies if each incoming frame has some errors
226 * and, if required, updates the multicast statistics.
227 * In case of success, it returns csum_none becasue the device
228 * is not able to compute the csum in HW. */
229static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
230 struct dma_desc *p)
231{
232 int ret = csum_none;
233 struct net_device_stats *stats = (struct net_device_stats *)data;
234
235 if (unlikely(p->des01.rx.last_descriptor == 0)) {
236 pr_warning("mac100 Error: Oversized Ethernet "
237 "frame spanned multiple buffers\n");
238 stats->rx_length_errors++;
239 return discard_frame;
240 }
241
242 if (unlikely(p->des01.rx.error_summary)) {
243 if (unlikely(p->des01.rx.descriptor_error))
244 x->rx_desc++;
245 if (unlikely(p->des01.rx.partial_frame_error))
246 x->rx_partial++;
247 if (unlikely(p->des01.rx.run_frame))
248 x->rx_runt++;
249 if (unlikely(p->des01.rx.frame_too_long))
250 x->rx_toolong++;
251 if (unlikely(p->des01.rx.collision)) {
252 x->rx_collision++;
253 stats->collisions++;
254 }
255 if (unlikely(p->des01.rx.crc_error)) {
256 x->rx_crc++;
257 stats->rx_crc_errors++;
258 }
259 ret = discard_frame;
260 }
261 if (unlikely(p->des01.rx.dribbling))
262 ret = discard_frame;
263
264 if (unlikely(p->des01.rx.length_error)) {
265 x->rx_lenght++;
266 ret = discard_frame;
267 }
268 if (unlikely(p->des01.rx.mii_error)) {
269 x->rx_mii++;
270 ret = discard_frame;
271 }
272 if (p->des01.rx.multicast_frame) {
273 x->rx_multicast++;
274 stats->multicast++;
275 }
276 return ret;
277}
278
/* The MAC 10/100 core has no per-MAC interrupt status to service;
 * this empty hook only satisfies the common stmmac_ops interface. */
static void mac100_irq_status(unsigned long ioaddr)
{
}
283
/* Program the station address registers; @reg_n is part of the common
 * interface but ignored here because this core exposes a single
 * perfect-filter address (MAC_ADDR_HIGH/LOW). */
static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
				 unsigned int reg_n)
{
	stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
289
/* Read back the station address; @reg_n is ignored (single address
 * slot on this core). */
static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
				 unsigned int reg_n)
{
	stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
295
296static void mac100_set_filter(struct net_device *dev)
297{
298 unsigned long ioaddr = dev->base_addr;
299 u32 value = readl(ioaddr + MAC_CONTROL);
300
301 if (dev->flags & IFF_PROMISC) {
302 value |= MAC_CONTROL_PR;
303 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
304 MAC_CONTROL_HP);
305 } else if ((dev->mc_count > HASH_TABLE_SIZE)
306 || (dev->flags & IFF_ALLMULTI)) {
307 value |= MAC_CONTROL_PM;
308 value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
309 writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
310 writel(0xffffffff, ioaddr + MAC_HASH_LOW);
311 } else if (dev->mc_count == 0) { /* no multicast */
312 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
313 MAC_CONTROL_HO | MAC_CONTROL_HP);
314 } else {
315 int i;
316 u32 mc_filter[2];
317 struct dev_mc_list *mclist;
318
319 /* Perfect filter mode for physical address and Hash
320 filter for multicast */
321 value |= MAC_CONTROL_HP;
322 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
323 | MAC_CONTROL_HO);
324
325 memset(mc_filter, 0, sizeof(mc_filter));
326 for (i = 0, mclist = dev->mc_list;
327 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
328 /* The upper 6 bits of the calculated CRC are used to
329 * index the contens of the hash table */
330 int bit_nr =
331 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
332 /* The most significant bit determines the register to
333 * use (H/L) while the other 5 bits determine the bit
334 * within the register. */
335 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
336 }
337 writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
338 writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
339 }
340
341 writel(value, ioaddr + MAC_CONTROL);
342
343 DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
344 "HI 0x%08x, LO 0x%08x\n",
345 __func__, readl(ioaddr + MAC_CONTROL),
346 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
347 return;
348}
349
350static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
351 unsigned int fc, unsigned int pause_time)
352{
353 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
354
355 if (duplex)
356 flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
357 writel(flow, ioaddr + MAC_FLOW_CTRL);
358
359 return;
360}
361
/* No PMT module supported in our SoC for the Ethernet Controller,
 * so power management events are a no-op on this core. */
static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
{
}
367
368static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
369 int disable_rx_ic)
370{
371 int i;
372 for (i = 0; i < ring_size; i++) {
373 p->des01.rx.own = 1;
374 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
375 if (i == ring_size - 1)
376 p->des01.rx.end_ring = 1;
377 if (disable_rx_ic)
378 p->des01.rx.disable_ic = 1;
379 p++;
380 }
381 return;
382}
383
384static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
385{
386 int i;
387 for (i = 0; i < ring_size; i++) {
388 p->des01.tx.own = 0;
389 if (i == ring_size - 1)
390 p->des01.tx.end_ring = 1;
391 p++;
392 }
393 return;
394}
395
/* Read the ownership bit of a TX descriptor. */
static int mac100_get_tx_owner(struct dma_desc *p)
{
	return p->des01.tx.own;
}
400
/* Read the ownership bit of an RX descriptor. */
static int mac100_get_rx_owner(struct dma_desc *p)
{
	return p->des01.rx.own;
}
405
/* Hand a TX descriptor to the DMA engine by setting its own bit. */
static void mac100_set_tx_owner(struct dma_desc *p)
{
	p->des01.tx.own = 1;
}
410
/* Hand an RX descriptor back to the DMA engine by setting its own bit. */
static void mac100_set_rx_owner(struct dma_desc *p)
{
	p->des01.rx.own = 1;
}
415
/* Return the last-segment flag of a TX descriptor. */
static int mac100_get_tx_ls(struct dma_desc *p)
{
	return p->des01.tx.last_segment;
}
420
421static void mac100_release_tx_desc(struct dma_desc *p)
422{
423 int ter = p->des01.tx.end_ring;
424
425 /* clean field used within the xmit */
426 p->des01.tx.first_segment = 0;
427 p->des01.tx.last_segment = 0;
428 p->des01.tx.buffer1_size = 0;
429
430 /* clean status reported */
431 p->des01.tx.error_summary = 0;
432 p->des01.tx.underflow_error = 0;
433 p->des01.tx.no_carrier = 0;
434 p->des01.tx.loss_carrier = 0;
435 p->des01.tx.excessive_deferral = 0;
436 p->des01.tx.excessive_collisions = 0;
437 p->des01.tx.late_collision = 0;
438 p->des01.tx.heartbeat_fail = 0;
439 p->des01.tx.deferred = 0;
440
441 /* set termination field */
442 p->des01.tx.end_ring = ter;
443
444 return;
445}
446
/* Prepare a TX descriptor for xmit: set the first-segment flag and the
 * buffer length.  csum_flag is intentionally unused — presumably because
 * this core cannot compute checksums in HW (see the csum_none note
 * above); confirm against the GMAC variant. */
static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				   int csum_flag)
{
	p->des01.tx.first_segment = is_fs;
	p->des01.tx.buffer1_size = len;
}
453
/* Disable the completion interrupt for this TX descriptor. */
static void mac100_clear_tx_ic(struct dma_desc *p)
{
	p->des01.tx.interrupt = 0;
}
458
/* Close a TX descriptor chain: mark it as the last segment and request
 * a completion interrupt. */
static void mac100_close_tx_desc(struct dma_desc *p)
{
	p->des01.tx.last_segment = 1;
	p->des01.tx.interrupt = 1;
}
464
/* Return the received frame length reported in the RX descriptor. */
static int mac100_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.rx.frame_length;
}
469
/* Hook table binding the generic stmmac core to the MAC 10/100
 * specific helpers defined above. */
struct stmmac_ops mac100_driver = {
	.core_init = mac100_core_init,
	.dump_mac_regs = mac100_dump_mac_regs,
	.dma_init = mac100_dma_init,
	.dump_dma_regs = mac100_dump_dma_regs,
	.dma_mode = mac100_dma_operation_mode,
	.dma_diagnostic_fr = mac100_dma_diagnostic_fr,
	.tx_status = mac100_get_tx_frame_status,
	.rx_status = mac100_get_rx_frame_status,
	.get_tx_len = mac100_get_tx_len,
	.set_filter = mac100_set_filter,
	.flow_ctrl = mac100_flow_ctrl,
	.pmt = mac100_pmt,
	.init_rx_desc = mac100_init_rx_desc,
	.init_tx_desc = mac100_init_tx_desc,
	.get_tx_owner = mac100_get_tx_owner,
	.get_rx_owner = mac100_get_rx_owner,
	.release_tx_desc = mac100_release_tx_desc,
	.prepare_tx_desc = mac100_prepare_tx_desc,
	.clear_tx_ic = mac100_clear_tx_ic,
	.close_tx_desc = mac100_close_tx_desc,
	.get_tx_ls = mac100_get_tx_ls,
	.set_tx_owner = mac100_set_tx_owner,
	.set_rx_owner = mac100_set_rx_owner,
	.get_rx_frame_len = mac100_get_rx_frame_len,
	.host_irq_status = mac100_irq_status,
	.set_umac_addr = mac100_set_umac_addr,
	.get_umac_addr = mac100_get_umac_addr,
};
499
500struct mac_device_info *mac100_setup(unsigned long ioaddr)
501{
502 struct mac_device_info *mac;
503
504 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
505
506 pr_info("\tMAC 10/100\n");
507
508 mac->ops = &mac100_driver;
509 mac->hw.pmt = PMT_NOT_SUPPORTED;
510 mac->hw.link.port = MAC_CONTROL_PS;
511 mac->hw.link.duplex = MAC_CONTROL_F;
512 mac->hw.link.speed = 0;
513 mac->hw.mii.addr = MAC_MII_ADDR;
514 mac->hw.mii.data = MAC_MII_DATA;
515
516 return mac;
517}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h
new file mode 100644
index 000000000000..0f8f110d004a
--- /dev/null
+++ b/drivers/net/stmmac/mac100.h
@@ -0,0 +1,116 @@
1/*******************************************************************************
2 MAC 10/100 Header File
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25/*----------------------------------------------------------------------------
26 * MAC BLOCK defines
27 *---------------------------------------------------------------------------*/
28/* MAC CSR offset */
29#define MAC_CONTROL 0x00000000 /* MAC Control */
30#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
31#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
32#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
33#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
34#define MAC_MII_ADDR 0x00000014 /* MII Address */
35#define MAC_MII_DATA 0x00000018 /* MII Data */
36#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
37#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
38#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
39
40/* MAC CTRL defines */
41#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
42#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
43#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
44#define MAC_CONTROL_PS 0x08000000 /* Port Select */
45#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
46#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
47#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
48#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
49#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
50#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
51#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
52#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
53#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
54#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
55#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
56#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
57#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
58#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
59#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
60#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
61#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
62#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
63#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
64#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
65#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
66
67#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
68
69/* MAC FLOW CTRL defines */
70#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
71#define MAC_FLOW_CTRL_PT_SHIFT 16
72#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
73#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
74#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
75
76/* MII ADDR defines */
77#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
78#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
79
80/*----------------------------------------------------------------------------
81 * DMA BLOCK defines
82 *---------------------------------------------------------------------------*/
83
84/* DMA Bus Mode register defines */
85#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
86#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
87#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
88#define DMA_BUS_MODE_PBL_SHIFT 8
89#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
90#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
91#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
92#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
93#define DMA_BUS_MODE_DEFAULT 0x00000000
94
95/* DMA Control register defines */
96#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
97
98/* Transmit Threshold Control */
/* Transmit Threshold Control values for the DMA control register.
 * DMA_CONTROL_SE and DMA_CONTROL_OSF are independent flag bits that
 * live in the same register, not threshold encodings. */
enum ttc_control {
	DMA_CONTROL_TTC_DEFAULT = 0x00000000,	/* Threshold is 32 DWORDS */
	DMA_CONTROL_TTC_64 = 0x00004000,	/* Threshold is 64 DWORDS */
	DMA_CONTROL_TTC_128 = 0x00008000,	/* Threshold is 128 DWORDS */
	DMA_CONTROL_TTC_256 = 0x0000c000,	/* Threshold is 256 DWORDS */
	DMA_CONTROL_TTC_18 = 0x00400000,	/* Threshold is 18 DWORDS */
	DMA_CONTROL_TTC_24 = 0x00404000,	/* Threshold is 24 DWORDS */
	DMA_CONTROL_TTC_32 = 0x00408000,	/* Threshold is 32 DWORDS */
	DMA_CONTROL_TTC_40 = 0x0040c000,	/* Threshold is 40 DWORDS */
	DMA_CONTROL_SE = 0x00000008,	/* Stop On Empty */
	DMA_CONTROL_OSF = 0x00000004,	/* Operate On 2nd Frame */
};
111
112/* STMAC110 DMA Missed Frame Counter register defines */
#define DMA_MISSED_FRAME_OVE	0x10000000	/* FIFO Overflow */
114#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
115#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
#define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Counter */
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
new file mode 100644
index 000000000000..6d2eae3040e5
--- /dev/null
+++ b/drivers/net/stmmac/stmmac.h
@@ -0,0 +1,98 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#define DRV_MODULE_VERSION "Oct_09"
24
25#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
26#define STMMAC_VLAN_TAG_USED
27#include <linux/if_vlan.h>
28#endif
29
30#include "common.h"
31#ifdef CONFIG_STMMAC_TIMER
32#include "stmmac_timer.h"
33#endif
34
35struct stmmac_priv {
36 /* Frequently used values are kept adjacent for cache effect */
37 struct dma_desc *dma_tx ____cacheline_aligned;
38 dma_addr_t dma_tx_phy;
39 struct sk_buff **tx_skbuff;
40 unsigned int cur_tx;
41 unsigned int dirty_tx;
42 unsigned int dma_tx_size;
43 int tx_coe;
44 int tx_coalesce;
45
46 struct dma_desc *dma_rx ;
47 unsigned int cur_rx;
48 unsigned int dirty_rx;
49 struct sk_buff **rx_skbuff;
50 dma_addr_t *rx_skbuff_dma;
51 struct sk_buff_head rx_recycle;
52
53 struct net_device *dev;
54 int is_gmac;
55 dma_addr_t dma_rx_phy;
56 unsigned int dma_rx_size;
57 int rx_csum;
58 unsigned int dma_buf_sz;
59 struct device *device;
60 struct mac_device_info *mac_type;
61
62 struct stmmac_extra_stats xstats;
63 struct napi_struct napi;
64
65 phy_interface_t phy_interface;
66 int pbl;
67 int bus_id;
68 int phy_addr;
69 int phy_mask;
70 int (*phy_reset) (void *priv);
71 void (*fix_mac_speed) (void *priv, unsigned int speed);
72 void *bsp_priv;
73
74 int phy_irq;
75 struct phy_device *phydev;
76 int oldlink;
77 int speed;
78 int oldduplex;
79 unsigned int flow_ctrl;
80 unsigned int pause;
81 struct mii_bus *mii;
82
83 u32 msg_enable;
84 spinlock_t lock;
85 int wolopts;
86 int wolenabled;
87 int shutdown;
88#ifdef CONFIG_STMMAC_TIMER
89 struct stmmac_timer *tm;
90#endif
91#ifdef STMMAC_VLAN_TAG_USED
92 struct vlan_group *vlgrp;
93#endif
94};
95
96extern int stmmac_mdio_unregister(struct net_device *ndev);
97extern int stmmac_mdio_register(struct net_device *ndev);
98extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
new file mode 100644
index 000000000000..694ebe6a0758
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -0,0 +1,395 @@
1/*******************************************************************************
2 STMMAC Ethtool support
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/mii.h>
28#include <linux/phy.h>
29
30#include "stmmac.h"
31
32#define REG_SPACE_SIZE 0x1054
33#define MAC100_ETHTOOL_NAME "st_mac100"
34#define GMAC_ETHTOOL_NAME "st_gmac"
35
/* Descriptor for one exported statistic: its ethtool name, the size of
 * the backing field in stmmac_extra_stats, and its byte offset inside
 * struct stmmac_priv. */
struct stmmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define STMMAC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
	offsetof(struct stmmac_priv, xstats.m)}

/* Table of all extra statistics exported through ethtool -S. */
static const struct stmmac_stats stmmac_gstrings_stats[] = {
	STMMAC_STAT(tx_underflow),
	STMMAC_STAT(tx_carrier),
	STMMAC_STAT(tx_losscarrier),
	STMMAC_STAT(tx_heartbeat),
	STMMAC_STAT(tx_deferred),
	STMMAC_STAT(tx_vlan),
	STMMAC_STAT(rx_vlan),
	STMMAC_STAT(tx_jabber),
	STMMAC_STAT(tx_frame_flushed),
	STMMAC_STAT(tx_payload_error),
	STMMAC_STAT(tx_ip_header_error),
	STMMAC_STAT(rx_desc),
	STMMAC_STAT(rx_partial),
	STMMAC_STAT(rx_runt),
	STMMAC_STAT(rx_toolong),
	STMMAC_STAT(rx_collision),
	STMMAC_STAT(rx_crc),
	STMMAC_STAT(rx_lenght),	/* spelling matches the struct field name */
	STMMAC_STAT(rx_mii),
	STMMAC_STAT(rx_multicast),
	STMMAC_STAT(rx_gmac_overflow),
	STMMAC_STAT(rx_watchdog),
	STMMAC_STAT(da_rx_filter_fail),
	STMMAC_STAT(sa_rx_filter_fail),
	STMMAC_STAT(rx_missed_cntr),
	STMMAC_STAT(rx_overflow_cntr),
	STMMAC_STAT(tx_undeflow_irq),
	STMMAC_STAT(tx_process_stopped_irq),
	STMMAC_STAT(tx_jabber_irq),
	STMMAC_STAT(rx_overflow_irq),
	STMMAC_STAT(rx_buf_unav_irq),
	STMMAC_STAT(rx_process_stopped_irq),
	STMMAC_STAT(rx_watchdog_irq),
	STMMAC_STAT(tx_early_irq),
	STMMAC_STAT(fatal_bus_error_irq),
	STMMAC_STAT(threshold),
	STMMAC_STAT(tx_pkt_n),
	STMMAC_STAT(rx_pkt_n),
	STMMAC_STAT(poll_n),
	STMMAC_STAT(sched_timer_n),
	STMMAC_STAT(normal_irq_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
90
91void stmmac_ethtool_getdrvinfo(struct net_device *dev,
92 struct ethtool_drvinfo *info)
93{
94 struct stmmac_priv *priv = netdev_priv(dev);
95
96 if (!priv->is_gmac)
97 strcpy(info->driver, MAC100_ETHTOOL_NAME);
98 else
99 strcpy(info->driver, GMAC_ETHTOOL_NAME);
100
101 strcpy(info->version, DRV_MODULE_VERSION);
102 info->fw_version[0] = '\0';
103 info->n_stats = STMMAC_STATS_LEN;
104 return;
105}
106
107int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
108{
109 struct stmmac_priv *priv = netdev_priv(dev);
110 struct phy_device *phy = priv->phydev;
111 int rc;
112 if (phy == NULL) {
113 pr_err("%s: %s: PHY is not registered\n",
114 __func__, dev->name);
115 return -ENODEV;
116 }
117 if (!netif_running(dev)) {
118 pr_err("%s: interface is disabled: we cannot track "
119 "link speed / duplex setting\n", dev->name);
120 return -EBUSY;
121 }
122 cmd->transceiver = XCVR_INTERNAL;
123 spin_lock_irq(&priv->lock);
124 rc = phy_ethtool_gset(phy, cmd);
125 spin_unlock_irq(&priv->lock);
126 return rc;
127}
128
129int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
130{
131 struct stmmac_priv *priv = netdev_priv(dev);
132 struct phy_device *phy = priv->phydev;
133 int rc;
134
135 spin_lock(&priv->lock);
136 rc = phy_ethtool_sset(phy, cmd);
137 spin_unlock(&priv->lock);
138
139 return rc;
140}
141
/* ethtool get_msglevel: return the driver's message-enable mask. */
u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	return priv->msg_enable;
}
147
/* ethtool set_msglevel: store the driver's message-enable mask. */
void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	priv->msg_enable = level;

}
154
155int stmmac_check_if_running(struct net_device *dev)
156{
157 if (!netif_running(dev))
158 return -EBUSY;
159 return 0;
160}
161
/* ethtool get_regs_len: size of the register dump buffer. */
int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}
166
167void stmmac_ethtool_gregs(struct net_device *dev,
168 struct ethtool_regs *regs, void *space)
169{
170 int i;
171 u32 *reg_space = (u32 *) space;
172
173 struct stmmac_priv *priv = netdev_priv(dev);
174
175 memset(reg_space, 0x0, REG_SPACE_SIZE);
176
177 if (!priv->is_gmac) {
178 /* MAC registers */
179 for (i = 0; i < 12; i++)
180 reg_space[i] = readl(dev->base_addr + (i * 4));
181 /* DMA registers */
182 for (i = 0; i < 9; i++)
183 reg_space[i + 12] =
184 readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
185 reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
186 reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
187 } else {
188 /* MAC registers */
189 for (i = 0; i < 55; i++)
190 reg_space[i] = readl(dev->base_addr + (i * 4));
191 /* DMA registers */
192 for (i = 0; i < 22; i++)
193 reg_space[i + 55] =
194 readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
195 }
196
197 return;
198}
199
200int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
201{
202 if (data)
203 netdev->features |= NETIF_F_HW_CSUM;
204 else
205 netdev->features &= ~NETIF_F_HW_CSUM;
206
207 return 0;
208}
209
/* ethtool get_rx_csum: report whether RX checksumming is in use. */
u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	return priv->rx_csum;
}
216
217static void
218stmmac_get_pauseparam(struct net_device *netdev,
219 struct ethtool_pauseparam *pause)
220{
221 struct stmmac_priv *priv = netdev_priv(netdev);
222
223 spin_lock(&priv->lock);
224
225 pause->rx_pause = 0;
226 pause->tx_pause = 0;
227 pause->autoneg = priv->phydev->autoneg;
228
229 if (priv->flow_ctrl & FLOW_RX)
230 pause->rx_pause = 1;
231 if (priv->flow_ctrl & FLOW_TX)
232 pause->tx_pause = 1;
233
234 spin_unlock(&priv->lock);
235 return;
236}
237
/* ethtool set_pauseparam: update the FLOW_RX/FLOW_TX mask and apply it,
 * either by restarting auto-negotiation or by programming the MAC's
 * flow-control register directly. */
static int
stmmac_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	struct phy_device *phy = priv->phydev;
	int new_pause = FLOW_OFF;
	int ret = 0;

	spin_lock(&priv->lock);

	if (pause->rx_pause)
		new_pause |= FLOW_RX;
	if (pause->tx_pause)
		new_pause |= FLOW_TX;

	priv->flow_ctrl = new_pause;

	if (phy->autoneg) {
		if (netif_running(netdev)) {
			struct ethtool_cmd cmd;
			/* auto-negotiation automatically restarted */
			cmd.cmd = ETHTOOL_NWAY_RST;
			cmd.supported = phy->supported;
			cmd.advertising = phy->advertising;
			cmd.autoneg = phy->autoneg;
			cmd.speed = phy->speed;
			cmd.duplex = phy->duplex;
			cmd.phy_address = phy->addr;
			ret = phy_ethtool_sset(phy, &cmd);
		}
	} else {
		/* No autoneg: program the MAC flow-control register now. */
		unsigned long ioaddr = netdev->base_addr;
		priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
					       priv->flow_ctrl, priv->pause);
	}
	spin_unlock(&priv->lock);
	return ret;
}
277
278static void stmmac_get_ethtool_stats(struct net_device *dev,
279 struct ethtool_stats *dummy, u64 *data)
280{
281 struct stmmac_priv *priv = netdev_priv(dev);
282 unsigned long ioaddr = dev->base_addr;
283 int i;
284
285 /* Update HW stats if supported */
286 priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
287 ioaddr);
288
289 for (i = 0; i < STMMAC_STATS_LEN; i++) {
290 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
291 data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
292 sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
293 }
294
295 return;
296}
297
298static int stmmac_get_sset_count(struct net_device *netdev, int sset)
299{
300 switch (sset) {
301 case ETH_SS_STATS:
302 return STMMAC_STATS_LEN;
303 default:
304 return -EOPNOTSUPP;
305 }
306}
307
308static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
309{
310 int i;
311 u8 *p = data;
312
313 switch (stringset) {
314 case ETH_SS_STATS:
315 for (i = 0; i < STMMAC_STATS_LEN; i++) {
316 memcpy(p, stmmac_gstrings_stats[i].stat_string,
317 ETH_GSTRING_LEN);
318 p += ETH_GSTRING_LEN;
319 }
320 break;
321 default:
322 WARN_ON(1);
323 break;
324 }
325 return;
326}
327
/* Currently only support WOL through Magic packet. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->lock);
	/* Report nothing when the PMT block is absent (wol zero-filled
	 * by the ethtool core). */
	if (priv->wolenabled == PMT_SUPPORTED) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = priv->wolopts;
	}
	spin_unlock_irq(&priv->lock);
}
340
341static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
342{
343 struct stmmac_priv *priv = netdev_priv(dev);
344 u32 support = WAKE_MAGIC;
345
346 if (priv->wolenabled == PMT_NOT_SUPPORTED)
347 return -EINVAL;
348
349 if (wol->wolopts & ~support)
350 return -EINVAL;
351
352 if (wol->wolopts == 0)
353 device_set_wakeup_enable(priv->device, 0);
354 else
355 device_set_wakeup_enable(priv->device, 1);
356
357 spin_lock_irq(&priv->lock);
358 priv->wolopts = wol->wolopts;
359 spin_unlock_irq(&priv->lock);
360
361 return 0;
362}
363
/* ethtool operations table wired up via stmmac_set_ethtool_ops(). */
static struct ethtool_ops stmmac_ethtool_ops = {
	.begin = stmmac_check_if_running,
	.get_drvinfo = stmmac_ethtool_getdrvinfo,
	.get_settings = stmmac_ethtool_getsettings,
	.set_settings = stmmac_ethtool_setsettings,
	.get_msglevel = stmmac_ethtool_getmsglevel,
	.set_msglevel = stmmac_ethtool_setmsglevel,
	.get_regs = stmmac_ethtool_gregs,
	.get_regs_len = stmmac_ethtool_get_regs_len,
	.get_link = ethtool_op_get_link,
	.get_rx_csum = stmmac_ethtool_get_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = stmmac_ethtool_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_pauseparam = stmmac_get_pauseparam,
	.set_pauseparam = stmmac_set_pauseparam,
	.get_ethtool_stats = stmmac_get_ethtool_stats,
	.get_strings = stmmac_get_strings,
	.get_wol = stmmac_get_wol,
	.set_wol = stmmac_set_wol,
	.get_sset_count = stmmac_get_sset_count,
#ifdef NETIF_F_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
#endif
};
391
/* Attach the stmmac ethtool operations to a net device. */
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
new file mode 100644
index 000000000000..c2f14dc9ba28
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -0,0 +1,2204 @@
1/*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
4
5 Copyright (C) 2007-2009 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24
25 Documentation available at:
26 http://www.stlinux.com
27 Support available at:
28 https://bugzilla.stlinux.com/
29*******************************************************************************/
30
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/kernel.h>
34#include <linux/interrupt.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/platform_device.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/skbuff.h>
41#include <linux/ethtool.h>
42#include <linux/if_ether.h>
43#include <linux/crc32.h>
44#include <linux/mii.h>
45#include <linux/phy.h>
46#include <linux/if_vlan.h>
47#include <linux/dma-mapping.h>
48#include <linux/stm/soc.h>
49#include "stmmac.h"
50
51#define STMMAC_RESOURCE_NAME "stmmaceth"
52#define PHY_RESOURCE_NAME "stmmacphy"
53
54#undef STMMAC_DEBUG
55/*#define STMMAC_DEBUG*/
56#ifdef STMMAC_DEBUG
57#define DBG(nlevel, klevel, fmt, args...) \
58 ((void)(netif_msg_##nlevel(priv) && \
59 printk(KERN_##klevel fmt, ## args)))
60#else
61#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
62#endif
63
64#undef STMMAC_RX_DEBUG
65/*#define STMMAC_RX_DEBUG*/
66#ifdef STMMAC_RX_DEBUG
67#define RX_DBG(fmt, args...) printk(fmt, ## args)
68#else
69#define RX_DBG(fmt, args...) do { } while (0)
70#endif
71
#undef STMMAC_XMIT_DEBUG
/*#define STMMAC_XMIT_DEBUG*/
/* The guard must test STMMAC_XMIT_DEBUG -- the knob defined just above.
 * The original tested the non-existent STMMAC_TX_DEBUG, so uncommenting
 * the line above could never actually enable TX_DBG. */
#ifdef STMMAC_XMIT_DEBUG
#define TX_DBG(fmt, args...)  printk(fmt, ## args)
#else
#define TX_DBG(fmt, args...)  do { } while (0)
#endif
79
80#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
81#define JUMBO_LEN 9000
82
83/* Module parameters */
84#define TX_TIMEO 5000 /* default 5 seconds */
85static int watchdog = TX_TIMEO;
86module_param(watchdog, int, S_IRUGO | S_IWUSR);
87MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
88
89static int debug = -1; /* -1: default, 0: no output, 16: all */
90module_param(debug, int, S_IRUGO | S_IWUSR);
91MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
92
93static int phyaddr = -1;
94module_param(phyaddr, int, S_IRUGO);
95MODULE_PARM_DESC(phyaddr, "Physical device address");
96
97#define DMA_TX_SIZE 256
98static int dma_txsize = DMA_TX_SIZE;
99module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
101
102#define DMA_RX_SIZE 256
103static int dma_rxsize = DMA_RX_SIZE;
104module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
106
107static int flow_ctrl = FLOW_OFF;
108module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
109MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
110
111static int pause = PAUSE_TIME;
112module_param(pause, int, S_IRUGO | S_IWUSR);
113MODULE_PARM_DESC(pause, "Flow Control Pause Time");
114
115#define TC_DEFAULT 64
116static int tc = TC_DEFAULT;
117module_param(tc, int, S_IRUGO | S_IWUSR);
118MODULE_PARM_DESC(tc, "DMA threshold control value");
119
120#define RX_NO_COALESCE 1 /* Always interrupt on completion */
121#define TX_NO_COALESCE -1 /* No moderation by default */
122
/* Pay attention to tune this parameter; take care of both
 * hardware capability and network stability/performance impact.
 * Many tests showed that ~4ms latency seems to be good enough. */
126#ifdef CONFIG_STMMAC_TIMER
127#define DEFAULT_PERIODIC_RATE 256
128static int tmrate = DEFAULT_PERIODIC_RATE;
129module_param(tmrate, int, S_IRUGO | S_IWUSR);
130MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
131#endif
132
133#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
134static int buf_sz = DMA_BUFFER_SIZE;
135module_param(buf_sz, int, S_IRUGO | S_IWUSR);
136MODULE_PARM_DESC(buf_sz, "DMA buffer size");
137
138/* In case of Giga ETH, we can enable/disable the COE for the
139 * transmit HW checksum computation.
140 * Note that, if tx csum is off in HW, SG will be still supported. */
141static int tx_coe = HW_CSUM;
142module_param(tx_coe, int, S_IRUGO | S_IWUSR);
143MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
144
145static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
146 NETIF_MSG_LINK | NETIF_MSG_IFUP |
147 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
148
149static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
150static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
151
152/**
153 * stmmac_verify_args - verify the driver parameters.
154 * Description: it verifies if some wrong parameter is passed to the driver.
155 * Note that wrong parameters are replaced with the default values.
156 */
157static void stmmac_verify_args(void)
158{
159 if (unlikely(watchdog < 0))
160 watchdog = TX_TIMEO;
161 if (unlikely(dma_rxsize < 0))
162 dma_rxsize = DMA_RX_SIZE;
163 if (unlikely(dma_txsize < 0))
164 dma_txsize = DMA_TX_SIZE;
165 if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
166 buf_sz = DMA_BUFFER_SIZE;
167 if (unlikely(flow_ctrl > 1))
168 flow_ctrl = FLOW_AUTO;
169 else if (likely(flow_ctrl < 0))
170 flow_ctrl = FLOW_OFF;
171 if (unlikely((pause < 0) || (pause > 0xffff)))
172 pause = PAUSE_TIME;
173
174 return;
175}
176
177#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
/* Hex-dump @len bytes of @buf to the kernel log, 16 bytes per row,
 * with the row offset printed at the start of each line. */
static void print_pkt(unsigned char *buf, int len)
{
	int j;
	pr_info("len = %d byte, buf addr: 0x%p", len, buf);
	for (j = 0; j < len; j++) {
		if ((j % 16) == 0)
			pr_info("\n %03x:", j);
		pr_info(" %02x", buf[j]);
	}
	pr_info("\n");
	return;
}
190#endif
191
192/* minimum number of free TX descriptors required to wake up TX process */
193#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
194
/* Number of free TX descriptors.  cur_tx/dirty_tx are free-running
 * indices, so unsigned wrap-around keeps this correct; the "- 1"
 * presumably keeps one slot in reserve (full vs. empty disambiguation)
 * -- confirm against the xmit path. */
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}
199
200/**
201 * stmmac_adjust_link
202 * @dev: net device structure
203 * Description: it adjusts the link parameters.
204 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;
	int new_state = 0;	/* set when anything worth logging changed */
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (phydev == NULL)
		return;

	DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
	    phydev->addr, phydev->link);

	/* Cached link state (oldlink/speed/oldduplex) is shared with other
	 * paths, hence the irq-safe lock. */
	spin_lock_irqsave(&priv->lock, flags);
	if (phydev->link) {
		u32 ctrl = readl(ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->mac_type->hw.link.duplex;
			else
				ctrl |= priv->mac_type->hw.link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
						       fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				/* GMAC: clearing the port bit selects the
				 * gigabit interface. */
				if (likely(priv->is_gmac))
					ctrl &= ~priv->mac_type->hw.link.port;
				break;
			case 100:
			case 10:
				if (priv->is_gmac) {
					ctrl |= priv->mac_type->hw.link.port;
					if (phydev->speed == SPEED_100) {
						ctrl |=
						    priv->mac_type->hw.link.
						    speed;
					} else {
						ctrl &=
						    ~(priv->mac_type->hw.
						      link.speed);
					}
				} else {
					ctrl &= ~priv->mac_type->hw.link.port;
				}
				/* NOTE(review): called unconditionally --
				 * assumes the platform always provides
				 * fix_mac_speed; a NULL hook would oops.
				 * Confirm against the platform data. */
				priv->fix_mac_speed(priv->bsp_priv,
						    phydev->speed);
				break;
			default:
				if (netif_msg_link(priv))
					pr_warning("%s: Speed (%d) is not 10"
				       " or 100!\n", dev->name, phydev->speed);
				break;
			}

			priv->speed = phydev->speed;
		}

		writel(ctrl, ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		/* Link went down: reset the cached state. */
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
}
295
296/**
297 * stmmac_init_phy - PHY initialization
298 * @dev: net device structure
299 * Description: it initializes the driver's PHY state, and attaches the PHY
300 * to the mac driver.
301 * Return value:
302 * 0 on success
303 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	/* NOTE(review): both buffers are sized BUS_ID_SIZE, but bus_id is
	 * formatted with the MII_BUS_ID_SIZE bound below -- confirm
	 * MII_BUS_ID_SIZE <= BUS_ID_SIZE to rule out truncation/overrun. */
	char phy_id[BUS_ID_SIZE];	/* PHY to connect */
	char bus_id[BUS_ID_SIZE];

	/* Start from a clean cached link state; stmmac_adjust_link()
	 * maintains it from here on. */
	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;

	if (priv->phy_addr == -1) {
		/* We don't have a PHY, so do nothing */
		return 0;
	}

	/* Build the PHY device identifier (PHY_ID_FMT) from bus and addr. */
	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
	pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);

	phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
			priv->phy_interface);

	if (IS_ERR(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}
	pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
	       " Link = %d\n", dev->name, phydev->phy_id, phydev->link);

	priv->phydev = phydev;

	return 0;
}
350
351static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
352{
353 u32 value = readl(ioaddr + MAC_CTRL_REG);
354 value |= MAC_RNABLE_RX;
355 /* Set the RE (receive enable bit into the MAC CTRL register). */
356 writel(value, ioaddr + MAC_CTRL_REG);
357}
358
359static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
360{
361 u32 value = readl(ioaddr + MAC_CTRL_REG);
362 value |= MAC_ENABLE_TX;
363 /* Set the TE (transmit enable bit into the MAC CTRL register). */
364 writel(value, ioaddr + MAC_CTRL_REG);
365}
366
367static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
368{
369 u32 value = readl(ioaddr + MAC_CTRL_REG);
370 value &= ~MAC_RNABLE_RX;
371 writel(value, ioaddr + MAC_CTRL_REG);
372}
373
374static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
375{
376 u32 value = readl(ioaddr + MAC_CTRL_REG);
377 value &= ~MAC_ENABLE_TX;
378 writel(value, ioaddr + MAC_CTRL_REG);
379}
380
381/**
382 * display_ring
383 * @p: pointer to the ring.
384 * @size: size of the ring.
385 * Description: display all the descriptors within the ring.
386 */
static void display_ring(struct dma_desc *p, int size)
{
	/* Overlay used to pull the descriptor words out for printing:
	 * DES0/DES1 packed into 'a', the two buffer pointers in 'b'/'c'. */
	struct tmp_s {
		u64 a;
		unsigned int b;
		unsigned int c;
	};
	int i;
	for (i = 0; i < size; i++) {
		/* NOTE(review): type-punning via cast assumes struct dma_desc
		 * has exactly this layout -- confirm against stmmac.h. */
		struct tmp_s *x = (struct tmp_s *)(p + i);
		pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
		       i, (unsigned int)virt_to_phys(&p[i]),
		       (unsigned int)(x->a), (unsigned int)((x->a) >> 32),
		       x->b, x->c);
		pr_info("\n");
	}
}
404
405/**
406 * init_dma_desc_rings - init the RX/TX descriptor rings
407 * @dev: net device structure
408 * Description: this function initializes the DMA RX/TX descriptors
409 * and allocates the socket buffers.
410 */
static void init_dma_desc_rings(struct net_device *dev)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int bfsize = priv->dma_buf_sz;
	int buff2_needed = 0;	/* set when a descriptor chains two buffers */
	int dis_ic = 0;		/* disable "interrupt on completion" bit */

#ifdef CONFIG_STMMAC_TIMER
	/* Using Timers disable interrupts on completion for the reception */
	dis_ic = 1;
#endif
	/* Set the Buffer size according to the MTU;
	 * indeed, in case of jumbo we need to bump-up the buffer sizes.
	 */
	if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
		bfsize = BUF_SIZE_16KiB;
	else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
		bfsize = BUF_SIZE_8KiB;
	else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
		bfsize = BUF_SIZE_4KiB;
	else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
		bfsize = BUF_SIZE_2KiB;
	else
		bfsize = DMA_BUFFER_SIZE;

	/* If the MTU exceeds 8k, use the second buffer in the chain */
	if (bfsize >= BUF_SIZE_8KiB)
		buff2_needed = 1;

	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
	    txsize, rxsize, bfsize);

	/* NOTE(review): these kmalloc results are unchecked and the function
	 * cannot report failure (void return); a failed allocation here
	 * would oops in the loops below -- needs a caller-visible error
	 * path. */
	priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
	priv->rx_skbuff =
	    kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
	priv->dma_rx =
	    (struct dma_desc *)dma_alloc_coherent(priv->device,
						  rxsize *
						  sizeof(struct dma_desc),
						  &priv->dma_rx_phy,
						  GFP_KERNEL);
	priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
				       GFP_KERNEL);
	priv->dma_tx =
	    (struct dma_desc *)dma_alloc_coherent(priv->device,
						  txsize *
						  sizeof(struct dma_desc),
						  &priv->dma_tx_phy,
						  GFP_KERNEL);

	if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
		pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
		return;
	}

	DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
	    dev->name, priv->dma_rx, priv->dma_tx,
	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

	/* RX INITIALIZATION: allocate and DMA-map one skb per descriptor. */
	DBG(probe, INFO, "stmmac: SKB addresses:\n"
			 "skb\t\tskb data\tdma data\n");

	for (i = 0; i < rxsize; i++) {
		struct dma_desc *p = priv->dma_rx + i;

		skb = netdev_alloc_skb_ip_align(dev, bfsize);
		if (unlikely(skb == NULL)) {
			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
			break;
		}
		priv->rx_skbuff[i] = skb;
		priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						bfsize, DMA_FROM_DEVICE);

		p->des2 = priv->rx_skbuff_dma[i];
		/* For jumbo buffers the second half lives 8 KiB above
		 * the first. */
		if (unlikely(buff2_needed))
			p->des3 = p->des2 + BUF_SIZE_8KiB;
		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
			priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - rxsize);
	priv->dma_buf_sz = bfsize;
	/* Mirror the effective buffer size back into the module param. */
	buf_sz = bfsize;

	/* TX INITIALIZATION: all descriptors start empty. */
	for (i = 0; i < txsize; i++) {
		priv->tx_skbuff[i] = NULL;
		priv->dma_tx[i].des2 = 0;
	}
	priv->dirty_tx = 0;
	priv->cur_tx = 0;

	/* Clear the Rx/Tx descriptors */
	priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
	priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);

	if (netif_msg_hw(priv)) {
		pr_info("RX descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
		pr_info("TX descriptor ring:\n");
		display_ring(priv->dma_tx, txsize);
	}
	return;
}
522
523static void dma_free_rx_skbufs(struct stmmac_priv *priv)
524{
525 int i;
526
527 for (i = 0; i < priv->dma_rx_size; i++) {
528 if (priv->rx_skbuff[i]) {
529 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
530 priv->dma_buf_sz, DMA_FROM_DEVICE);
531 dev_kfree_skb_any(priv->rx_skbuff[i]);
532 }
533 priv->rx_skbuff[i] = NULL;
534 }
535 return;
536}
537
538static void dma_free_tx_skbufs(struct stmmac_priv *priv)
539{
540 int i;
541
542 for (i = 0; i < priv->dma_tx_size; i++) {
543 if (priv->tx_skbuff[i] != NULL) {
544 struct dma_desc *p = priv->dma_tx + i;
545 if (p->des2)
546 dma_unmap_single(priv->device, p->des2,
547 priv->mac_type->ops->get_tx_len(p),
548 DMA_TO_DEVICE);
549 dev_kfree_skb_any(priv->tx_skbuff[i]);
550 priv->tx_skbuff[i] = NULL;
551 }
552 }
553 return;
554}
555
/* Tear down everything init_dma_desc_rings() set up: the skbs, the
 * coherent descriptor rings, then the bookkeeping arrays. */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free the region of consistent memory previously allocated for
	 * the DMA */
	dma_free_coherent(priv->device,
			  priv->dma_tx_size * sizeof(struct dma_desc),
			  priv->dma_tx, priv->dma_tx_phy);
	dma_free_coherent(priv->device,
			  priv->dma_rx_size * sizeof(struct dma_desc),
			  priv->dma_rx, priv->dma_rx_phy);
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff);

	return;
}
576
577/**
578 * stmmac_dma_start_tx
579 * @ioaddr: device I/O address
580 * Description: this function starts the DMA tx process.
581 */
582static void stmmac_dma_start_tx(unsigned long ioaddr)
583{
584 u32 value = readl(ioaddr + DMA_CONTROL);
585 value |= DMA_CONTROL_ST;
586 writel(value, ioaddr + DMA_CONTROL);
587 return;
588}
589
590static void stmmac_dma_stop_tx(unsigned long ioaddr)
591{
592 u32 value = readl(ioaddr + DMA_CONTROL);
593 value &= ~DMA_CONTROL_ST;
594 writel(value, ioaddr + DMA_CONTROL);
595 return;
596}
597
598/**
599 * stmmac_dma_start_rx
600 * @ioaddr: device I/O address
601 * Description: this function starts the DMA rx process.
602 */
603static void stmmac_dma_start_rx(unsigned long ioaddr)
604{
605 u32 value = readl(ioaddr + DMA_CONTROL);
606 value |= DMA_CONTROL_SR;
607 writel(value, ioaddr + DMA_CONTROL);
608
609 return;
610}
611
612static void stmmac_dma_stop_rx(unsigned long ioaddr)
613{
614 u32 value = readl(ioaddr + DMA_CONTROL);
615 value &= ~DMA_CONTROL_SR;
616 writel(value, ioaddr + DMA_CONTROL);
617
618 return;
619}
620
621/**
622 * stmmac_dma_operation_mode - HW DMA operation mode
623 * @priv : pointer to the private device structure.
624 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
625 * or Store-And-Forward capability. It also verifies the COE for the
626 * transmission in case of Giga ETH.
627 */
628static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
629{
630 if (!priv->is_gmac) {
631 /* MAC 10/100 */
632 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
633 priv->tx_coe = NO_HW_CSUM;
634 } else {
635 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
636 priv->mac_type->ops->dma_mode(priv->dev->base_addr,
637 SF_DMA_MODE, SF_DMA_MODE);
638 tc = SF_DMA_MODE;
639 priv->tx_coe = HW_CSUM;
640 } else {
641 /* Checksum computation is performed in software. */
642 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
643 SF_DMA_MODE);
644 priv->tx_coe = NO_HW_CSUM;
645 }
646 }
647 tx_coe = priv->tx_coe;
648
649 return;
650}
651
652#ifdef STMMAC_DEBUG
653/**
654 * show_tx_process_state
655 * @status: tx descriptor status field
656 * Description: it shows the Transmit Process State for CSR5[22:20]
657 */
658static void show_tx_process_state(unsigned int status)
659{
660 unsigned int state;
661 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
662
663 switch (state) {
664 case 0:
665 pr_info("- TX (Stopped): Reset or Stop command\n");
666 break;
667 case 1:
668 pr_info("- TX (Running):Fetching the Tx desc\n");
669 break;
670 case 2:
671 pr_info("- TX (Running): Waiting for end of tx\n");
672 break;
673 case 3:
674 pr_info("- TX (Running): Reading the data "
675 "and queuing the data into the Tx buf\n");
676 break;
677 case 6:
678 pr_info("- TX (Suspended): Tx Buff Underflow "
679 "or an unavailable Transmit descriptor\n");
680 break;
681 case 7:
682 pr_info("- TX (Running): Closing Tx descriptor\n");
683 break;
684 default:
685 break;
686 }
687 return;
688}
689
690/**
691 * show_rx_process_state
692 * @status: rx descriptor status field
693 * Description: it shows the Receive Process State for CSR5[19:17]
694 */
695static void show_rx_process_state(unsigned int status)
696{
697 unsigned int state;
698 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
699
700 switch (state) {
701 case 0:
702 pr_info("- RX (Stopped): Reset or Stop command\n");
703 break;
704 case 1:
705 pr_info("- RX (Running): Fetching the Rx desc\n");
706 break;
707 case 2:
708 pr_info("- RX (Running):Checking for end of pkt\n");
709 break;
710 case 3:
711 pr_info("- RX (Running): Waiting for Rx pkt\n");
712 break;
713 case 4:
714 pr_info("- RX (Suspended): Unavailable Rx buf\n");
715 break;
716 case 5:
717 pr_info("- RX (Running): Closing Rx descriptor\n");
718 break;
719 case 6:
720 pr_info("- RX(Running): Flushing the current frame"
721 " from the Rx buf\n");
722 break;
723 case 7:
724 pr_info("- RX (Running): Queuing the Rx frame"
725 " from the Rx buf into memory\n");
726 break;
727 default:
728 break;
729 }
730 return;
731}
732#endif
733
734/**
735 * stmmac_tx:
736 * @priv: private driver structure
737 * Description: it reclaims resources after transmission completes.
738 */
static void stmmac_tx(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;
	unsigned long ioaddr = priv->dev->base_addr;

	/* Walk the ring from the oldest unreclaimed descriptor up to the
	 * slot the xmit path will use next. */
	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->mac_type->ops->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment */
		last = priv->mac_type->ops->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
			    priv->mac_type->ops->tx_status(&priv->dev->stats,
							   &priv->xstats,
							   p, ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;
		}
		TX_DBG("%s: curr %d, dirty %d\n", __func__,
			priv->cur_tx, priv->dirty_tx);

		if (likely(p->des2))
			dma_unmap_single(priv->device, p->des2,
					 priv->mac_type->ops->get_tx_len(p),
					 DMA_TO_DEVICE);
		/* des3 holds the second-buffer pointer (jumbo frames);
		 * clear it so the descriptor is clean for reuse. */
		if (unlikely(p->des3))
			p->des3 = 0;

		if (likely(skb != NULL)) {
			/*
			 * If there's room in the queue (limit it to size)
			 * we add this skb back into the pool,
			 * if it's the right size.
			 */
			if ((skb_queue_len(&priv->rx_recycle) <
				priv->dma_rx_size) &&
				skb_recycle_check(skb, priv->dma_buf_sz))
				__skb_queue_head(&priv->rx_recycle, skb);
			else
				dev_kfree_skb(skb);

			priv->tx_skbuff[entry] = NULL;
		}

		priv->mac_type->ops->release_tx_desc(p);

		entry = (++priv->dirty_tx) % txsize;
	}
	/* Wake the queue once enough descriptors were reclaimed; the state
	 * is re-checked under the tx lock to avoid racing the xmit path. */
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			TX_DBG("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}
	return;
}
809
810static inline void stmmac_enable_irq(struct stmmac_priv *priv)
811{
812#ifndef CONFIG_STMMAC_TIMER
813 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
814#else
815 priv->tm->timer_start(tmrate);
816#endif
817}
818
819static inline void stmmac_disable_irq(struct stmmac_priv *priv)
820{
821#ifndef CONFIG_STMMAC_TIMER
822 writel(0, priv->dev->base_addr + DMA_INTR_ENA);
823#else
824 priv->tm->timer_stop();
825#endif
826}
827
828static int stmmac_has_work(struct stmmac_priv *priv)
829{
830 unsigned int has_work = 0;
831 int rxret, tx_work = 0;
832
833 rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
834 (priv->cur_rx % priv->dma_rx_size));
835
836 if (priv->dirty_tx != priv->cur_tx)
837 tx_work = 1;
838
839 if (likely(!rxret || tx_work))
840 has_work = 1;
841
842 return has_work;
843}
844
845static inline void _stmmac_schedule(struct stmmac_priv *priv)
846{
847 if (likely(stmmac_has_work(priv))) {
848 stmmac_disable_irq(priv);
849 napi_schedule(&priv->napi);
850 }
851}
852
853#ifdef CONFIG_STMMAC_TIMER
854void stmmac_schedule(struct net_device *dev)
855{
856 struct stmmac_priv *priv = netdev_priv(dev);
857
858 priv->xstats.sched_timer_n++;
859
860 _stmmac_schedule(priv);
861
862 return;
863}
864
865static void stmmac_no_timer_started(unsigned int x)
866{;
867};
868
869static void stmmac_no_timer_stopped(void)
870{;
871};
872#endif
873
874/**
875 * stmmac_tx_err:
876 * @priv: pointer to the private device structure
877 * Description: it cleans the descriptors and restarts the transmission
878 * in case of errors.
879 */
880static void stmmac_tx_err(struct stmmac_priv *priv)
881{
882 netif_stop_queue(priv->dev);
883
884 stmmac_dma_stop_tx(priv->dev->base_addr);
885 dma_free_tx_skbufs(priv);
886 priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
887 priv->dirty_tx = 0;
888 priv->cur_tx = 0;
889 stmmac_dma_start_tx(priv->dev->base_addr);
890
891 priv->dev->stats.tx_errors++;
892 netif_wake_queue(priv->dev);
893
894 return;
895}
896
897/**
898 * stmmac_dma_interrupt - Interrupt handler for the driver
899 * @dev: net device structure
900 * Description: Interrupt handler for the driver (DMA).
901 */
static void stmmac_dma_interrupt(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct stmmac_priv *priv = netdev_priv(dev);
	/* read the status register (CSR5) */
	u32 intr_status = readl(ioaddr + DMA_STATUS);

	DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);

#ifdef STMMAC_DEBUG
	/* It displays the DMA transmit process state (CSR5 register) */
	if (netif_msg_tx_done(priv))
		show_tx_process_state(intr_status);
	if (netif_msg_rx_status(priv))
		show_rx_process_state(intr_status);
#endif
	/* ABNORMAL interrupts */
	if (unlikely(intr_status & DMA_STATUS_AIS)) {
		DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
		if (unlikely(intr_status & DMA_STATUS_UNF)) {
			DBG(intr, INFO, "transmit underflow\n");
			/* On underflow, raise the TX threshold in steps of
			 * 64 (capped at 256) before resetting the TX path. */
			if (unlikely(tc != SF_DMA_MODE)
			    && (tc <= 256)) {
				/* Try to bump up the threshold */
				tc += 64;
				priv->mac_type->ops->dma_mode(ioaddr, tc,
					      SF_DMA_MODE);
				priv->xstats.threshold = tc;
			}
			stmmac_tx_err(priv);
			/* note: the stats field name carries a historical
			 * typo ("undeflow") */
			priv->xstats.tx_undeflow_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_TJT)) {
			DBG(intr, INFO, "transmit jabber\n");
			priv->xstats.tx_jabber_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_OVF)) {
			DBG(intr, INFO, "recv overflow\n");
			priv->xstats.rx_overflow_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_RU)) {
			DBG(intr, INFO, "receive buffer unavailable\n");
			priv->xstats.rx_buf_unav_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_RPS)) {
			DBG(intr, INFO, "receive process stopped\n");
			priv->xstats.rx_process_stopped_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_RWT)) {
			DBG(intr, INFO, "receive watchdog\n");
			priv->xstats.rx_watchdog_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_ETI)) {
			DBG(intr, INFO, "transmit early interrupt\n");
			priv->xstats.tx_early_irq++;
		}
		/* Fatal conditions: reset the TX path to recover. */
		if (unlikely(intr_status & DMA_STATUS_TPS)) {
			DBG(intr, INFO, "transmit process stopped\n");
			priv->xstats.tx_process_stopped_irq++;
			stmmac_tx_err(priv);
		}
		if (unlikely(intr_status & DMA_STATUS_FBI)) {
			DBG(intr, INFO, "fatal bus error\n");
			priv->xstats.fatal_bus_error_irq++;
			stmmac_tx_err(priv);
		}
	}

	/* TX/RX NORMAL interrupts */
	if (intr_status & DMA_STATUS_NIS) {
		priv->xstats.normal_irq_n++;
		if (likely((intr_status & DMA_STATUS_RI) ||
			 (intr_status & (DMA_STATUS_TI))))
				_stmmac_schedule(priv);
	}

	/* Optional hardware blocks, interrupts should be disabled */
	if (unlikely(intr_status &
		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
		pr_info("%s: unexpected status %08x\n", __func__, intr_status);

	/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
	writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);

	DBG(intr, INFO, "\n\n");

	return;
}
990
991/**
992 * stmmac_open - open entry point of the driver
993 * @dev : pointer to the device structure.
994 * Description:
995 * This function is the open entry point of the driver.
996 * Return value:
997 * 0 on success and an appropriate (-)ve integer as defined in errno.h
998 * file on failure.
999 */
1000static int stmmac_open(struct net_device *dev)
1001{
1002 struct stmmac_priv *priv = netdev_priv(dev);
1003 unsigned long ioaddr = dev->base_addr;
1004 int ret;
1005
1006 /* Check that the MAC address is valid. If its not, refuse
1007 * to bring the device up. The user must specify an
1008 * address using the following linux command:
1009 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
1010 if (!is_valid_ether_addr(dev->dev_addr)) {
1011 random_ether_addr(dev->dev_addr);
1012 pr_warning("%s: generated random MAC address %pM\n", dev->name,
1013 dev->dev_addr);
1014 }
1015
1016 stmmac_verify_args();
1017
1018 ret = stmmac_init_phy(dev);
1019 if (unlikely(ret)) {
1020 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
1021 return ret;
1022 }
1023
1024 /* Request the IRQ lines */
1025 ret = request_irq(dev->irq, &stmmac_interrupt,
1026 IRQF_SHARED, dev->name, dev);
1027 if (unlikely(ret < 0)) {
1028 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1029 __func__, dev->irq, ret);
1030 return ret;
1031 }
1032
1033#ifdef CONFIG_STMMAC_TIMER
1034 priv->tm = kmalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
1035 if (unlikely(priv->tm == NULL)) {
1036 pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
1037 return -ENOMEM;
1038 }
1039 priv->tm->freq = tmrate;
1040
1041 /* Test if the HW timer can be actually used.
1042 * In case of failure continue with no timer. */
1043 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
1044 pr_warning("stmmaceth: cannot attach the HW timer\n");
1045 tmrate = 0;
1046 priv->tm->freq = 0;
1047 priv->tm->timer_start = stmmac_no_timer_started;
1048 priv->tm->timer_stop = stmmac_no_timer_stopped;
1049 }
1050#endif
1051
1052 /* Create and initialize the TX/RX descriptors chains. */
1053 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1054 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1055 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1056 init_dma_desc_rings(dev);
1057
1058 /* DMA initialization and SW reset */
1059 if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
1060 priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
1061
1062 pr_err("%s: DMA initialization failed\n", __func__);
1063 return -1;
1064 }
1065
1066 /* Copy the MAC addr into the HW */
1067 priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
1068 /* Initialize the MAC Core */
1069 priv->mac_type->ops->core_init(ioaddr);
1070
1071 priv->shutdown = 0;
1072
1073 /* Initialise the MMC (if present) to disable all interrupts. */
1074 writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
1075 writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
1076
1077 /* Enable the MAC Rx/Tx */
1078 stmmac_mac_enable_rx(ioaddr);
1079 stmmac_mac_enable_tx(ioaddr);
1080
1081 /* Set the HW DMA mode and the COE */
1082 stmmac_dma_operation_mode(priv);
1083
1084 /* Extra statistics */
1085 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1086 priv->xstats.threshold = tc;
1087
1088 /* Start the ball rolling... */
1089 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
1090 stmmac_dma_start_tx(ioaddr);
1091 stmmac_dma_start_rx(ioaddr);
1092
1093#ifdef CONFIG_STMMAC_TIMER
1094 priv->tm->timer_start(tmrate);
1095#endif
1096 /* Dump DMA/MAC registers */
1097 if (netif_msg_hw(priv)) {
1098 priv->mac_type->ops->dump_mac_regs(ioaddr);
1099 priv->mac_type->ops->dump_dma_regs(ioaddr);
1100 }
1101
1102 if (priv->phydev)
1103 phy_start(priv->phydev);
1104
1105 napi_enable(&priv->napi);
1106 skb_queue_head_init(&priv->rx_recycle);
1107 netif_start_queue(dev);
1108 return 0;
1109}
1110
1111/**
1112 * stmmac_release - close entry point of the driver
1113 * @dev : device pointer.
1114 * Description:
1115 * This is the stop entry point of the driver.
1116 */
1117static int stmmac_release(struct net_device *dev)
1118{
1119 struct stmmac_priv *priv = netdev_priv(dev);
1120
1121 /* Stop and disconnect the PHY */
1122 if (priv->phydev) {
1123 phy_stop(priv->phydev);
1124 phy_disconnect(priv->phydev);
1125 priv->phydev = NULL;
1126 }
1127
1128 netif_stop_queue(dev);
1129
1130#ifdef CONFIG_STMMAC_TIMER
1131 /* Stop and release the timer */
1132 stmmac_close_ext_timer();
1133 if (priv->tm != NULL)
1134 kfree(priv->tm);
1135#endif
1136 napi_disable(&priv->napi);
1137 skb_queue_purge(&priv->rx_recycle);
1138
1139 /* Free the IRQ lines */
1140 free_irq(dev->irq, dev);
1141
1142 /* Stop TX/RX DMA and clear the descriptors */
1143 stmmac_dma_stop_tx(dev->base_addr);
1144 stmmac_dma_stop_rx(dev->base_addr);
1145
1146 /* Release and free the Rx/Tx resources */
1147 free_dma_desc_resources(priv);
1148
1149 /* Disable the MAC core */
1150 stmmac_mac_disable_tx(dev->base_addr);
1151 stmmac_mac_disable_rx(dev->base_addr);
1152
1153 netif_carrier_off(dev);
1154
1155 return 0;
1156}
1157
1158/*
1159 * To perform emulated hardware segmentation on skb.
1160 */
/*
 * Perform emulated (software) TSO on @skb: split the GSO super-frame into
 * MTU-sized segments and transmit each one through stmmac_xmit().
 * Always returns NETDEV_TX_OK except when the ring cannot hold all the
 * estimated segments, in which case NETDEV_TX_BUSY requeues the skb.
 */
static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
{
	struct sk_buff *segs, *curr_skb;
	int gso_segs = skb_shinfo(skb)->gso_segs;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
		netif_stop_queue(priv->dev);
		TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
		       __func__);
		/* Re-check after stopping the queue: if there is still no
		 * room, hand the skb back to the stack for requeueing. */
		if (stmmac_tx_avail(priv) < gso_segs)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->dev);
	}
	TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
	       skb, skb->len);

	/* Let the stack build the list of per-segment skbs */
	segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto sw_tso_end;

	/* Detach each segment from the list and transmit it; stmmac_xmit
	 * takes ownership of curr_skb. */
	do {
		curr_skb = segs;
		segs = segs->next;
		TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
		       "*next %p\n", curr_skb->len, curr_skb, segs);
		curr_skb->next = NULL;
		stmmac_xmit(curr_skb, priv->dev);
	} while (segs);

sw_tso_end:
	/* The original super-frame is no longer needed (segments carry
	 * copies of its data); free it even on segmentation failure. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
1197
/*
 * Map the linear part of a jumbo skb (len >= 4KiB) onto the TX ring.
 * Each descriptor can address two buffers (des2/des3); frames larger
 * than 8KiB are split across two descriptors. Returns the index of the
 * last descriptor used, so the caller can chain the paged fragments and
 * close the frame on it. Note: ownership of the FIRST descriptor is
 * deliberately not set here; the caller sets it last to avoid the DMA
 * racing a half-built chain.
 */
static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
					       struct net_device *dev,
					       int csum_insertion)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry = priv->cur_tx % txsize;
	struct dma_desc *desc = priv->dma_tx + entry;

	if (nopaged_len > BUF_SIZE_8KiB) {

		int buf2_size = nopaged_len - BUF_SIZE_8KiB;

		/* First descriptor: 8KiB split over des2/des3
		 * (des3 points 4KiB into the same mapping). */
		desc->des2 = dma_map_single(priv->device, skb->data,
					    BUF_SIZE_8KiB, DMA_TO_DEVICE);
		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
		priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
						     csum_insertion);

		/* Second descriptor: the remainder beyond 8KiB */
		entry = (++priv->cur_tx) % txsize;
		desc = priv->dma_tx + entry;

		desc->des2 = dma_map_single(priv->device,
					    skb->data + BUF_SIZE_8KiB,
					    buf2_size, DMA_TO_DEVICE);
		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
		priv->mac_type->ops->prepare_tx_desc(desc, 0,
						     buf2_size, csum_insertion);
		/* The second descriptor may be handed to the DMA now;
		 * the skb is tracked on the first entry by the caller. */
		priv->mac_type->ops->set_tx_owner(desc);
		priv->tx_skbuff[entry] = NULL;
	} else {
		/* 4KiB..8KiB: one descriptor, buffer split over des2/des3 */
		desc->des2 = dma_map_single(priv->device, skb->data,
					    nopaged_len, DMA_TO_DEVICE);
		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
		priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
						     csum_insertion);
	}
	return entry;
}
1238
1239/**
1240 * stmmac_xmit:
1241 * @skb : the socket buffer
1242 * @dev : device pointer
1243 * Description : Tx entry point of the driver.
1244 */
/**
 * stmmac_xmit:
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : Tx entry point of the driver. Maps the linear data and
 * each paged fragment onto consecutive DMA descriptors, hands ownership
 * to the hardware (first descriptor last, to avoid a race with the DMA),
 * and kicks the transmit poll demand register.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;

	/* Need one descriptor for the head plus one per fragment */
	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: BUG! Tx Ring full when queue awake\n",
			       __func__);
		}
		return NETDEV_TX_BUSY;
	}

	entry = priv->cur_tx % txsize;

#ifdef STMMAC_XMIT_DEBUG
	if ((skb->len > ETH_FRAME_LEN) || nfrags)
		pr_info("stmmac xmit:\n"
		       "\tskb addr %p - len: %d - nopaged_len: %d\n"
		       "\tn_frags: %d - ip_summed: %d - %s gso\n",
		       skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
		       !skb_is_gso(skb) ? "isn't" : "is");
#endif

	/* GSO frames are segmented in software and re-enter this routine
	 * once per segment. */
	if (unlikely(skb_is_gso(skb)))
		return stmmac_sw_tso(priv, skb);

	/* Either offload the checksum to the HW or compute it now */
	if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
		if (likely(priv->tx_coe == NO_HW_CSUM))
			skb_checksum_help(skb);
		else
			csum_insertion = 1;
	}

	desc = priv->dma_tx + entry;
	first = desc;

#ifdef STMMAC_XMIT_DEBUG
	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
		       "\t\tn_frags: %d, ip_summed: %d\n",
		       skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
#endif
	/* Record the skb on its first entry so the completion path can
	 * free it once the last descriptor is done. */
	priv->tx_skbuff[entry] = skb;
	if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
		entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
		desc = priv->dma_tx + entry;
	} else {
		unsigned int nopaged_len = skb_headlen(skb);
		desc->des2 = dma_map_single(priv->device, skb->data,
					nopaged_len, DMA_TO_DEVICE);
		priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
						     csum_insertion);
	}

	/* Map each paged fragment on its own descriptor; these may be
	 * handed to the DMA immediately (owner bit set here). */
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = frag->size;

		entry = (++priv->cur_tx) % txsize;
		desc = priv->dma_tx + entry;

		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
		desc->des2 = dma_map_page(priv->device, frag->page,
					  frag->page_offset,
					  len, DMA_TO_DEVICE);
		priv->tx_skbuff[entry] = NULL;
		priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
						     csum_insertion);
		priv->mac_type->ops->set_tx_owner(desc);
	}

	/* Interrupt on completion only for the latest segment */
	priv->mac_type->ops->close_tx_desc(desc);
#ifdef CONFIG_STMMAC_TIMER
	/* Clean IC while using timers */
	priv->mac_type->ops->clear_tx_ic(desc);
#endif
	/* Set the owner bit on the FIRST descriptor last, to avoid a race
	 * where the DMA starts on a partially built chain. */
	priv->mac_type->ops->set_tx_owner(first);

	priv->cur_tx++;

#ifdef STMMAC_XMIT_DEBUG
	if (netif_msg_pktdata(priv)) {
		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
		       "first=%p, nfrags=%d\n",
		       (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
		       entry, first, nfrags);
		display_ring(priv->dma_tx, txsize);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
#endif
	/* Stop the queue if the next worst-case frame would not fit */
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		TX_DBG("%s: stop transmitted packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	/* CSR1 enables the transmit DMA to check for new descriptor */
	writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);

	return NETDEV_TX_OK;
}
1357
/*
 * Re-arm RX descriptors consumed by stmmac_rx(): for every dirty entry,
 * attach a fresh buffer (recycled from rx_recycle when possible) and
 * give the descriptor back to the hardware.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	struct dma_desc *p = priv->dma_rx;

	/* Walk all entries between dirty_rx and cur_rx */
	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		unsigned int entry = priv->dirty_rx % rxsize;
		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			/* Prefer a recycled skb over a fresh allocation */
			skb = __skb_dequeue(&priv->rx_recycle);
			if (skb == NULL)
				skb = netdev_alloc_skb_ip_align(priv->dev,
								bfsize);

			/* Out of memory: leave the entry empty and retry
			 * on the next refill pass. */
			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);

			(p + entry)->des2 = priv->rx_skbuff_dma[entry];
			if (unlikely(priv->is_gmac)) {
				/* Jumbo buffers span des2/des3 on GMAC */
				if (bfsize >= BUF_SIZE_8KiB)
					(p + entry)->des3 =
					    (p + entry)->des2 + BUF_SIZE_8KiB;
			}
			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
		}
		/* Hand the descriptor back to the DMA */
		priv->mac_type->ops->set_rx_owner(p + entry);
	}
	return;
}
1394
/*
 * Receive up to @limit frames from the RX descriptor ring (NAPI budget).
 * Walks descriptors the hardware has released (owner bit clear), pushes
 * good frames up the stack, and finally refills the consumed entries.
 * Returns the number of descriptors processed.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;
	struct dma_desc *p = priv->dma_rx + entry;
	struct dma_desc *p_next;

#ifdef STMMAC_RX_DEBUG
	if (netif_msg_hw(priv)) {
		pr_debug(">>> stmmac_rx: descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
	}
#endif
	count = 0;
	/* Process until the HW still owns the descriptor or budget ends */
	while (!priv->mac_type->ops->get_rx_owner(p)) {
		int status;

		if (count >= limit)
			break;

		count++;

		/* Prefetch the next descriptor while handling this one */
		next_entry = (++priv->cur_rx) % rxsize;
		p_next = priv->dma_rx + next_entry;
		prefetch(p_next);

		/* read the status of the incoming frame */
		status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
							 &priv->xstats, p));
		if (unlikely(status == discard_frame))
			priv->dev->stats.rx_errors++;
		else {
			struct sk_buff *skb;
			/* Length should omit the CRC (4 trailing bytes) */
			int frame_len =
			    priv->mac_type->ops->get_rx_frame_len(p) - 4;

#ifdef STMMAC_RX_DEBUG
			if (frame_len > ETH_FRAME_LEN)
				pr_debug("\tRX frame size %d, COE status: %d\n",
					frame_len, status);

			if (netif_msg_hw(priv))
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					p, entry, p->des2);
#endif
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
					priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			/* Buffer now travels up the stack; refill later */
			priv->rx_skbuff[entry] = NULL;

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
#ifdef STMMAC_RX_DEBUG
			if (netif_msg_pktdata(priv)) {
				pr_info(" frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}
#endif
			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(status == csum_none)) {
				/* always for the old mac 10/100 */
				skb->ip_summed = CHECKSUM_NONE;
				netif_receive_skb(skb);
			} else {
				/* HW validated the checksum: GRO eligible */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				napi_gro_receive(&priv->napi, skb);
			}

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
			priv->dev->last_rx = jiffies;
		}
		entry = next_entry;
		p = p_next;	/* use prefetched values */
	}

	/* Re-arm the descriptors we consumed */
	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
1488
1489/**
1490 * stmmac_poll - stmmac poll method (NAPI)
1491 * @napi : pointer to the napi structure.
1492 * @budget : maximum number of packets that the current CPU can receive from
1493 * all interfaces.
1494 * Description :
1495 * This function implements the the reception process.
1496 * Also it runs the TX completion thread
1497 */
1498static int stmmac_poll(struct napi_struct *napi, int budget)
1499{
1500 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
1501 int work_done = 0;
1502
1503 priv->xstats.poll_n++;
1504 stmmac_tx(priv);
1505 work_done = stmmac_rx(priv, budget);
1506
1507 if (work_done < budget) {
1508 napi_complete(napi);
1509 stmmac_enable_irq(priv);
1510 }
1511 return work_done;
1512}
1513
1514/**
1515 * stmmac_tx_timeout
1516 * @dev : Pointer to net device structure
1517 * Description: this function is called when a packet transmission fails to
1518 * complete within a reasonable tmrate. The driver will mark the error in the
1519 * netdev structure and arrange for the device to be reset to a sane state
1520 * in order to transmit a new packet.
1521 */
/**
 * stmmac_tx_timeout
 * @dev : Pointer to net device structure
 * Description: called by the networking core when a transmission fails to
 * complete within a reasonable time. Resets the TX path so a new packet
 * can be transmitted.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(netdev_priv(dev));
}
1530
1531/* Configuration changes (passed on by ifconfig) */
1532static int stmmac_config(struct net_device *dev, struct ifmap *map)
1533{
1534 if (dev->flags & IFF_UP) /* can't act on a running interface */
1535 return -EBUSY;
1536
1537 /* Don't allow changing the I/O address */
1538 if (map->base_addr != dev->base_addr) {
1539 pr_warning("%s: can't change I/O address\n", dev->name);
1540 return -EOPNOTSUPP;
1541 }
1542
1543 /* Don't allow changing the IRQ */
1544 if (map->irq != dev->irq) {
1545 pr_warning("%s: can't change IRQ number %d\n",
1546 dev->name, dev->irq);
1547 return -EOPNOTSUPP;
1548 }
1549
1550 /* ignore other fields */
1551 return 0;
1552}
1553
1554/**
1555 * stmmac_multicast_list - entry point for multicast addressing
1556 * @dev : pointer to the device structure
1557 * Description:
1558 * This function is a driver entry point which gets called by the kernel
1559 * whenever multicast addresses must be enabled/disabled.
1560 * Return value:
1561 * void.
1562 */
1563static void stmmac_multicast_list(struct net_device *dev)
1564{
1565 struct stmmac_priv *priv = netdev_priv(dev);
1566
1567 spin_lock(&priv->lock);
1568 priv->mac_type->ops->set_filter(dev);
1569 spin_unlock(&priv->lock);
1570 return;
1571}
1572
1573/**
1574 * stmmac_change_mtu - entry point to change MTU size for the device.
1575 * @dev : device pointer.
1576 * @new_mtu : the new MTU size for the device.
1577 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
1578 * to drive packet transmission. Ethernet has an MTU of 1500 octets
1579 * (ETH_DATA_LEN). This value can be changed with ifconfig.
1580 * Return value:
1581 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1582 * file on failure.
1583 */
1584static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1585{
1586 struct stmmac_priv *priv = netdev_priv(dev);
1587 int max_mtu;
1588
1589 if (netif_running(dev)) {
1590 pr_err("%s: must be stopped to change its MTU\n", dev->name);
1591 return -EBUSY;
1592 }
1593
1594 if (priv->is_gmac)
1595 max_mtu = JUMBO_LEN;
1596 else
1597 max_mtu = ETH_DATA_LEN;
1598
1599 if ((new_mtu < 46) || (new_mtu > max_mtu)) {
1600 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
1601 return -EINVAL;
1602 }
1603
1604 dev->mtu = new_mtu;
1605
1606 return 0;
1607}
1608
1609static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1610{
1611 struct net_device *dev = (struct net_device *)dev_id;
1612 struct stmmac_priv *priv = netdev_priv(dev);
1613
1614 if (unlikely(!dev)) {
1615 pr_err("%s: invalid dev pointer\n", __func__);
1616 return IRQ_NONE;
1617 }
1618
1619 if (priv->is_gmac) {
1620 unsigned long ioaddr = dev->base_addr;
1621 /* To handle GMAC own interrupts */
1622 priv->mac_type->ops->host_irq_status(ioaddr);
1623 }
1624 stmmac_dma_interrupt(dev);
1625
1626 return IRQ_HANDLED;
1627}
1628
1629#ifdef CONFIG_NET_POLL_CONTROLLER
1630/* Polling receive - used by NETCONSOLE and other diagnostic tools
1631 * to allow network I/O with interrupts disabled. */
1632static void stmmac_poll_controller(struct net_device *dev)
1633{
1634 disable_irq(dev->irq);
1635 stmmac_interrupt(dev->irq, dev);
1636 enable_irq(dev->irq);
1637}
1638#endif
1639
1640/**
1641 * stmmac_ioctl - Entry point for the Ioctl
1642 * @dev: Device pointer.
1643 * @rq: An IOCTL specefic structure, that can contain a pointer to
1644 * a proprietary structure used to pass information to the driver.
1645 * @cmd: IOCTL command
1646 * Description:
1647 * Currently there are no special functionality supported in IOCTL, just the
1648 * phy_mii_ioctl(...) can be invoked.
1649 */
1650static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1651{
1652 struct stmmac_priv *priv = netdev_priv(dev);
1653 int ret = -EOPNOTSUPP;
1654
1655 if (!netif_running(dev))
1656 return -EINVAL;
1657
1658 switch (cmd) {
1659 case SIOCGMIIPHY:
1660 case SIOCGMIIREG:
1661 case SIOCSMIIREG:
1662 if (!priv->phydev)
1663 return -EINVAL;
1664
1665 spin_lock(&priv->lock);
1666 ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
1667 spin_unlock(&priv->lock);
1668 default:
1669 break;
1670 }
1671 return ret;
1672}
1673
1674#ifdef STMMAC_VLAN_TAG_USED
1675static void stmmac_vlan_rx_register(struct net_device *dev,
1676 struct vlan_group *grp)
1677{
1678 struct stmmac_priv *priv = netdev_priv(dev);
1679
1680 DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp);
1681
1682 spin_lock(&priv->lock);
1683 priv->vlgrp = grp;
1684 spin_unlock(&priv->lock);
1685
1686 return;
1687}
1688#endif
1689
/* Hook the STMMAC entry points into the generic net_device interface. */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_set_multicast_list = stmmac_multicast_list,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_set_config = stmmac_config,
#ifdef STMMAC_VLAN_TAG_USED
	.ndo_vlan_rx_register = stmmac_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	/* Generic helper: validates and sets the HW address */
	.ndo_set_mac_address = eth_mac_addr,
};
1707
1708/**
1709 * stmmac_probe - Initialization of the adapter .
1710 * @dev : device pointer
1711 * Description: The function initializes the network device structure for
1712 * the STMMAC driver. It also calls the low level routines
1713 * in order to init the HW (i.e. the DMA engine)
1714 */
1715static int stmmac_probe(struct net_device *dev)
1716{
1717 int ret = 0;
1718 struct stmmac_priv *priv = netdev_priv(dev);
1719
1720 ether_setup(dev);
1721
1722 dev->netdev_ops = &stmmac_netdev_ops;
1723 stmmac_set_ethtool_ops(dev);
1724
1725 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
1726 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1727#ifdef STMMAC_VLAN_TAG_USED
1728 /* Both mac100 and gmac support receive VLAN tag detection */
1729 dev->features |= NETIF_F_HW_VLAN_RX;
1730#endif
1731 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1732
1733 if (priv->is_gmac)
1734 priv->rx_csum = 1;
1735
1736 if (flow_ctrl)
1737 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
1738
1739 priv->pause = pause;
1740 netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
1741
1742 /* Get the MAC address */
1743 priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
1744
1745 if (!is_valid_ether_addr(dev->dev_addr))
1746 pr_warning("\tno valid MAC address;"
1747 "please, use ifconfig or nwhwconfig!\n");
1748
1749 ret = register_netdev(dev);
1750 if (ret) {
1751 pr_err("%s: ERROR %i registering the device\n",
1752 __func__, ret);
1753 return -ENODEV;
1754 }
1755
1756 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
1757 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
1758 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
1759
1760 spin_lock_init(&priv->lock);
1761
1762 return ret;
1763}
1764
1765/**
1766 * stmmac_mac_device_setup
1767 * @dev : device pointer
1768 * Description: select and initialise the mac device (mac100 or Gmac).
1769 */
1770static int stmmac_mac_device_setup(struct net_device *dev)
1771{
1772 struct stmmac_priv *priv = netdev_priv(dev);
1773 unsigned long ioaddr = dev->base_addr;
1774
1775 struct mac_device_info *device;
1776
1777 if (priv->is_gmac)
1778 device = gmac_setup(ioaddr);
1779 else
1780 device = mac100_setup(ioaddr);
1781
1782 if (!device)
1783 return -ENOMEM;
1784
1785 priv->mac_type = device;
1786
1787 priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
1788 if (priv->wolenabled == PMT_SUPPORTED)
1789 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1790
1791 return 0;
1792}
1793
1794static int stmmacphy_dvr_probe(struct platform_device *pdev)
1795{
1796 struct plat_stmmacphy_data *plat_dat;
1797 plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
1798
1799 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
1800 plat_dat->bus_id);
1801
1802 return 0;
1803}
1804
/* Nothing was allocated at probe time, so removal is a no-op. */
static int stmmacphy_dvr_remove(struct platform_device *pdev)
{
	return 0;
}
1809
/* Platform driver matching the PHY configuration devices declared by
 * board code; stmmac_associate_phy() iterates its bound devices. */
static struct platform_driver stmmacphy_driver = {
	.driver = {
		   .name = PHY_RESOURCE_NAME,
		   },
	.probe = stmmacphy_dvr_probe,
	.remove = stmmacphy_dvr_remove,
};
1817
1818/**
1819 * stmmac_associate_phy
1820 * @dev: pointer to device structure
1821 * @data: points to the private structure.
1822 * Description: Scans through all the PHYs we have registered and checks if
1823 * any are associated with our MAC. If so, then just fill in
1824 * the blanks in our local context structure
1825 */
1826static int stmmac_associate_phy(struct device *dev, void *data)
1827{
1828 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1829 struct plat_stmmacphy_data *plat_dat;
1830
1831 plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
1832
1833 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
1834 plat_dat->bus_id);
1835
1836 /* Check that this phy is for the MAC being initialised */
1837 if (priv->bus_id != plat_dat->bus_id)
1838 return 0;
1839
1840 /* OK, this PHY is connected to the MAC.
1841 Go ahead and get the parameters */
1842 DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__);
1843 priv->phy_irq =
1844 platform_get_irq_byname(to_platform_device(dev), "phyirq");
1845 DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__,
1846 plat_dat->bus_id, priv->phy_irq);
1847
1848 /* Override with kernel parameters if supplied XXX CRS XXX
1849 * this needs to have multiple instances */
1850 if ((phyaddr >= 0) && (phyaddr <= 31))
1851 plat_dat->phy_addr = phyaddr;
1852
1853 priv->phy_addr = plat_dat->phy_addr;
1854 priv->phy_mask = plat_dat->phy_mask;
1855 priv->phy_interface = plat_dat->interface;
1856 priv->phy_reset = plat_dat->phy_reset;
1857
1858 DBG(probe, DEBUG, "%s: exiting\n", __func__);
1859 return 1; /* forces exit of driver_for_each_device() */
1860}
1861
1862/**
1863 * stmmac_dvr_probe
1864 * @pdev: platform device pointer
1865 * Description: the driver is initialized through platform_device.
1866 */
1867static int stmmac_dvr_probe(struct platform_device *pdev)
1868{
1869 int ret = 0;
1870 struct resource *res;
1871 unsigned int *addr = NULL;
1872 struct net_device *ndev = NULL;
1873 struct stmmac_priv *priv;
1874 struct plat_stmmacenet_data *plat_dat;
1875
1876 pr_info("STMMAC driver:\n\tplatform registration... ");
1877 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1878 if (!res) {
1879 ret = -ENODEV;
1880 goto out;
1881 }
1882 pr_info("done!\n");
1883
1884 if (!request_mem_region(res->start, (res->end - res->start),
1885 pdev->name)) {
1886 pr_err("%s: ERROR: memory allocation failed"
1887 "cannot get the I/O addr 0x%x\n",
1888 __func__, (unsigned int)res->start);
1889 ret = -EBUSY;
1890 goto out;
1891 }
1892
1893 addr = ioremap(res->start, (res->end - res->start));
1894 if (!addr) {
1895 pr_err("%s: ERROR: memory mapping failed \n", __func__);
1896 ret = -ENOMEM;
1897 goto out;
1898 }
1899
1900 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
1901 if (!ndev) {
1902 pr_err("%s: ERROR: allocating the device\n", __func__);
1903 ret = -ENOMEM;
1904 goto out;
1905 }
1906
1907 SET_NETDEV_DEV(ndev, &pdev->dev);
1908
1909 /* Get the MAC information */
1910 ndev->irq = platform_get_irq_byname(pdev, "macirq");
1911 if (ndev->irq == -ENXIO) {
1912 pr_err("%s: ERROR: MAC IRQ configuration "
1913 "information not found\n", __func__);
1914 ret = -ENODEV;
1915 goto out;
1916 }
1917
1918 priv = netdev_priv(ndev);
1919 priv->device = &(pdev->dev);
1920 priv->dev = ndev;
1921 plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
1922 priv->bus_id = plat_dat->bus_id;
1923 priv->pbl = plat_dat->pbl; /* TLI */
1924 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1925
1926 platform_set_drvdata(pdev, ndev);
1927
1928 /* Set the I/O base addr */
1929 ndev->base_addr = (unsigned long)addr;
1930
1931 /* MAC HW revice detection */
1932 ret = stmmac_mac_device_setup(ndev);
1933 if (ret < 0)
1934 goto out;
1935
1936 /* Network Device Registration */
1937 ret = stmmac_probe(ndev);
1938 if (ret < 0)
1939 goto out;
1940
1941 /* associate a PHY - it is provided by another platform bus */
1942 if (!driver_for_each_device
1943 (&(stmmacphy_driver.driver), NULL, (void *)priv,
1944 stmmac_associate_phy)) {
1945 pr_err("No PHY device is associated with this MAC!\n");
1946 ret = -ENODEV;
1947 goto out;
1948 }
1949
1950 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1951 priv->bsp_priv = plat_dat->bsp_priv;
1952
1953 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
1954 "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
1955 pdev->id, ndev->irq, (unsigned int)addr);
1956
1957 /* MDIO bus Registration */
1958 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
1959 ret = stmmac_mdio_register(ndev);
1960 if (ret < 0)
1961 goto out;
1962 pr_debug("registered!\n");
1963
1964out:
1965 if (ret < 0) {
1966 platform_set_drvdata(pdev, NULL);
1967 release_mem_region(res->start, (res->end - res->start));
1968 if (addr != NULL)
1969 iounmap(addr);
1970 }
1971
1972 return ret;
1973}
1974
/**
 * stmmac_dvr_remove
 * @pdev: platform device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings,
 * unregisters the MDIO bus and unmaps the allocated memory.
 */
static int stmmac_dvr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct resource *res;

	pr_info("%s:\n\tremoving driver", __func__);

	/* Quiesce the hardware before tearing the software down */
	stmmac_dma_stop_rx(ndev->base_addr);
	stmmac_dma_stop_tx(ndev->base_addr);

	stmmac_mac_disable_rx(ndev->base_addr);
	stmmac_mac_disable_tx(ndev->base_addr);

	netif_carrier_off(ndev);

	stmmac_mdio_unregister(ndev);

	platform_set_drvdata(pdev, NULL);
	unregister_netdev(ndev);

	iounmap((void *)ndev->base_addr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Size expression intentionally mirrors the request_mem_region()
	 * call in stmmac_dvr_probe(); the two must stay in sync. */
	release_mem_region(res->start, (res->end - res->start));

	free_netdev(ndev);

	return 0;
}
2010
2011#ifdef CONFIG_PM
/*
 * Suspend callback. For a plain suspend the queues, PHY, NAPI and DMA
 * are stopped and the rings re-initialized; WoL is armed via the PMT
 * registers when the device may wake the system. For hibernation
 * (non-SUSPEND events) the interface is fully released instead, and
 * priv->shutdown tells stmmac_resume() to re-open it from scratch.
 */
static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(dev);
	int dis_ic = 0;

	/* Nothing to do if the interface is down */
	if (!dev || !netif_running(dev))
		return 0;

	spin_lock(&priv->lock);

	if (state.event == PM_EVENT_SUSPEND) {
		netif_device_detach(dev);
		netif_stop_queue(dev);
		if (priv->phydev)
			phy_stop(priv->phydev);

#ifdef CONFIG_STMMAC_TIMER
		/* With the HW timer off, RX descriptors are re-initialized
		 * with interrupt-on-completion disabled (dis_ic). */
		priv->tm->timer_stop();
		dis_ic = 1;
#endif
		napi_disable(&priv->napi);

		/* Stop TX/RX DMA */
		stmmac_dma_stop_tx(dev->base_addr);
		stmmac_dma_stop_rx(dev->base_addr);
		/* Clear the Rx/Tx descriptors */
		priv->mac_type->ops->init_rx_desc(priv->dma_rx,
						  priv->dma_rx_size, dis_ic);
		priv->mac_type->ops->init_tx_desc(priv->dma_tx,
						  priv->dma_tx_size);

		stmmac_mac_disable_tx(dev->base_addr);

		if (device_may_wakeup(&(pdev->dev))) {
			/* Enable Power down mode by programming the PMT regs;
			 * RX stays enabled so wake frames can be seen. */
			if (priv->wolenabled == PMT_SUPPORTED)
				priv->mac_type->ops->pmt(dev->base_addr,
							 priv->wolopts);
		} else {
			stmmac_mac_disable_rx(dev->base_addr);
		}
	} else {
		priv->shutdown = 1;
		/* Although this can appear slightly redundant it actually
		 * makes fast the standby operation and guarantees the driver
		 * working if hibernation is on media. */
		stmmac_release(dev);
	}

	spin_unlock(&priv->lock);
	return 0;
}
2065
/*
 * Resume callback: mirror of stmmac_suspend(). After hibernation
 * (priv->shutdown set) the interface is simply re-opened; otherwise the
 * PMT power-down bit is cleared, the MAC/DMA re-enabled and the queues,
 * NAPI and PHY restarted.
 */
static int stmmac_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return 0;

	spin_lock(&priv->lock);

	if (priv->shutdown) {
		/* Re-open the interface and re-init the MAC/DMA
		   and the rings. */
		stmmac_open(dev);
		goto out_resume;
	}

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console). */
	if (device_may_wakeup(&(pdev->dev)))
		if (priv->wolenabled == PMT_SUPPORTED)
			priv->mac_type->ops->pmt(dev->base_addr, 0);

	netif_device_attach(dev);

	/* Enable the MAC and DMA */
	stmmac_mac_enable_rx(ioaddr);
	stmmac_mac_enable_tx(ioaddr);
	stmmac_dma_start_tx(ioaddr);
	stmmac_dma_start_rx(ioaddr);

#ifdef CONFIG_STMMAC_TIMER
	/* Restart the mitigation timer at the configured rate */
	priv->tm->timer_start(tmrate);
#endif
	napi_enable(&priv->napi);

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_start_queue(dev);

out_resume:
	spin_unlock(&priv->lock);
	return 0;
}
2115#endif
2116
/* Main platform driver for the MAC itself (the PHY configuration
 * devices are handled by the separate stmmacphy_driver). */
static struct platform_driver stmmac_driver = {
	.driver = {
		   .name = STMMAC_RESOURCE_NAME,
		   },
	.probe = stmmac_dvr_probe,
	.remove = stmmac_dvr_remove,
#ifdef CONFIG_PM
	.suspend = stmmac_suspend,
	.resume = stmmac_resume,
#endif

};
2129
2130/**
2131 * stmmac_init_module - Entry point for the driver
2132 * Description: This function is the entry point for the driver.
2133 */
2134static int __init stmmac_init_module(void)
2135{
2136 int ret;
2137
2138 if (platform_driver_register(&stmmacphy_driver)) {
2139 pr_err("No PHY devices registered!\n");
2140 return -ENODEV;
2141 }
2142
2143 ret = platform_driver_register(&stmmac_driver);
2144 return ret;
2145}
2146
2147/**
2148 * stmmac_cleanup_module - Cleanup routine for the driver
2149 * Description: This function is the cleanup routine for the driver.
2150 */
2151static void __exit stmmac_cleanup_module(void)
2152{
2153 platform_driver_unregister(&stmmacphy_driver);
2154 platform_driver_unregister(&stmmac_driver);
2155}
2156
2157#ifndef MODULE
2158static int __init stmmac_cmdline_opt(char *str)
2159{
2160 char *opt;
2161
2162 if (!str || !*str)
2163 return -EINVAL;
2164 while ((opt = strsep(&str, ",")) != NULL) {
2165 if (!strncmp(opt, "debug:", 6))
2166 strict_strtoul(opt + 6, 0, (unsigned long *)&debug);
2167 else if (!strncmp(opt, "phyaddr:", 8))
2168 strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr);
2169 else if (!strncmp(opt, "dma_txsize:", 11))
2170 strict_strtoul(opt + 11, 0,
2171 (unsigned long *)&dma_txsize);
2172 else if (!strncmp(opt, "dma_rxsize:", 11))
2173 strict_strtoul(opt + 11, 0,
2174 (unsigned long *)&dma_rxsize);
2175 else if (!strncmp(opt, "buf_sz:", 7))
2176 strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
2177 else if (!strncmp(opt, "tc:", 3))
2178 strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
2179 else if (!strncmp(opt, "tx_coe:", 7))
2180 strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
2181 else if (!strncmp(opt, "watchdog:", 9))
2182 strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
2183 else if (!strncmp(opt, "flow_ctrl:", 10))
2184 strict_strtoul(opt + 10, 0,
2185 (unsigned long *)&flow_ctrl);
2186 else if (!strncmp(opt, "pause:", 6))
2187 strict_strtoul(opt + 6, 0, (unsigned long *)&pause);
2188#ifdef CONFIG_STMMAC_TIMER
2189 else if (!strncmp(opt, "tmrate:", 7))
2190 strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate);
2191#endif
2192 }
2193 return 0;
2194}
2195
2196__setup("stmmaceth=", stmmac_cmdline_opt);
2197#endif
2198
2199module_init(stmmac_init_module);
2200module_exit(stmmac_cleanup_module);
2201
2202MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
2203MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
2204MODULE_LICENSE("GPL");
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
new file mode 100644
index 000000000000..8498552a22fc
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -0,0 +1,217 @@
1/*******************************************************************************
2 STMMAC Ethernet Driver -- MDIO bus implementation
3 Provides Bus interface for MII registers
4
5 Copyright (C) 2007-2009 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Author: Carl Shaw <carl.shaw@st.com>
24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/
26
27#include <linux/netdevice.h>
28#include <linux/mii.h>
29#include <linux/phy.h>
30
31#include "stmmac.h"
32
33#define MII_BUSY 0x00000001
34#define MII_WRITE 0x00000002
35
36/**
37 * stmmac_mdio_read
38 * @bus: points to the mii_bus structure
39 * @phyaddr: MII addr reg bits 15-11
40 * @phyreg: MII addr reg bits 10-6
41 * Description: it reads data from the MII register from within the phy device.
42 * For the 7111 GMAC, we must set the bit 0 in the MII address register while
43 * accessing the PHY registers.
44 * Fortunately, it seems this has no drawback for the 7109 MAC.
45 */
static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
{
	struct net_device *ndev = bus->priv;
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long ioaddr = ndev->base_addr;
	/* Register offsets of the MII address/data registers for this core */
	unsigned int mii_address = priv->mac_type->hw.mii.addr;
	unsigned int mii_data = priv->mac_type->hw.mii.data;

	int data;
	/* MII address word: PHY address in bits 15-11, register in 10-6 */
	u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
			((phyreg << 6) & (0x000007C0)));
	regValue |= MII_BUSY;	/* in case of GMAC */

	/* Spin until any pending transaction completes, start the read,
	 * then spin until the new transaction completes.
	 * NOTE(review): no timeout - a wedged MDIO bus hangs here. */
	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
	writel(regValue, ioaddr + mii_address);
	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);

	/* Read the data from the MII data register */
	data = (int)readl(ioaddr + mii_data);

	return data;
}
68
69/**
70 * stmmac_mdio_write
71 * @bus: points to the mii_bus structure
72 * @phyaddr: MII addr reg bits 15-11
73 * @phyreg: MII addr reg bits 10-6
74 * @phydata: phy data
75 * Description: it writes the data into the MII register from within the device.
76 */
static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
			     u16 phydata)
{
	struct net_device *ndev = bus->priv;
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long ioaddr = ndev->base_addr;
	/* Register offsets of the MII address/data registers for this core */
	unsigned int mii_address = priv->mac_type->hw.mii.addr;
	unsigned int mii_data = priv->mac_type->hw.mii.data;

	/* MII address word: PHY address in bits 15-11, register in 10-6,
	 * plus the write-direction flag */
	u16 value =
	    (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
	    | MII_WRITE;

	value |= MII_BUSY;

	/* Wait until any existing MII operation is complete */
	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);

	/* Data must be latched before the address/command register is
	 * written: writing the address register starts the transaction. */
	writel(phydata, ioaddr + mii_data);
	writel(value, ioaddr + mii_address);

	/* Wait until any existing MII operation is complete */
	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);

	return 0;
}
104
105/**
106 * stmmac_mdio_reset
107 * @bus: points to the mii_bus structure
108 * Description: reset the MII bus
109 */
110static int stmmac_mdio_reset(struct mii_bus *bus)
111{
112 struct net_device *ndev = bus->priv;
113 struct stmmac_priv *priv = netdev_priv(ndev);
114 unsigned long ioaddr = ndev->base_addr;
115 unsigned int mii_address = priv->mac_type->hw.mii.addr;
116
117 if (priv->phy_reset) {
118 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
119 priv->phy_reset(priv->bsp_priv);
120 }
121
122 /* This is a workaround for problems with the STE101P PHY.
123 * It doesn't complete its reset until at least one clock cycle
124 * on MDC, so perform a dummy mdio read.
125 */
126 writel(0, ioaddr + mii_address);
127
128 return 0;
129}
130
131/**
132 * stmmac_mdio_register
133 * @ndev: net device structure
134 * Description: it registers the MII bus
135 */
136int stmmac_mdio_register(struct net_device *ndev)
137{
138 int err = 0;
139 struct mii_bus *new_bus;
140 int *irqlist;
141 struct stmmac_priv *priv = netdev_priv(ndev);
142 int addr, found;
143
144 new_bus = mdiobus_alloc();
145 if (new_bus == NULL)
146 return -ENOMEM;
147
148 irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
149 if (irqlist == NULL) {
150 err = -ENOMEM;
151 goto irqlist_alloc_fail;
152 }
153
154 /* Assign IRQ to phy at address phy_addr */
155 if (priv->phy_addr != -1)
156 irqlist[priv->phy_addr] = priv->phy_irq;
157
158 new_bus->name = "STMMAC MII Bus";
159 new_bus->read = &stmmac_mdio_read;
160 new_bus->write = &stmmac_mdio_write;
161 new_bus->reset = &stmmac_mdio_reset;
162 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
163 new_bus->priv = ndev;
164 new_bus->irq = irqlist;
165 new_bus->phy_mask = priv->phy_mask;
166 new_bus->parent = priv->device;
167 err = mdiobus_register(new_bus);
168 if (err != 0) {
169 pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
170 goto bus_register_fail;
171 }
172
173 priv->mii = new_bus;
174
175 found = 0;
176 for (addr = 0; addr < 32; addr++) {
177 struct phy_device *phydev = new_bus->phy_map[addr];
178 if (phydev) {
179 if (priv->phy_addr == -1) {
180 priv->phy_addr = addr;
181 phydev->irq = priv->phy_irq;
182 irqlist[addr] = priv->phy_irq;
183 }
184 pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
185 ndev->name, phydev->phy_id, addr,
186 phydev->irq, dev_name(&phydev->dev),
187 (addr == priv->phy_addr) ? " active" : "");
188 found = 1;
189 }
190 }
191
192 if (!found)
193 pr_warning("%s: No PHY found\n", ndev->name);
194
195 return 0;
196bus_register_fail:
197 kfree(irqlist);
198irqlist_alloc_fail:
199 kfree(new_bus);
200 return err;
201}
202
203/**
204 * stmmac_mdio_unregister
205 * @ndev: net device structure
206 * Description: it unregisters the MII bus
207 */
208int stmmac_mdio_unregister(struct net_device *ndev)
209{
210 struct stmmac_priv *priv = netdev_priv(ndev);
211
212 mdiobus_unregister(priv->mii);
213 priv->mii->priv = NULL;
214 kfree(priv->mii);
215
216 return 0;
217}
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
new file mode 100644
index 000000000000..b838c6582077
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -0,0 +1,140 @@
1/*******************************************************************************
2 STMMAC external timer support.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include <linux/kernel.h>
26#include <linux/etherdevice.h>
27#include "stmmac_timer.h"
28
/* External-timer tick callback: @data is the net_device registered at
 * open time; hand it to the stmmac scheduling hook. */
static void stmmac_timer_handler(void *data)
{
	stmmac_schedule(data);
}
37
38#define STMMAC_TIMER_MSG(timer, freq) \
39printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
40
41#if defined(CONFIG_STMMAC_RTC_TIMER)
42#include <linux/rtc.h>
43static struct rtc_device *stmmac_rtc;
44static rtc_task_t stmmac_task;
45
46static void stmmac_rtc_start(unsigned int new_freq)
47{
48 rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
49 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
50 return;
51}
52
53static void stmmac_rtc_stop(void)
54{
55 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
56 return;
57}
58
59int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
60{
61 stmmac_task.private_data = dev;
62 stmmac_task.func = stmmac_timer_handler;
63
64 stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
65 if (stmmac_rtc == NULL) {
66 pr_error("open rtc device failed\n");
67 return -ENODEV;
68 }
69
70 rtc_irq_register(stmmac_rtc, &stmmac_task);
71
72 /* Periodic mode is not supported */
73 if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
74 pr_error("set periodic failed\n");
75 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
76 rtc_class_close(stmmac_rtc);
77 return -1;
78 }
79
80 STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
81
82 tm->timer_start = stmmac_rtc_start;
83 tm->timer_stop = stmmac_rtc_stop;
84
85 return 0;
86}
87
88int stmmac_close_ext_timer(void)
89{
90 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
91 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
92 rtc_class_close(stmmac_rtc);
93 return 0;
94}
95
96#elif defined(CONFIG_STMMAC_TMU_TIMER)
97#include <linux/clk.h>
98#define TMU_CHANNEL "tmu2_clk"
99static struct clk *timer_clock;
100
101static void stmmac_tmu_start(unsigned int new_freq)
102{
103 clk_set_rate(timer_clock, new_freq);
104 clk_enable(timer_clock);
105 return;
106}
107
108static void stmmac_tmu_stop(void)
109{
110 clk_disable(timer_clock);
111 return;
112}
113
114int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
115{
116 timer_clock = clk_get(NULL, TMU_CHANNEL);
117
118 if (timer_clock == NULL)
119 return -1;
120
121 if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
122 timer_clock = NULL;
123 return -1;
124 }
125
126 STMMAC_TIMER_MSG("TMU2", tm->freq);
127 tm->timer_start = stmmac_tmu_start;
128 tm->timer_stop = stmmac_tmu_stop;
129
130 return 0;
131}
132
133int stmmac_close_ext_timer(void)
134{
135 clk_disable(timer_clock);
136 tmu2_unregister_user();
137 clk_put(timer_clock);
138 return 0;
139}
140#endif
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h
new file mode 100644
index 000000000000..f795cae33725
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.h
@@ -0,0 +1,41 @@
1/*******************************************************************************
2 STMMAC external timer Header File.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
/* Abstraction over the external hardware timer used for interrupt
 * mitigation.  stmmac_open_ext_timer() fills in the hooks for whichever
 * backend is configured (RTC or TMU). */
struct stmmac_timer {
	void (*timer_start) (unsigned int new_freq); /* (re)arm at new_freq Hz */
	void (*timer_stop) (void); /* disarm the timer */
	unsigned int freq; /* requested tick frequency in Hz */
};
30
31/* Open the HW timer device and return 0 in case of success */
32int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
33/* Stop the timer and release it */
34int stmmac_close_ext_timer(void);
35/* Function used for scheduling task within the stmmac */
36void stmmac_schedule(struct net_device *dev);
37
38#if defined(CONFIG_STMMAC_TMU_TIMER)
39extern int tmu2_register_user(void *fnt, void *data);
40extern void tmu2_unregister_user(void);
41#endif
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 305ec3d783db..7019a0d1a82b 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -38,6 +38,7 @@
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/ioport.h> 39#include <linux/ioport.h>
40#include <linux/in.h> 40#include <linux/in.h>
41#include <linux/sched.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/string.h> 43#include <linux/string.h>
43#include <linux/delay.h> 44#include <linux/delay.h>
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index f1e5e4542c2a..bc74db0d12f3 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1016,7 +1016,6 @@ static const struct net_device_ops vnet_ops = {
1016 .ndo_open = vnet_open, 1016 .ndo_open = vnet_open,
1017 .ndo_stop = vnet_close, 1017 .ndo_stop = vnet_close,
1018 .ndo_set_multicast_list = vnet_set_rx_mode, 1018 .ndo_set_multicast_list = vnet_set_rx_mode,
1019 .ndo_change_mtu = eth_change_mtu,
1020 .ndo_set_mac_address = vnet_set_mac_addr, 1019 .ndo_set_mac_address = vnet_set_mac_addr,
1021 .ndo_validate_addr = eth_validate_addr, 1020 .ndo_validate_addr = eth_validate_addr,
1022 .ndo_tx_timeout = vnet_tx_timeout, 1021 .ndo_tx_timeout = vnet_tx_timeout,
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f09bc5dfe8b2..ba5d3fe753b6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
902 struct tg3 *tp = bp->priv; 902 struct tg3 *tp = bp->priv;
903 u32 val; 903 u32 val;
904 904
905 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 905 spin_lock_bh(&tp->lock);
906 return -EAGAIN;
907 906
908 if (tg3_readphy(tp, reg, &val)) 907 if (tg3_readphy(tp, reg, &val))
909 return -EIO; 908 val = -EIO;
909
910 spin_unlock_bh(&tp->lock);
910 911
911 return val; 912 return val;
912} 913}
@@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
914static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) 915static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
915{ 916{
916 struct tg3 *tp = bp->priv; 917 struct tg3 *tp = bp->priv;
918 u32 ret = 0;
917 919
918 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 920 spin_lock_bh(&tp->lock);
919 return -EAGAIN;
920 921
921 if (tg3_writephy(tp, reg, val)) 922 if (tg3_writephy(tp, reg, val))
922 return -EIO; 923 ret = -EIO;
923 924
924 return 0; 925 spin_unlock_bh(&tp->lock);
926
927 return ret;
925} 928}
926 929
927static int tg3_mdio_reset(struct mii_bus *bp) 930static int tg3_mdio_reset(struct mii_bus *bp)
@@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
1011 1014
1012static void tg3_mdio_start(struct tg3 *tp) 1015static void tg3_mdio_start(struct tg3 *tp)
1013{ 1016{
1014 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1015 mutex_lock(&tp->mdio_bus->mdio_lock);
1016 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1017 mutex_unlock(&tp->mdio_bus->mdio_lock);
1018 }
1019
1020 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; 1017 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1021 tw32_f(MAC_MI_MODE, tp->mi_mode); 1018 tw32_f(MAC_MI_MODE, tp->mi_mode);
1022 udelay(80); 1019 udelay(80);
@@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp)
1041 tg3_mdio_config_5785(tp); 1038 tg3_mdio_config_5785(tp);
1042} 1039}
1043 1040
1044static void tg3_mdio_stop(struct tg3 *tp)
1045{
1046 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1047 mutex_lock(&tp->mdio_bus->mdio_lock);
1048 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
1049 mutex_unlock(&tp->mdio_bus->mdio_lock);
1050 }
1051}
1052
1053static int tg3_mdio_init(struct tg3 *tp) 1041static int tg3_mdio_init(struct tg3 *tp)
1054{ 1042{
1055 int i; 1043 int i;
@@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp)
1141 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; 1129 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1142 mdiobus_unregister(tp->mdio_bus); 1130 mdiobus_unregister(tp->mdio_bus);
1143 mdiobus_free(tp->mdio_bus); 1131 mdiobus_free(tp->mdio_bus);
1144 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1145 } 1132 }
1146} 1133}
1147 1134
@@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev)
1363 struct tg3 *tp = netdev_priv(dev); 1350 struct tg3 *tp = netdev_priv(dev);
1364 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1351 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1365 1352
1366 spin_lock(&tp->lock); 1353 spin_lock_bh(&tp->lock);
1367 1354
1368 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 1355 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1369 MAC_MODE_HALF_DUPLEX); 1356 MAC_MODE_HALF_DUPLEX);
@@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev)
1431 tp->link_config.active_speed = phydev->speed; 1418 tp->link_config.active_speed = phydev->speed;
1432 tp->link_config.active_duplex = phydev->duplex; 1419 tp->link_config.active_duplex = phydev->duplex;
1433 1420
1434 spin_unlock(&tp->lock); 1421 spin_unlock_bh(&tp->lock);
1435 1422
1436 if (linkmesg) 1423 if (linkmesg)
1437 tg3_link_report(tp); 1424 tg3_link_report(tp);
@@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp)
6392 6379
6393 tg3_nvram_lock(tp); 6380 tg3_nvram_lock(tp);
6394 6381
6395 tg3_mdio_stop(tp);
6396
6397 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 6382 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6398 6383
6399 /* No matching tg3_nvram_unlock() after this because 6384 /* No matching tg3_nvram_unlock() after this because
@@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev)
8698 8683
8699 del_timer_sync(&tp->timer); 8684 del_timer_sync(&tp->timer);
8700 8685
8686 tg3_phy_stop(tp);
8687
8701 tg3_full_lock(tp, 1); 8688 tg3_full_lock(tp, 1);
8702#if 0 8689#if 0
8703 tg3_dump_state(tp); 8690 tg3_dump_state(tp);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 82b45d8797b4..bab7940158e6 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2412,7 +2412,6 @@ struct ring_info {
2412 2412
2413struct tx_ring_info { 2413struct tx_ring_info {
2414 struct sk_buff *skb; 2414 struct sk_buff *skb;
2415 u32 prev_vlan_tag;
2416}; 2415};
2417 2416
2418struct tg3_config_info { 2417struct tg3_config_info {
@@ -2749,7 +2748,6 @@ struct tg3 {
2749#define TG3_FLG3_5701_DMA_BUG 0x00000008 2748#define TG3_FLG3_5701_DMA_BUG 0x00000008
2750#define TG3_FLG3_USE_PHYLIB 0x00000010 2749#define TG3_FLG3_USE_PHYLIB 0x00000010
2751#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2750#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2752#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040
2753#define TG3_FLG3_PHY_CONNECTED 0x00000080 2751#define TG3_FLG3_PHY_CONNECTED 0x00000080
2754#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 2752#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
2755#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 2753#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 525bbc5b9c9d..36cb2423bcf1 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -108,6 +108,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
108#define IBMTR_DEBUG_MESSAGES 0 108#define IBMTR_DEBUG_MESSAGES 0
109 109
110#include <linux/module.h> 110#include <linux/module.h>
111#include <linux/sched.h>
111 112
112#ifdef PCMCIA /* required for ibmtr_cs.c to build */ 113#ifdef PCMCIA /* required for ibmtr_cs.c to build */
113#undef MODULE /* yes, really */ 114#undef MODULE /* yes, really */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d3ee1994b02f..4fdfa2ae5418 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -946,8 +946,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
946 char *name; 946 char *name;
947 unsigned long flags = 0; 947 unsigned long flags = 0;
948 948
949 err = -EINVAL;
950
951 if (!capable(CAP_NET_ADMIN)) 949 if (!capable(CAP_NET_ADMIN))
952 return -EPERM; 950 return -EPERM;
953 err = security_tun_dev_create(); 951 err = security_tun_dev_create();
@@ -964,7 +962,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
964 flags |= TUN_TAP_DEV; 962 flags |= TUN_TAP_DEV;
965 name = "tap%d"; 963 name = "tap%d";
966 } else 964 } else
967 goto failed; 965 return -EINVAL;
968 966
969 if (*ifr->ifr_name) 967 if (*ifr->ifr_name)
970 name = ifr->ifr_name; 968 name = ifr->ifr_name;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index d6d345229fe9..5921f5bdd764 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -108,6 +108,7 @@ static const int multicast_filter_limit = 32;
108 108
109#include <linux/module.h> 109#include <linux/module.h>
110#include <linux/kernel.h> 110#include <linux/kernel.h>
111#include <linux/sched.h>
111#include <linux/string.h> 112#include <linux/string.h>
112#include <linux/timer.h> 113#include <linux/timer.h>
113#include <linux/errno.h> 114#include <linux/errno.h>
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 45cebfb302cf..23300656c266 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -300,20 +300,23 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
300 return 0; 300 return 0;
301 } 301 }
302 302
303 crc = get_unaligned_le32(skb2->data
304 + len - ETH_FCS_LEN);
305 skb_trim(skb2, len - ETH_FCS_LEN);
306
307 /* 303 /*
308 * The bmCRC helps to denote when the CRC field in 304 * The bmCRC helps to denote when the CRC field in
309 * the Ethernet frame contains a calculated CRC: 305 * the Ethernet frame contains a calculated CRC:
310 * bmCRC = 1 : CRC is calculated 306 * bmCRC = 1 : CRC is calculated
311 * bmCRC = 0 : CRC = 0xDEADBEEF 307 * bmCRC = 0 : CRC = 0xDEADBEEF
312 */ 308 */
313 if (header & BIT(14)) 309 if (header & BIT(14)) {
314 crc2 = ~crc32_le(~0, skb2->data, skb2->len); 310 crc = get_unaligned_le32(skb2->data
315 else 311 + len - ETH_FCS_LEN);
312 crc2 = ~crc32_le(~0, skb2->data, skb2->len
313 - ETH_FCS_LEN);
314 } else {
315 crc = get_unaligned_be32(skb2->data
316 + len - ETH_FCS_LEN);
316 crc2 = 0xdeadbeef; 317 crc2 = 0xdeadbeef;
318 }
319 skb_trim(skb2, len - ETH_FCS_LEN);
317 320
318 if (is_last) 321 if (is_last)
319 return crc == crc2; 322 return crc == crc2;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 72470f77f556..a2b30a10064f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -649,6 +649,10 @@ static const struct usb_device_id products[] = {
649 USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */ 649 USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */
650 .driver_info = (unsigned long)&dm9601_info, 650 .driver_info = (unsigned long)&dm9601_info,
651 }, 651 },
652 {
653 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
654 .driver_info = (unsigned long)&dm9601_info,
655 },
652 {}, // END 656 {}, // END
653}; 657};
654 658
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e2a39b9be96e..e391ef969c28 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -263,6 +263,7 @@ static int kaweth_control(struct kaweth_device *kaweth,
263 int timeout) 263 int timeout)
264{ 264{
265 struct usb_ctrlrequest *dr; 265 struct usb_ctrlrequest *dr;
266 int retval;
266 267
267 dbg("kaweth_control()"); 268 dbg("kaweth_control()");
268 269
@@ -278,18 +279,21 @@ static int kaweth_control(struct kaweth_device *kaweth,
278 return -ENOMEM; 279 return -ENOMEM;
279 } 280 }
280 281
281 dr->bRequestType= requesttype; 282 dr->bRequestType = requesttype;
282 dr->bRequest = request; 283 dr->bRequest = request;
283 dr->wValue = cpu_to_le16(value); 284 dr->wValue = cpu_to_le16(value);
284 dr->wIndex = cpu_to_le16(index); 285 dr->wIndex = cpu_to_le16(index);
285 dr->wLength = cpu_to_le16(size); 286 dr->wLength = cpu_to_le16(size);
286 287
287 return kaweth_internal_control_msg(kaweth->dev, 288 retval = kaweth_internal_control_msg(kaweth->dev,
288 pipe, 289 pipe,
289 dr, 290 dr,
290 data, 291 data,
291 size, 292 size,
292 timeout); 293 timeout);
294
295 kfree(dr);
296 return retval;
293} 297}
294 298
295/**************************************************************** 299/****************************************************************
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6fdaba8674b9..ed4a508ef262 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -62,8 +62,11 @@ static char *devid=NULL;
62static struct usb_eth_dev usb_dev_id[] = { 62static struct usb_eth_dev usb_dev_id[] = {
63#define PEGASUS_DEV(pn, vid, pid, flags) \ 63#define PEGASUS_DEV(pn, vid, pid, flags) \
64 {.name = pn, .vendor = vid, .device = pid, .private = flags}, 64 {.name = pn, .vendor = vid, .device = pid, .private = flags},
65#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
66 PEGASUS_DEV(pn, vid, pid, flags)
65#include "pegasus.h" 67#include "pegasus.h"
66#undef PEGASUS_DEV 68#undef PEGASUS_DEV
69#undef PEGASUS_DEV_CLASS
67 {NULL, 0, 0, 0}, 70 {NULL, 0, 0, 0},
68 {NULL, 0, 0, 0} 71 {NULL, 0, 0, 0}
69}; 72};
@@ -71,8 +74,18 @@ static struct usb_eth_dev usb_dev_id[] = {
71static struct usb_device_id pegasus_ids[] = { 74static struct usb_device_id pegasus_ids[] = {
72#define PEGASUS_DEV(pn, vid, pid, flags) \ 75#define PEGASUS_DEV(pn, vid, pid, flags) \
73 {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid}, 76 {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid},
77/*
78 * The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product
79 * IDs as the Belkin F5D5050, so we need to teach the pegasus driver to
80 * ignore adaptors belonging to the "Wireless" class 0xE0. For this one
81 * case anyway, seeing as the pegasus is for "Wired" adaptors.
82 */
83#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
84 {.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \
85 .idVendor = vid, .idProduct = pid, .bDeviceClass = dclass},
74#include "pegasus.h" 86#include "pegasus.h"
75#undef PEGASUS_DEV 87#undef PEGASUS_DEV
88#undef PEGASUS_DEV_CLASS
76 {}, 89 {},
77 {} 90 {}
78}; 91};
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index f968c834ff63..5d02f0200737 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -202,7 +202,11 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
202 DEFAULT_GPIO_RESET | PEGASUS_II ) 202 DEFAULT_GPIO_RESET | PEGASUS_II )
203PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, 203PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
204 DEFAULT_GPIO_RESET | PEGASUS_II ) 204 DEFAULT_GPIO_RESET | PEGASUS_II )
205PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 205/*
206 * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors
207 * with the same product IDs by checking the device class too.
208 */
209PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
206 DEFAULT_GPIO_RESET | PEGASUS_II ) 210 DEFAULT_GPIO_RESET | PEGASUS_II )
207PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, 211PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
208 DEFAULT_GPIO_RESET ) 212 DEFAULT_GPIO_RESET )
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index d032bba9bc4c..0caa8008c51c 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
418 goto halt_fail_and_release; 418 goto halt_fail_and_release;
419 } 419 }
420 memcpy(net->dev_addr, bp, ETH_ALEN); 420 memcpy(net->dev_addr, bp, ETH_ALEN);
421 memcpy(net->perm_addr, bp, ETH_ALEN);
421 422
422 /* set a nonzero filter to enable data transfers */ 423 /* set a nonzero filter to enable data transfers */
423 memset(u.set, 0, sizeof *u.set); 424 memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 938fb3530a7a..c6c922247d05 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1227,7 +1227,7 @@ static const struct driver_info smsc95xx_info = {
1227 .rx_fixup = smsc95xx_rx_fixup, 1227 .rx_fixup = smsc95xx_rx_fixup,
1228 .tx_fixup = smsc95xx_tx_fixup, 1228 .tx_fixup = smsc95xx_tx_fixup,
1229 .status = smsc95xx_status, 1229 .status = smsc95xx_status,
1230 .flags = FLAG_ETHER, 1230 .flags = FLAG_ETHER | FLAG_SEND_ZLP,
1231}; 1231};
1232 1232
1233static const struct usb_device_id products[] = { 1233static const struct usb_device_id products[] = {
@@ -1237,10 +1237,75 @@ static const struct usb_device_id products[] = {
1237 .driver_info = (unsigned long) &smsc95xx_info, 1237 .driver_info = (unsigned long) &smsc95xx_info,
1238 }, 1238 },
1239 { 1239 {
1240 /* SMSC9505 USB Ethernet Device */
1241 USB_DEVICE(0x0424, 0x9505),
1242 .driver_info = (unsigned long) &smsc95xx_info,
1243 },
1244 {
1245 /* SMSC9500A USB Ethernet Device */
1246 USB_DEVICE(0x0424, 0x9E00),
1247 .driver_info = (unsigned long) &smsc95xx_info,
1248 },
1249 {
1250 /* SMSC9505A USB Ethernet Device */
1251 USB_DEVICE(0x0424, 0x9E01),
1252 .driver_info = (unsigned long) &smsc95xx_info,
1253 },
1254 {
1240 /* SMSC9512/9514 USB Hub & Ethernet Device */ 1255 /* SMSC9512/9514 USB Hub & Ethernet Device */
1241 USB_DEVICE(0x0424, 0xec00), 1256 USB_DEVICE(0x0424, 0xec00),
1242 .driver_info = (unsigned long) &smsc95xx_info, 1257 .driver_info = (unsigned long) &smsc95xx_info,
1243 }, 1258 },
1259 {
1260 /* SMSC9500 USB Ethernet Device (SAL10) */
1261 USB_DEVICE(0x0424, 0x9900),
1262 .driver_info = (unsigned long) &smsc95xx_info,
1263 },
1264 {
1265 /* SMSC9505 USB Ethernet Device (SAL10) */
1266 USB_DEVICE(0x0424, 0x9901),
1267 .driver_info = (unsigned long) &smsc95xx_info,
1268 },
1269 {
1270 /* SMSC9500A USB Ethernet Device (SAL10) */
1271 USB_DEVICE(0x0424, 0x9902),
1272 .driver_info = (unsigned long) &smsc95xx_info,
1273 },
1274 {
1275 /* SMSC9505A USB Ethernet Device (SAL10) */
1276 USB_DEVICE(0x0424, 0x9903),
1277 .driver_info = (unsigned long) &smsc95xx_info,
1278 },
1279 {
1280 /* SMSC9512/9514 USB Hub & Ethernet Device (SAL10) */
1281 USB_DEVICE(0x0424, 0x9904),
1282 .driver_info = (unsigned long) &smsc95xx_info,
1283 },
1284 {
1285 /* SMSC9500A USB Ethernet Device (HAL) */
1286 USB_DEVICE(0x0424, 0x9905),
1287 .driver_info = (unsigned long) &smsc95xx_info,
1288 },
1289 {
1290 /* SMSC9505A USB Ethernet Device (HAL) */
1291 USB_DEVICE(0x0424, 0x9906),
1292 .driver_info = (unsigned long) &smsc95xx_info,
1293 },
1294 {
1295 /* SMSC9500 USB Ethernet Device (Alternate ID) */
1296 USB_DEVICE(0x0424, 0x9907),
1297 .driver_info = (unsigned long) &smsc95xx_info,
1298 },
1299 {
1300 /* SMSC9500A USB Ethernet Device (Alternate ID) */
1301 USB_DEVICE(0x0424, 0x9908),
1302 .driver_info = (unsigned long) &smsc95xx_info,
1303 },
1304 {
1305 /* SMSC9512/9514 USB Hub & Ethernet Device (Alternate ID) */
1306 USB_DEVICE(0x0424, 0x9909),
1307 .driver_info = (unsigned long) &smsc95xx_info,
1308 },
1244 { }, /* END */ 1309 { }, /* END */
1245}; 1310};
1246MODULE_DEVICE_TABLE(usb, products); 1311MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 24b36f795151..ca5ca5ae061d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1049,7 +1049,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1049 * NOTE: strictly conforming cdc-ether devices should expect 1049 * NOTE: strictly conforming cdc-ether devices should expect
1050 * the ZLP here, but ignore the one-byte packet. 1050 * the ZLP here, but ignore the one-byte packet.
1051 */ 1051 */
1052 if ((length % dev->maxpacket) == 0) { 1052 if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
1053 urb->transfer_buffer_length++; 1053 urb->transfer_buffer_length++;
1054 if (skb_tailroom(skb)) { 1054 if (skb_tailroom(skb)) {
1055 skb->data[skb->len] = 0; 1055 skb->data[skb->len] = 0;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 32266fb89c20..05630f2f6930 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1,4 +1,4 @@
1/* A simple network driver using virtio. 1/* A network driver using virtio.
2 * 2 *
3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4 * 4 *
@@ -47,19 +47,9 @@ struct virtnet_info
47 struct napi_struct napi; 47 struct napi_struct napi;
48 unsigned int status; 48 unsigned int status;
49 49
50 /* The skb we couldn't send because buffers were full. */
51 struct sk_buff *last_xmit_skb;
52
53 /* If we need to free in a timer, this is it. */
54 struct timer_list xmit_free_timer;
55
56 /* Number of input buffers, and max we've ever had. */ 50 /* Number of input buffers, and max we've ever had. */
57 unsigned int num, max; 51 unsigned int num, max;
58 52
59 /* For cleaning up after transmission. */
60 struct tasklet_struct tasklet;
61 bool free_in_tasklet;
62
63 /* I like... big packets and I cannot lie! */ 53 /* I like... big packets and I cannot lie! */
64 bool big_packets; 54 bool big_packets;
65 55
@@ -77,9 +67,17 @@ struct virtnet_info
77 struct page *pages; 67 struct page *pages;
78}; 68};
79 69
80static inline void *skb_vnet_hdr(struct sk_buff *skb) 70struct skb_vnet_hdr {
71 union {
72 struct virtio_net_hdr hdr;
73 struct virtio_net_hdr_mrg_rxbuf mhdr;
74 };
75 unsigned int num_sg;
76};
77
78static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
81{ 79{
82 return (struct virtio_net_hdr *)skb->cb; 80 return (struct skb_vnet_hdr *)skb->cb;
83} 81}
84 82
85static void give_a_page(struct virtnet_info *vi, struct page *page) 83static void give_a_page(struct virtnet_info *vi, struct page *page)
@@ -118,17 +116,13 @@ static void skb_xmit_done(struct virtqueue *svq)
118 116
119 /* We were probably waiting for more output buffers. */ 117 /* We were probably waiting for more output buffers. */
120 netif_wake_queue(vi->dev); 118 netif_wake_queue(vi->dev);
121
122 /* Make sure we re-xmit last_xmit_skb: if there are no more packets
123 * queued, start_xmit won't be called. */
124 tasklet_schedule(&vi->tasklet);
125} 119}
126 120
127static void receive_skb(struct net_device *dev, struct sk_buff *skb, 121static void receive_skb(struct net_device *dev, struct sk_buff *skb,
128 unsigned len) 122 unsigned len)
129{ 123{
130 struct virtnet_info *vi = netdev_priv(dev); 124 struct virtnet_info *vi = netdev_priv(dev);
131 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb); 125 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
132 int err; 126 int err;
133 int i; 127 int i;
134 128
@@ -139,7 +133,6 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
139 } 133 }
140 134
141 if (vi->mergeable_rx_bufs) { 135 if (vi->mergeable_rx_bufs) {
142 struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
143 unsigned int copy; 136 unsigned int copy;
144 char *p = page_address(skb_shinfo(skb)->frags[0].page); 137 char *p = page_address(skb_shinfo(skb)->frags[0].page);
145 138
@@ -147,8 +140,8 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
147 len = PAGE_SIZE; 140 len = PAGE_SIZE;
148 len -= sizeof(struct virtio_net_hdr_mrg_rxbuf); 141 len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
149 142
150 memcpy(hdr, p, sizeof(*mhdr)); 143 memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
151 p += sizeof(*mhdr); 144 p += sizeof(hdr->mhdr);
152 145
153 copy = len; 146 copy = len;
154 if (copy > skb_tailroom(skb)) 147 if (copy > skb_tailroom(skb))
@@ -163,13 +156,13 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
163 skb_shinfo(skb)->nr_frags--; 156 skb_shinfo(skb)->nr_frags--;
164 } else { 157 } else {
165 skb_shinfo(skb)->frags[0].page_offset += 158 skb_shinfo(skb)->frags[0].page_offset +=
166 sizeof(*mhdr) + copy; 159 sizeof(hdr->mhdr) + copy;
167 skb_shinfo(skb)->frags[0].size = len; 160 skb_shinfo(skb)->frags[0].size = len;
168 skb->data_len += len; 161 skb->data_len += len;
169 skb->len += len; 162 skb->len += len;
170 } 163 }
171 164
172 while (--mhdr->num_buffers) { 165 while (--hdr->mhdr.num_buffers) {
173 struct sk_buff *nskb; 166 struct sk_buff *nskb;
174 167
175 i = skb_shinfo(skb)->nr_frags; 168 i = skb_shinfo(skb)->nr_frags;
@@ -183,7 +176,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
183 nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len); 176 nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
184 if (!nskb) { 177 if (!nskb) {
185 pr_debug("%s: rx error: %d buffers missing\n", 178 pr_debug("%s: rx error: %d buffers missing\n",
186 dev->name, mhdr->num_buffers); 179 dev->name, hdr->mhdr.num_buffers);
187 dev->stats.rx_length_errors++; 180 dev->stats.rx_length_errors++;
188 goto drop; 181 goto drop;
189 } 182 }
@@ -204,7 +197,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
204 skb->len += len; 197 skb->len += len;
205 } 198 }
206 } else { 199 } else {
207 len -= sizeof(struct virtio_net_hdr); 200 len -= sizeof(hdr->hdr);
208 201
209 if (len <= MAX_PACKET_LEN) 202 if (len <= MAX_PACKET_LEN)
210 trim_pages(vi, skb); 203 trim_pages(vi, skb);
@@ -222,9 +215,11 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
222 dev->stats.rx_bytes += skb->len; 215 dev->stats.rx_bytes += skb->len;
223 dev->stats.rx_packets++; 216 dev->stats.rx_packets++;
224 217
225 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 218 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
226 pr_debug("Needs csum!\n"); 219 pr_debug("Needs csum!\n");
227 if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset)) 220 if (!skb_partial_csum_set(skb,
221 hdr->hdr.csum_start,
222 hdr->hdr.csum_offset))
228 goto frame_err; 223 goto frame_err;
229 } 224 }
230 225
@@ -232,9 +227,9 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
232 pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 227 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
233 ntohs(skb->protocol), skb->len, skb->pkt_type); 228 ntohs(skb->protocol), skb->len, skb->pkt_type);
234 229
235 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 230 if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
236 pr_debug("GSO!\n"); 231 pr_debug("GSO!\n");
237 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 232 switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
238 case VIRTIO_NET_HDR_GSO_TCPV4: 233 case VIRTIO_NET_HDR_GSO_TCPV4:
239 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 234 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
240 break; 235 break;
@@ -247,14 +242,14 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
247 default: 242 default:
248 if (net_ratelimit()) 243 if (net_ratelimit())
249 printk(KERN_WARNING "%s: bad gso type %u.\n", 244 printk(KERN_WARNING "%s: bad gso type %u.\n",
250 dev->name, hdr->gso_type); 245 dev->name, hdr->hdr.gso_type);
251 goto frame_err; 246 goto frame_err;
252 } 247 }
253 248
254 if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) 249 if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
255 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 250 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
256 251
257 skb_shinfo(skb)->gso_size = hdr->gso_size; 252 skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
258 if (skb_shinfo(skb)->gso_size == 0) { 253 if (skb_shinfo(skb)->gso_size == 0) {
259 if (net_ratelimit()) 254 if (net_ratelimit())
260 printk(KERN_WARNING "%s: zero gso size.\n", 255 printk(KERN_WARNING "%s: zero gso size.\n",
@@ -284,8 +279,8 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
284 bool oom = false; 279 bool oom = false;
285 280
286 sg_init_table(sg, 2+MAX_SKB_FRAGS); 281 sg_init_table(sg, 2+MAX_SKB_FRAGS);
287 for (;;) { 282 do {
288 struct virtio_net_hdr *hdr; 283 struct skb_vnet_hdr *hdr;
289 284
290 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); 285 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
291 if (unlikely(!skb)) { 286 if (unlikely(!skb)) {
@@ -297,7 +292,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
297 skb_put(skb, MAX_PACKET_LEN); 292 skb_put(skb, MAX_PACKET_LEN);
298 293
299 hdr = skb_vnet_hdr(skb); 294 hdr = skb_vnet_hdr(skb);
300 sg_set_buf(sg, hdr, sizeof(*hdr)); 295 sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
301 296
302 if (vi->big_packets) { 297 if (vi->big_packets) {
303 for (i = 0; i < MAX_SKB_FRAGS; i++) { 298 for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -320,14 +315,14 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
320 skb_queue_head(&vi->recv, skb); 315 skb_queue_head(&vi->recv, skb);
321 316
322 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb); 317 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
323 if (err) { 318 if (err < 0) {
324 skb_unlink(skb, &vi->recv); 319 skb_unlink(skb, &vi->recv);
325 trim_pages(vi, skb); 320 trim_pages(vi, skb);
326 kfree_skb(skb); 321 kfree_skb(skb);
327 break; 322 break;
328 } 323 }
329 vi->num++; 324 vi->num++;
330 } 325 } while (err >= num);
331 if (unlikely(vi->num > vi->max)) 326 if (unlikely(vi->num > vi->max))
332 vi->max = vi->num; 327 vi->max = vi->num;
333 vi->rvq->vq_ops->kick(vi->rvq); 328 vi->rvq->vq_ops->kick(vi->rvq);
@@ -345,7 +340,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
345 if (!vi->mergeable_rx_bufs) 340 if (!vi->mergeable_rx_bufs)
346 return try_fill_recv_maxbufs(vi, gfp); 341 return try_fill_recv_maxbufs(vi, gfp);
347 342
348 for (;;) { 343 do {
349 skb_frag_t *f; 344 skb_frag_t *f;
350 345
351 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); 346 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
@@ -373,13 +368,13 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
373 skb_queue_head(&vi->recv, skb); 368 skb_queue_head(&vi->recv, skb);
374 369
375 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb); 370 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
376 if (err) { 371 if (err < 0) {
377 skb_unlink(skb, &vi->recv); 372 skb_unlink(skb, &vi->recv);
378 kfree_skb(skb); 373 kfree_skb(skb);
379 break; 374 break;
380 } 375 }
381 vi->num++; 376 vi->num++;
382 } 377 } while (err > 0);
383 if (unlikely(vi->num > vi->max)) 378 if (unlikely(vi->num > vi->max))
384 vi->max = vi->num; 379 vi->max = vi->num;
385 vi->rvq->vq_ops->kick(vi->rvq); 380 vi->rvq->vq_ops->kick(vi->rvq);
@@ -447,42 +442,26 @@ again:
447 return received; 442 return received;
448} 443}
449 444
450static void free_old_xmit_skbs(struct virtnet_info *vi) 445static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
451{ 446{
452 struct sk_buff *skb; 447 struct sk_buff *skb;
453 unsigned int len; 448 unsigned int len, tot_sgs = 0;
454 449
455 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { 450 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
456 pr_debug("Sent skb %p\n", skb); 451 pr_debug("Sent skb %p\n", skb);
457 __skb_unlink(skb, &vi->send); 452 __skb_unlink(skb, &vi->send);
458 vi->dev->stats.tx_bytes += skb->len; 453 vi->dev->stats.tx_bytes += skb->len;
459 vi->dev->stats.tx_packets++; 454 vi->dev->stats.tx_packets++;
460 kfree_skb(skb); 455 tot_sgs += skb_vnet_hdr(skb)->num_sg;
456 dev_kfree_skb_any(skb);
461 } 457 }
462} 458 return tot_sgs;
463
464/* If the virtio transport doesn't always notify us when all in-flight packets
465 * are consumed, we fall back to using this function on a timer to free them. */
466static void xmit_free(unsigned long data)
467{
468 struct virtnet_info *vi = (void *)data;
469
470 netif_tx_lock(vi->dev);
471
472 free_old_xmit_skbs(vi);
473
474 if (!skb_queue_empty(&vi->send))
475 mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
476
477 netif_tx_unlock(vi->dev);
478} 459}
479 460
480static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) 461static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
481{ 462{
482 int num, err;
483 struct scatterlist sg[2+MAX_SKB_FRAGS]; 463 struct scatterlist sg[2+MAX_SKB_FRAGS];
484 struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb); 464 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
485 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
486 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 465 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
487 466
488 sg_init_table(sg, 2+MAX_SKB_FRAGS); 467 sg_init_table(sg, 2+MAX_SKB_FRAGS);
@@ -490,108 +469,97 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
490 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 469 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
491 470
492 if (skb->ip_summed == CHECKSUM_PARTIAL) { 471 if (skb->ip_summed == CHECKSUM_PARTIAL) {
493 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 472 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
494 hdr->csum_start = skb->csum_start - skb_headroom(skb); 473 hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
495 hdr->csum_offset = skb->csum_offset; 474 hdr->hdr.csum_offset = skb->csum_offset;
496 } else { 475 } else {
497 hdr->flags = 0; 476 hdr->hdr.flags = 0;
498 hdr->csum_offset = hdr->csum_start = 0; 477 hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
499 } 478 }
500 479
501 if (skb_is_gso(skb)) { 480 if (skb_is_gso(skb)) {
502 hdr->hdr_len = skb_headlen(skb); 481 hdr->hdr.hdr_len = skb_headlen(skb);
503 hdr->gso_size = skb_shinfo(skb)->gso_size; 482 hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
504 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 483 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
505 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 484 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
506 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 485 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
507 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 486 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
508 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) 487 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
509 hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; 488 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
510 else 489 else
511 BUG(); 490 BUG();
512 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 491 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
513 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 492 hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
514 } else { 493 } else {
515 hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; 494 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
516 hdr->gso_size = hdr->hdr_len = 0; 495 hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
517 } 496 }
518 497
519 mhdr->num_buffers = 0; 498 hdr->mhdr.num_buffers = 0;
520 499
521 /* Encode metadata header at front. */ 500 /* Encode metadata header at front. */
522 if (vi->mergeable_rx_bufs) 501 if (vi->mergeable_rx_bufs)
523 sg_set_buf(sg, mhdr, sizeof(*mhdr)); 502 sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
524 else 503 else
525 sg_set_buf(sg, hdr, sizeof(*hdr)); 504 sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
526
527 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
528 505
529 err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); 506 hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
530 if (!err && !vi->free_in_tasklet) 507 return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
531 mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
532
533 return err;
534}
535
536static void xmit_tasklet(unsigned long data)
537{
538 struct virtnet_info *vi = (void *)data;
539
540 netif_tx_lock_bh(vi->dev);
541 if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
542 vi->svq->vq_ops->kick(vi->svq);
543 vi->last_xmit_skb = NULL;
544 }
545 if (vi->free_in_tasklet)
546 free_old_xmit_skbs(vi);
547 netif_tx_unlock_bh(vi->dev);
548} 508}
549 509
550static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 510static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
551{ 511{
552 struct virtnet_info *vi = netdev_priv(dev); 512 struct virtnet_info *vi = netdev_priv(dev);
513 int capacity;
553 514
554again: 515again:
555 /* Free up any pending old buffers before queueing new ones. */ 516 /* Free up any pending old buffers before queueing new ones. */
556 free_old_xmit_skbs(vi); 517 free_old_xmit_skbs(vi);
557 518
558 /* If we has a buffer left over from last time, send it now. */ 519 /* Try to transmit */
559 if (unlikely(vi->last_xmit_skb) && 520 capacity = xmit_skb(vi, skb);
560 xmit_skb(vi, vi->last_xmit_skb) != 0)
561 goto stop_queue;
562
563 vi->last_xmit_skb = NULL;
564 521
565 /* Put new one in send queue and do transmit */ 522 /* This can happen with OOM and indirect buffers. */
566 if (likely(skb)) { 523 if (unlikely(capacity < 0)) {
567 __skb_queue_head(&vi->send, skb); 524 netif_stop_queue(dev);
568 if (xmit_skb(vi, skb) != 0) { 525 dev_warn(&dev->dev, "Unexpected full queue\n");
569 vi->last_xmit_skb = skb; 526 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
570 skb = NULL; 527 vi->svq->vq_ops->disable_cb(vi->svq);
571 goto stop_queue; 528 netif_start_queue(dev);
529 goto again;
572 } 530 }
531 return NETDEV_TX_BUSY;
573 } 532 }
574done:
575 vi->svq->vq_ops->kick(vi->svq); 533 vi->svq->vq_ops->kick(vi->svq);
576 return NETDEV_TX_OK;
577
578stop_queue:
579 pr_debug("%s: virtio not prepared to send\n", dev->name);
580 netif_stop_queue(dev);
581 534
582 /* Activate callback for using skbs: if this returns false it 535 /*
583 * means some were used in the meantime. */ 536 * Put new one in send queue. You'd expect we'd need this before
584 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 537 * xmit_skb calls add_buf(), since the callback can be triggered
585 vi->svq->vq_ops->disable_cb(vi->svq); 538 * immediately after that. But since the callback just triggers
586 netif_start_queue(dev); 539 * another call back here, normal network xmit locking prevents the
587 goto again; 540 * race.
588 } 541 */
589 if (skb) { 542 __skb_queue_head(&vi->send, skb);
590 /* Drop this skb: we only queue one. */ 543
591 vi->dev->stats.tx_dropped++; 544 /* Don't wait up for transmitted skbs to be freed. */
592 kfree_skb(skb); 545 skb_orphan(skb);
546 nf_reset(skb);
547
548 /* Apparently nice girls don't return TX_BUSY; stop the queue
549 * before it gets out of hand. Naturally, this wastes entries. */
550 if (capacity < 2+MAX_SKB_FRAGS) {
551 netif_stop_queue(dev);
552 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
553 /* More just got used, free them then recheck. */
554 capacity += free_old_xmit_skbs(vi);
555 if (capacity >= 2+MAX_SKB_FRAGS) {
556 netif_start_queue(dev);
557 vi->svq->vq_ops->disable_cb(vi->svq);
558 }
559 }
593 } 560 }
594 goto done; 561
562 return NETDEV_TX_OK;
595} 563}
596 564
597static int virtnet_set_mac_address(struct net_device *dev, void *p) 565static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -668,7 +636,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
668 sg_set_buf(&sg[i + 1], sg_virt(s), s->length); 636 sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
669 sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); 637 sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
670 638
671 BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi)); 639 BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
672 640
673 vi->cvq->vq_ops->kick(vi->cvq); 641 vi->cvq->vq_ops->kick(vi->cvq);
674 642
@@ -924,10 +892,6 @@ static int virtnet_probe(struct virtio_device *vdev)
924 vi->pages = NULL; 892 vi->pages = NULL;
925 INIT_DELAYED_WORK(&vi->refill, refill_work); 893 INIT_DELAYED_WORK(&vi->refill, refill_work);
926 894
927 /* If they give us a callback when all buffers are done, we don't need
928 * the timer. */
929 vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
930
931 /* If we can receive ANY GSO packets, we must allocate large ones. */ 895 /* If we can receive ANY GSO packets, we must allocate large ones. */
932 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) 896 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
933 || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) 897 || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
@@ -959,11 +923,6 @@ static int virtnet_probe(struct virtio_device *vdev)
959 skb_queue_head_init(&vi->recv); 923 skb_queue_head_init(&vi->recv);
960 skb_queue_head_init(&vi->send); 924 skb_queue_head_init(&vi->send);
961 925
962 tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
963
964 if (!vi->free_in_tasklet)
965 setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
966
967 err = register_netdev(dev); 926 err = register_netdev(dev);
968 if (err) { 927 if (err) {
969 pr_debug("virtio_net: registering device failed\n"); 928 pr_debug("virtio_net: registering device failed\n");
@@ -996,7 +955,7 @@ free:
996 return err; 955 return err;
997} 956}
998 957
999static void virtnet_remove(struct virtio_device *vdev) 958static void __devexit virtnet_remove(struct virtio_device *vdev)
1000{ 959{
1001 struct virtnet_info *vi = vdev->priv; 960 struct virtnet_info *vi = vdev->priv;
1002 struct sk_buff *skb; 961 struct sk_buff *skb;
@@ -1004,9 +963,6 @@ static void virtnet_remove(struct virtio_device *vdev)
1004 /* Stop all the virtqueues. */ 963 /* Stop all the virtqueues. */
1005 vdev->config->reset(vdev); 964 vdev->config->reset(vdev);
1006 965
1007 if (!vi->free_in_tasklet)
1008 del_timer_sync(&vi->xmit_free_timer);
1009
1010 /* Free our skbs in send and recv queues, if any. */ 966 /* Free our skbs in send and recv queues, if any. */
1011 while ((skb = __skb_dequeue(&vi->recv)) != NULL) { 967 while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
1012 kfree_skb(skb); 968 kfree_skb(skb);
@@ -1040,7 +996,6 @@ static unsigned int features[] = {
1040 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 996 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1041 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 997 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1042 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 998 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1043 VIRTIO_F_NOTIFY_ON_EMPTY,
1044}; 999};
1045 1000
1046static struct virtio_driver virtio_net = { 1001static struct virtio_driver virtio_net = {
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
new file mode 100644
index 000000000000..880f5098eac9
--- /dev/null
+++ b/drivers/net/vmxnet3/Makefile
@@ -0,0 +1,35 @@
1################################################################################
2#
3# Linux driver for VMware's vmxnet3 ethernet NIC.
4#
5# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms of the GNU General Public License as published by the
9# Free Software Foundation; version 2 of the License and no later version.
10#
11# This program is distributed in the hope that it will be useful, but
12# WITHOUT ANY WARRANTY; without even the implied warranty of
13# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14# NON INFRINGEMENT. See the GNU General Public License for more
15# details.
16#
17# You should have received a copy of the GNU General Public License
18# along with this program; if not, write to the Free Software
19# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20#
21# The full GNU General Public License is included in this distribution in
22# the file called "COPYING".
23#
24# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
25#
26#
27################################################################################
28
29#
30# Makefile for the VMware vmxnet3 ethernet NIC driver
31#
32
33obj-$(CONFIG_VMXNET3) += vmxnet3.o
34
35vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
new file mode 100644
index 000000000000..37108fb226d3
--- /dev/null
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -0,0 +1,96 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27#ifndef _UPT1_DEFS_H
28#define _UPT1_DEFS_H
29
30struct UPT1_TxStats {
31 u64 TSOPktsTxOK; /* TSO pkts post-segmentation */
32 u64 TSOBytesTxOK;
33 u64 ucastPktsTxOK;
34 u64 ucastBytesTxOK;
35 u64 mcastPktsTxOK;
36 u64 mcastBytesTxOK;
37 u64 bcastPktsTxOK;
38 u64 bcastBytesTxOK;
39 u64 pktsTxError;
40 u64 pktsTxDiscard;
41};
42
43struct UPT1_RxStats {
44 u64 LROPktsRxOK; /* LRO pkts */
45 u64 LROBytesRxOK; /* bytes from LRO pkts */
46 /* the following counters are for pkts from the wire, i.e., pre-LRO */
47 u64 ucastPktsRxOK;
48 u64 ucastBytesRxOK;
49 u64 mcastPktsRxOK;
50 u64 mcastBytesRxOK;
51 u64 bcastPktsRxOK;
52 u64 bcastBytesRxOK;
53 u64 pktsRxOutOfBuf;
54 u64 pktsRxError;
55};
56
57/* interrupt moderation level */
58enum {
59 UPT1_IML_NONE = 0, /* no interrupt moderation */
60 UPT1_IML_HIGHEST = 7, /* least intr generated */
61 UPT1_IML_ADAPTIVE = 8, /* adpative intr moderation */
62};
63/* values for UPT1_RSSConf.hashFunc */
64enum {
65 UPT1_RSS_HASH_TYPE_NONE = 0x0,
66 UPT1_RSS_HASH_TYPE_IPV4 = 0x01,
67 UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02,
68 UPT1_RSS_HASH_TYPE_IPV6 = 0x04,
69 UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08,
70};
71
72enum {
73 UPT1_RSS_HASH_FUNC_NONE = 0x0,
74 UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01,
75};
76
77#define UPT1_RSS_MAX_KEY_SIZE 40
78#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
79
80struct UPT1_RSSConf {
81 u16 hashType;
82 u16 hashFunc;
83 u16 hashKeySize;
84 u16 indTableSize;
85 u8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
86 u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
87};
88
89/* features */
90enum {
91 UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
92 UPT1_F_RSS = 0x0002,
93 UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
94 UPT1_F_LRO = 0x0008,
95};
96#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
new file mode 100644
index 000000000000..dc8ee4438a4f
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -0,0 +1,535 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27#ifndef _VMXNET3_DEFS_H_
28#define _VMXNET3_DEFS_H_
29
30#include "upt1_defs.h"
31
32/* all registers are 32 bit wide */
33/* BAR 1 */
34enum {
35 VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */
36 VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */
37 VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */
38 VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */
39 VMXNET3_REG_CMD = 0x20, /* Command */
40 VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
41 VMXNET3_REG_MACH = 0x30, /* MAC Address High */
42 VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */
43 VMXNET3_REG_ECR = 0x40 /* Event Cause Register */
44};
45
46/* BAR 0 */
47enum {
48 VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */
49 VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */
50 VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */
51 VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
52};
53
54#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
55#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
56
57#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
58#define VMXNET3_REG_ALIGN_MASK 0x7
59
60/* I/O Mapped access to registers */
61#define VMXNET3_IO_TYPE_PT 0
62#define VMXNET3_IO_TYPE_VD 1
63#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
64#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
65#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
66
67enum {
68 VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
69 VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
70 VMXNET3_CMD_QUIESCE_DEV,
71 VMXNET3_CMD_RESET_DEV,
72 VMXNET3_CMD_UPDATE_RX_MODE,
73 VMXNET3_CMD_UPDATE_MAC_FILTERS,
74 VMXNET3_CMD_UPDATE_VLAN_FILTERS,
75 VMXNET3_CMD_UPDATE_RSSIDT,
76 VMXNET3_CMD_UPDATE_IML,
77 VMXNET3_CMD_UPDATE_PMCFG,
78 VMXNET3_CMD_UPDATE_FEATURE,
79 VMXNET3_CMD_LOAD_PLUGIN,
80
81 VMXNET3_CMD_FIRST_GET = 0xF00D0000,
82 VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
83 VMXNET3_CMD_GET_STATS,
84 VMXNET3_CMD_GET_LINK,
85 VMXNET3_CMD_GET_PERM_MAC_LO,
86 VMXNET3_CMD_GET_PERM_MAC_HI,
87 VMXNET3_CMD_GET_DID_LO,
88 VMXNET3_CMD_GET_DID_HI,
89 VMXNET3_CMD_GET_DEV_EXTRA_INFO,
90 VMXNET3_CMD_GET_CONF_INTR
91};
92
93struct Vmxnet3_TxDesc {
94 u64 addr;
95
96 u32 len:14;
97 u32 gen:1; /* generation bit */
98 u32 rsvd:1;
99 u32 dtype:1; /* descriptor type */
100 u32 ext1:1;
101 u32 msscof:14; /* MSS, checksum offset, flags */
102
103 u32 hlen:10; /* header len */
104 u32 om:2; /* offload mode */
105 u32 eop:1; /* End Of Packet */
106 u32 cq:1; /* completion request */
107 u32 ext2:1;
108 u32 ti:1; /* VLAN Tag Insertion */
109 u32 tci:16; /* Tag to Insert */
110};
111
112/* TxDesc.OM values */
113#define VMXNET3_OM_NONE 0
114#define VMXNET3_OM_CSUM 2
115#define VMXNET3_OM_TSO 3
116
117/* fields in TxDesc we access w/o using bit fields */
118#define VMXNET3_TXD_EOP_SHIFT 12
119#define VMXNET3_TXD_CQ_SHIFT 13
120#define VMXNET3_TXD_GEN_SHIFT 14
121
122#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
123#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
124#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
125
126#define VMXNET3_HDR_COPY_SIZE 128
127
128
129struct Vmxnet3_TxDataDesc {
130 u8 data[VMXNET3_HDR_COPY_SIZE];
131};
132
133
134struct Vmxnet3_TxCompDesc {
135 u32 txdIdx:12; /* Index of the EOP TxDesc */
136 u32 ext1:20;
137
138 u32 ext2;
139 u32 ext3;
140
141 u32 rsvd:24;
142 u32 type:7; /* completion type */
143 u32 gen:1; /* generation bit */
144};
145
146
147struct Vmxnet3_RxDesc {
148 u64 addr;
149
150 u32 len:14;
151 u32 btype:1; /* Buffer Type */
152 u32 dtype:1; /* Descriptor type */
153 u32 rsvd:15;
154 u32 gen:1; /* Generation bit */
155
156 u32 ext1;
157};
158
159/* values of RXD.BTYPE */
160#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
161#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
162
163/* fields in RxDesc we access w/o using bit fields */
164#define VMXNET3_RXD_BTYPE_SHIFT 14
165#define VMXNET3_RXD_GEN_SHIFT 31
166
167
168struct Vmxnet3_RxCompDesc {
169 u32 rxdIdx:12; /* Index of the RxDesc */
170 u32 ext1:2;
171 u32 eop:1; /* End of Packet */
172 u32 sop:1; /* Start of Packet */
173 u32 rqID:10; /* rx queue/ring ID */
174 u32 rssType:4; /* RSS hash type used */
175 u32 cnc:1; /* Checksum Not Calculated */
176 u32 ext2:1;
177
178 u32 rssHash; /* RSS hash value */
179
180 u32 len:14; /* data length */
181 u32 err:1; /* Error */
182 u32 ts:1; /* Tag is stripped */
183 u32 tci:16; /* Tag stripped */
184
185 u32 csum:16;
186 u32 tuc:1; /* TCP/UDP Checksum Correct */
187 u32 udp:1; /* UDP packet */
188 u32 tcp:1; /* TCP packet */
189 u32 ipc:1; /* IP Checksum Correct */
190 u32 v6:1; /* IPv6 */
191 u32 v4:1; /* IPv4 */
192 u32 frg:1; /* IP Fragment */
193 u32 fcs:1; /* Frame CRC correct */
194 u32 type:7; /* completion type */
195 u32 gen:1; /* generation bit */
196};
197
198/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
199#define VMXNET3_RCD_TUC_SHIFT 16
200#define VMXNET3_RCD_IPC_SHIFT 19
201
202/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
203#define VMXNET3_RCD_TYPE_SHIFT 56
204#define VMXNET3_RCD_GEN_SHIFT 63
205
206/* csum OK for TCP/UDP pkts over IP */
207#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
208 1 << VMXNET3_RCD_IPC_SHIFT)
209
210/* value of RxCompDesc.rssType */
211enum {
212 VMXNET3_RCD_RSS_TYPE_NONE = 0,
213 VMXNET3_RCD_RSS_TYPE_IPV4 = 1,
214 VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2,
215 VMXNET3_RCD_RSS_TYPE_IPV6 = 3,
216 VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4,
217};
218
219
220/* a union for accessing all cmd/completion descriptors */
221union Vmxnet3_GenericDesc {
222 u64 qword[2];
223 u32 dword[4];
224 u16 word[8];
225 struct Vmxnet3_TxDesc txd;
226 struct Vmxnet3_RxDesc rxd;
227 struct Vmxnet3_TxCompDesc tcd;
228 struct Vmxnet3_RxCompDesc rcd;
229};
230
231#define VMXNET3_INIT_GEN 1
232
233/* Max size of a single tx buffer */
234#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
235
236/* # of tx desc needed for a tx buffer size */
237#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \
238 VMXNET3_MAX_TX_BUF_SIZE)
239
240/* max # of tx descs for a non-tso pkt */
241#define VMXNET3_MAX_TXD_PER_PKT 16
242
243/* Max size of a single rx buffer */
244#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
245/* Minimum size of a type 0 buffer */
246#define VMXNET3_MIN_T0_BUF_SIZE 128
247#define VMXNET3_MAX_CSUM_OFFSET 1024
248
249/* Ring base address alignment */
250#define VMXNET3_RING_BA_ALIGN 512
251#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
252
253/* Ring size must be a multiple of 32 */
254#define VMXNET3_RING_SIZE_ALIGN 32
255#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
256
257/* Max ring size */
258#define VMXNET3_TX_RING_MAX_SIZE 4096
259#define VMXNET3_TC_RING_MAX_SIZE 4096
260#define VMXNET3_RX_RING_MAX_SIZE 4096
261#define VMXNET3_RC_RING_MAX_SIZE 8192
262
263/* a list of reasons for queue stop */
264
265enum {
266 VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */
267 VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */
268 VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */
269 VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */
270 VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */
271 VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */
272 VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */
273 VMXNET3_ERR_TXD_INVALID = 0x80000007, /* invalid TxDesc */
274};
275
276/* completion descriptor types */
277#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
278#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
279
280enum {
281 VMXNET3_GOS_BITS_UNK = 0, /* unknown */
282 VMXNET3_GOS_BITS_32 = 1,
283 VMXNET3_GOS_BITS_64 = 2,
284};
285
286#define VMXNET3_GOS_TYPE_LINUX 1
287
288
289struct Vmxnet3_GOSInfo {
290 u32 gosBits:2; /* 32-bit or 64-bit? */
291 u32 gosType:4; /* which guest */
292 u32 gosVer:16; /* gos version */
293 u32 gosMisc:10; /* other info about gos */
294};
295
296
297struct Vmxnet3_DriverInfo {
298 u32 version;
299 struct Vmxnet3_GOSInfo gos;
300 u32 vmxnet3RevSpt;
301 u32 uptVerSpt;
302};
303
304
305#define VMXNET3_REV1_MAGIC 0xbabefee1
306
307/*
308 * QueueDescPA must be 128 bytes aligned. It points to an array of
309 * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
310 * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by
311 * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
312 */
313#define VMXNET3_QUEUE_DESC_ALIGN 128
314
315
316struct Vmxnet3_MiscConf {
317 struct Vmxnet3_DriverInfo driverInfo;
318 u64 uptFeatures;
319 u64 ddPA; /* driver data PA */
320 u64 queueDescPA; /* queue descriptor table PA */
321 u32 ddLen; /* driver data len */
322 u32 queueDescLen; /* queue desc. table len in bytes */
323 u32 mtu;
324 u16 maxNumRxSG;
325 u8 numTxQueues;
326 u8 numRxQueues;
327 u32 reserved[4];
328};
329
330
331struct Vmxnet3_TxQueueConf {
332 u64 txRingBasePA;
333 u64 dataRingBasePA;
334 u64 compRingBasePA;
335 u64 ddPA; /* driver data */
336 u64 reserved;
337 u32 txRingSize; /* # of tx desc */
338 u32 dataRingSize; /* # of data desc */
339 u32 compRingSize; /* # of comp desc */
340 u32 ddLen; /* size of driver data */
341 u8 intrIdx;
342 u8 _pad[7];
343};
344
345
346struct Vmxnet3_RxQueueConf {
347 u64 rxRingBasePA[2];
348 u64 compRingBasePA;
349 u64 ddPA; /* driver data */
350 u64 reserved;
351 u32 rxRingSize[2]; /* # of rx desc */
352 u32 compRingSize; /* # of rx comp desc */
353 u32 ddLen; /* size of driver data */
354 u8 intrIdx;
355 u8 _pad[7];
356};
357
358
359enum vmxnet3_intr_mask_mode {
360 VMXNET3_IMM_AUTO = 0,
361 VMXNET3_IMM_ACTIVE = 1,
362 VMXNET3_IMM_LAZY = 2
363};
364
365enum vmxnet3_intr_type {
366 VMXNET3_IT_AUTO = 0,
367 VMXNET3_IT_INTX = 1,
368 VMXNET3_IT_MSI = 2,
369 VMXNET3_IT_MSIX = 3
370};
371
372#define VMXNET3_MAX_TX_QUEUES 8
373#define VMXNET3_MAX_RX_QUEUES 16
374/* addition 1 for events */
375#define VMXNET3_MAX_INTRS 25
376
377
378struct Vmxnet3_IntrConf {
379 bool autoMask;
380 u8 numIntrs; /* # of interrupts */
381 u8 eventIntrIdx;
382 u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
383 * each intr */
384 u32 reserved[3];
385};
386
387/* one bit per VLAN ID, the size is in the units of u32 */
388#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8))
389
390
391struct Vmxnet3_QueueStatus {
392 bool stopped;
393 u8 _pad[3];
394 u32 error;
395};
396
397
398struct Vmxnet3_TxQueueCtrl {
399 u32 txNumDeferred;
400 u32 txThreshold;
401 u64 reserved;
402};
403
404
405struct Vmxnet3_RxQueueCtrl {
406 bool updateRxProd;
407 u8 _pad[7];
408 u64 reserved;
409};
410
411enum {
412 VMXNET3_RXM_UCAST = 0x01, /* unicast only */
413 VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */
414 VMXNET3_RXM_BCAST = 0x04, /* broadcast only */
415 VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */
416 VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */
417};
418
419struct Vmxnet3_RxFilterConf {
420 u32 rxMode; /* VMXNET3_RXM_xxx */
421 u16 mfTableLen; /* size of the multicast filter table */
422 u16 _pad1;
423 u64 mfTablePA; /* PA of the multicast filters table */
424 u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
425};
426
427
428#define VMXNET3_PM_MAX_FILTERS 6
429#define VMXNET3_PM_MAX_PATTERN_SIZE 128
430#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
431
432#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
433#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
434 * filters */
435
436
437struct Vmxnet3_PM_PktFilter {
438 u8 maskSize;
439 u8 patternSize;
440 u8 mask[VMXNET3_PM_MAX_MASK_SIZE];
441 u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
442 u8 pad[6];
443};
444
445
446struct Vmxnet3_PMConf {
447 u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
448 u8 numFilters;
449 u8 pad[5];
450 struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
451};
452
453
454struct Vmxnet3_VariableLenConfDesc {
455 u32 confVer;
456 u32 confLen;
457 u64 confPA;
458};
459
460
461struct Vmxnet3_TxQueueDesc {
462 struct Vmxnet3_TxQueueCtrl ctrl;
463 struct Vmxnet3_TxQueueConf conf;
464
465 /* Driver read after a GET command */
466 struct Vmxnet3_QueueStatus status;
467 struct UPT1_TxStats stats;
468 u8 _pad[88]; /* 128 aligned */
469};
470
471
472struct Vmxnet3_RxQueueDesc {
473 struct Vmxnet3_RxQueueCtrl ctrl;
474 struct Vmxnet3_RxQueueConf conf;
475 /* Driver read after a GET commad */
476 struct Vmxnet3_QueueStatus status;
477 struct UPT1_RxStats stats;
478 u8 __pad[88]; /* 128 aligned */
479};
480
481
482struct Vmxnet3_DSDevRead {
483 /* read-only region for device, read by dev in response to a SET cmd */
484 struct Vmxnet3_MiscConf misc;
485 struct Vmxnet3_IntrConf intrConf;
486 struct Vmxnet3_RxFilterConf rxFilterConf;
487 struct Vmxnet3_VariableLenConfDesc rssConfDesc;
488 struct Vmxnet3_VariableLenConfDesc pmConfDesc;
489 struct Vmxnet3_VariableLenConfDesc pluginConfDesc;
490};
491
492/* All structures in DriverShared are padded to multiples of 8 bytes */
493struct Vmxnet3_DriverShared {
494 u32 magic;
495 /* make devRead start at 64bit boundaries */
496 u32 pad;
497 struct Vmxnet3_DSDevRead devRead;
498 u32 ecr;
499 u32 reserved[5];
500};
501
502
503#define VMXNET3_ECR_RQERR (1 << 0)
504#define VMXNET3_ECR_TQERR (1 << 1)
505#define VMXNET3_ECR_LINK (1 << 2)
506#define VMXNET3_ECR_DIC (1 << 3)
507#define VMXNET3_ECR_DEBUG (1 << 4)
508
509/* flip the gen bit of a ring */
510#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
511
512/* only use this if moving the idx won't affect the gen bit */
513#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
514 do {\
515 (idx)++;\
516 if (unlikely((idx) == (ring_size))) {\
517 (idx) = 0;\
518 } \
519 } while (0)
520
521#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
522 (vfTable[vid >> 5] |= (1 << (vid & 31)))
523#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
524 (vfTable[vid >> 5] &= ~(1 << (vid & 31)))
525
526#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
527 ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
528
529#define VMXNET3_MAX_MTU 9000
530#define VMXNET3_MIN_MTU 60
531
532#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
533#define VMXNET3_LINK_DOWN 0
534
535#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
new file mode 100644
index 000000000000..004353a46af0
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -0,0 +1,2574 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27#include "vmxnet3_int.h"
28
29char vmxnet3_driver_name[] = "vmxnet3";
30#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
31
32
33/*
34 * PCI Device ID Table
35 * Last entry must be all 0s
36 */
37static const struct pci_device_id vmxnet3_pciid_table[] = {
38 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
39 {0}
40};
41
42MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
43
44static atomic_t devices_found;
45
46
47/*
48 * Enable/Disable the given intr
49 */
50static void
51vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
52{
53 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
54}
55
56
57static void
58vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
59{
60 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
61}
62
63
64/*
65 * Enable/Disable all intrs used by the device
66 */
67static void
68vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
69{
70 int i;
71
72 for (i = 0; i < adapter->intr.num_intrs; i++)
73 vmxnet3_enable_intr(adapter, i);
74}
75
76
77static void
78vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
79{
80 int i;
81
82 for (i = 0; i < adapter->intr.num_intrs; i++)
83 vmxnet3_disable_intr(adapter, i);
84}
85
86
87static void
88vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
89{
90 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
91}
92
93
94static bool
95vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
96{
97 return netif_queue_stopped(adapter->netdev);
98}
99
100
101static void
102vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
103{
104 tq->stopped = false;
105 netif_start_queue(adapter->netdev);
106}
107
108
109static void
110vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
111{
112 tq->stopped = false;
113 netif_wake_queue(adapter->netdev);
114}
115
116
117static void
118vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
119{
120 tq->stopped = true;
121 tq->num_stop++;
122 netif_stop_queue(adapter->netdev);
123}
124
125
126/*
127 * Check the link state. This may start or stop the tx queue.
128 */
129static void
130vmxnet3_check_link(struct vmxnet3_adapter *adapter)
131{
132 u32 ret;
133
134 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
135 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
136 adapter->link_speed = ret >> 16;
137 if (ret & 1) { /* Link is up. */
138 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
139 adapter->netdev->name, adapter->link_speed);
140 if (!netif_carrier_ok(adapter->netdev))
141 netif_carrier_on(adapter->netdev);
142
143 vmxnet3_tq_start(&adapter->tx_queue, adapter);
144 } else {
145 printk(KERN_INFO "%s: NIC Link is Down\n",
146 adapter->netdev->name);
147 if (netif_carrier_ok(adapter->netdev))
148 netif_carrier_off(adapter->netdev);
149
150 vmxnet3_tq_stop(&adapter->tx_queue, adapter);
151 }
152}
153
154
155static void
156vmxnet3_process_events(struct vmxnet3_adapter *adapter)
157{
158 u32 events = adapter->shared->ecr;
159 if (!events)
160 return;
161
162 vmxnet3_ack_events(adapter, events);
163
164 /* Check if link state has changed */
165 if (events & VMXNET3_ECR_LINK)
166 vmxnet3_check_link(adapter);
167
168 /* Check if there is an error on xmit/recv queues */
169 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
170 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
171 VMXNET3_CMD_GET_QUEUE_STATUS);
172
173 if (adapter->tqd_start->status.stopped) {
174 printk(KERN_ERR "%s: tq error 0x%x\n",
175 adapter->netdev->name,
176 adapter->tqd_start->status.error);
177 }
178 if (adapter->rqd_start->status.stopped) {
179 printk(KERN_ERR "%s: rq error 0x%x\n",
180 adapter->netdev->name,
181 adapter->rqd_start->status.error);
182 }
183
184 schedule_work(&adapter->work);
185 }
186}
187
188
189static void
190vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
191 struct pci_dev *pdev)
192{
193 if (tbi->map_type == VMXNET3_MAP_SINGLE)
194 pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
195 PCI_DMA_TODEVICE);
196 else if (tbi->map_type == VMXNET3_MAP_PAGE)
197 pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
198 PCI_DMA_TODEVICE);
199 else
200 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
201
202 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
203}
204
205
206static int
207vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
208 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
209{
210 struct sk_buff *skb;
211 int entries = 0;
212
213 /* no out of order completion */
214 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
215 BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);
216
217 skb = tq->buf_info[eop_idx].skb;
218 BUG_ON(skb == NULL);
219 tq->buf_info[eop_idx].skb = NULL;
220
221 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
222
223 while (tq->tx_ring.next2comp != eop_idx) {
224 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
225 pdev);
226
227 /* update next2comp w/o tx_lock. Since we are marking more,
228 * instead of less, tx ring entries avail, the worst case is
229 * that the tx routine incorrectly re-queues a pkt due to
230 * insufficient tx ring entries.
231 */
232 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
233 entries++;
234 }
235
236 dev_kfree_skb_any(skb);
237 return entries;
238}
239
240
241static int
242vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
243 struct vmxnet3_adapter *adapter)
244{
245 int completed = 0;
246 union Vmxnet3_GenericDesc *gdesc;
247
248 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
249 while (gdesc->tcd.gen == tq->comp_ring.gen) {
250 completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
251 adapter->pdev, adapter);
252
253 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
254 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
255 }
256
257 if (completed) {
258 spin_lock(&tq->tx_lock);
259 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
260 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
261 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
262 netif_carrier_ok(adapter->netdev))) {
263 vmxnet3_tq_wake(tq, adapter);
264 }
265 spin_unlock(&tq->tx_lock);
266 }
267 return completed;
268}
269
270
271static void
272vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
273 struct vmxnet3_adapter *adapter)
274{
275 int i;
276
277 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
278 struct vmxnet3_tx_buf_info *tbi;
279 union Vmxnet3_GenericDesc *gdesc;
280
281 tbi = tq->buf_info + tq->tx_ring.next2comp;
282 gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;
283
284 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
285 if (tbi->skb) {
286 dev_kfree_skb_any(tbi->skb);
287 tbi->skb = NULL;
288 }
289 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
290 }
291
292 /* sanity check, verify all buffers are indeed unmapped and freed */
293 for (i = 0; i < tq->tx_ring.size; i++) {
294 BUG_ON(tq->buf_info[i].skb != NULL ||
295 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
296 }
297
298 tq->tx_ring.gen = VMXNET3_INIT_GEN;
299 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
300
301 tq->comp_ring.gen = VMXNET3_INIT_GEN;
302 tq->comp_ring.next2proc = 0;
303}
304
305
306void
307vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
308 struct vmxnet3_adapter *adapter)
309{
310 if (tq->tx_ring.base) {
311 pci_free_consistent(adapter->pdev, tq->tx_ring.size *
312 sizeof(struct Vmxnet3_TxDesc),
313 tq->tx_ring.base, tq->tx_ring.basePA);
314 tq->tx_ring.base = NULL;
315 }
316 if (tq->data_ring.base) {
317 pci_free_consistent(adapter->pdev, tq->data_ring.size *
318 sizeof(struct Vmxnet3_TxDataDesc),
319 tq->data_ring.base, tq->data_ring.basePA);
320 tq->data_ring.base = NULL;
321 }
322 if (tq->comp_ring.base) {
323 pci_free_consistent(adapter->pdev, tq->comp_ring.size *
324 sizeof(struct Vmxnet3_TxCompDesc),
325 tq->comp_ring.base, tq->comp_ring.basePA);
326 tq->comp_ring.base = NULL;
327 }
328 kfree(tq->buf_info);
329 tq->buf_info = NULL;
330}
331
332
333static void
334vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
335 struct vmxnet3_adapter *adapter)
336{
337 int i;
338
339 /* reset the tx ring contents to 0 and reset the tx ring states */
340 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
341 sizeof(struct Vmxnet3_TxDesc));
342 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
343 tq->tx_ring.gen = VMXNET3_INIT_GEN;
344
345 memset(tq->data_ring.base, 0, tq->data_ring.size *
346 sizeof(struct Vmxnet3_TxDataDesc));
347
348 /* reset the tx comp ring contents to 0 and reset comp ring states */
349 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
350 sizeof(struct Vmxnet3_TxCompDesc));
351 tq->comp_ring.next2proc = 0;
352 tq->comp_ring.gen = VMXNET3_INIT_GEN;
353
354 /* reset the bookkeeping data */
355 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
356 for (i = 0; i < tq->tx_ring.size; i++)
357 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
358
359 /* stats are not reset */
360}
361
362
363static int
364vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
365 struct vmxnet3_adapter *adapter)
366{
367 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
368 tq->comp_ring.base || tq->buf_info);
369
370 tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
371 * sizeof(struct Vmxnet3_TxDesc),
372 &tq->tx_ring.basePA);
373 if (!tq->tx_ring.base) {
374 printk(KERN_ERR "%s: failed to allocate tx ring\n",
375 adapter->netdev->name);
376 goto err;
377 }
378
379 tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
380 tq->data_ring.size *
381 sizeof(struct Vmxnet3_TxDataDesc),
382 &tq->data_ring.basePA);
383 if (!tq->data_ring.base) {
384 printk(KERN_ERR "%s: failed to allocate data ring\n",
385 adapter->netdev->name);
386 goto err;
387 }
388
389 tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
390 tq->comp_ring.size *
391 sizeof(struct Vmxnet3_TxCompDesc),
392 &tq->comp_ring.basePA);
393 if (!tq->comp_ring.base) {
394 printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
395 adapter->netdev->name);
396 goto err;
397 }
398
399 tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
400 GFP_KERNEL);
401 if (!tq->buf_info) {
402 printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
403 adapter->netdev->name);
404 goto err;
405 }
406
407 return 0;
408
409err:
410 vmxnet3_tq_destroy(tq, adapter);
411 return -ENOMEM;
412}
413
414
415/*
416 * starting from ring->next2fill, allocate rx buffers for the given ring
417 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
418 * are allocated or allocation fails
419 */
420
421static int
422vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
423 int num_to_alloc, struct vmxnet3_adapter *adapter)
424{
425 int num_allocated = 0;
426 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
427 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
428 u32 val;
429
430 while (num_allocated < num_to_alloc) {
431 struct vmxnet3_rx_buf_info *rbi;
432 union Vmxnet3_GenericDesc *gd;
433
434 rbi = rbi_base + ring->next2fill;
435 gd = ring->base + ring->next2fill;
436
437 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
438 if (rbi->skb == NULL) {
439 rbi->skb = dev_alloc_skb(rbi->len +
440 NET_IP_ALIGN);
441 if (unlikely(rbi->skb == NULL)) {
442 rq->stats.rx_buf_alloc_failure++;
443 break;
444 }
445 rbi->skb->dev = adapter->netdev;
446
447 skb_reserve(rbi->skb, NET_IP_ALIGN);
448 rbi->dma_addr = pci_map_single(adapter->pdev,
449 rbi->skb->data, rbi->len,
450 PCI_DMA_FROMDEVICE);
451 } else {
452 /* rx buffer skipped by the device */
453 }
454 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
455 } else {
456 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
457 rbi->len != PAGE_SIZE);
458
459 if (rbi->page == NULL) {
460 rbi->page = alloc_page(GFP_ATOMIC);
461 if (unlikely(rbi->page == NULL)) {
462 rq->stats.rx_buf_alloc_failure++;
463 break;
464 }
465 rbi->dma_addr = pci_map_page(adapter->pdev,
466 rbi->page, 0, PAGE_SIZE,
467 PCI_DMA_FROMDEVICE);
468 } else {
469 /* rx buffers skipped by the device */
470 }
471 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
472 }
473
474 BUG_ON(rbi->dma_addr == 0);
475 gd->rxd.addr = rbi->dma_addr;
476 gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
477 rbi->len;
478
479 num_allocated++;
480 vmxnet3_cmd_ring_adv_next2fill(ring);
481 }
482 rq->uncommitted[ring_idx] += num_allocated;
483
484 dev_dbg(&adapter->netdev->dev,
485 "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
486 "%u, uncommited %u\n", num_allocated, ring->next2fill,
487 ring->next2comp, rq->uncommitted[ring_idx]);
488
489 /* so that the device can distinguish a full ring and an empty ring */
490 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
491
492 return num_allocated;
493}
494
495
496static void
497vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
498 struct vmxnet3_rx_buf_info *rbi)
499{
500 struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
501 skb_shinfo(skb)->nr_frags;
502
503 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
504
505 frag->page = rbi->page;
506 frag->page_offset = 0;
507 frag->size = rcd->len;
508 skb->data_len += frag->size;
509 skb_shinfo(skb)->nr_frags++;
510}
511
512
513static void
514vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
515 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
516 struct vmxnet3_adapter *adapter)
517{
518 u32 dw2, len;
519 unsigned long buf_offset;
520 int i;
521 union Vmxnet3_GenericDesc *gdesc;
522 struct vmxnet3_tx_buf_info *tbi = NULL;
523
524 BUG_ON(ctx->copy_size > skb_headlen(skb));
525
526 /* use the previous gen bit for the SOP desc */
527 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
528
529 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
530 gdesc = ctx->sop_txd; /* both loops below can be skipped */
531
532 /* no need to map the buffer if headers are copied */
533 if (ctx->copy_size) {
534 ctx->sop_txd->txd.addr = tq->data_ring.basePA +
535 tq->tx_ring.next2fill *
536 sizeof(struct Vmxnet3_TxDataDesc);
537 ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
538 ctx->sop_txd->dword[3] = 0;
539
540 tbi = tq->buf_info + tq->tx_ring.next2fill;
541 tbi->map_type = VMXNET3_MAP_NONE;
542
543 dev_dbg(&adapter->netdev->dev,
544 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
545 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
546 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
547 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
548
549 /* use the right gen for non-SOP desc */
550 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
551 }
552
553 /* linear part can use multiple tx desc if it's big */
554 len = skb_headlen(skb) - ctx->copy_size;
555 buf_offset = ctx->copy_size;
556 while (len) {
557 u32 buf_size;
558
559 buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
560 VMXNET3_MAX_TX_BUF_SIZE : len;
561
562 tbi = tq->buf_info + tq->tx_ring.next2fill;
563 tbi->map_type = VMXNET3_MAP_SINGLE;
564 tbi->dma_addr = pci_map_single(adapter->pdev,
565 skb->data + buf_offset, buf_size,
566 PCI_DMA_TODEVICE);
567
568 tbi->len = buf_size; /* this automatically convert 2^14 to 0 */
569
570 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
571 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
572
573 gdesc->txd.addr = tbi->dma_addr;
574 gdesc->dword[2] = dw2 | buf_size;
575 gdesc->dword[3] = 0;
576
577 dev_dbg(&adapter->netdev->dev,
578 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
579 tq->tx_ring.next2fill, gdesc->txd.addr,
580 gdesc->dword[2], gdesc->dword[3]);
581 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
582 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
583
584 len -= buf_size;
585 buf_offset += buf_size;
586 }
587
588 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
589 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
590
591 tbi = tq->buf_info + tq->tx_ring.next2fill;
592 tbi->map_type = VMXNET3_MAP_PAGE;
593 tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
594 frag->page_offset, frag->size,
595 PCI_DMA_TODEVICE);
596
597 tbi->len = frag->size;
598
599 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
600 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
601
602 gdesc->txd.addr = tbi->dma_addr;
603 gdesc->dword[2] = dw2 | frag->size;
604 gdesc->dword[3] = 0;
605
606 dev_dbg(&adapter->netdev->dev,
607 "txd[%u]: 0x%llu %u %u\n",
608 tq->tx_ring.next2fill, gdesc->txd.addr,
609 gdesc->dword[2], gdesc->dword[3]);
610 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
611 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
612 }
613
614 ctx->eop_txd = gdesc;
615
616 /* set the last buf_info for the pkt */
617 tbi->skb = skb;
618 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
619}
620
621
622/*
623 * parse and copy relevant protocol headers:
624 * For a tso pkt, relevant headers are L2/3/4 including options
625 * For a pkt requesting csum offloading, they are L2/3 and may include L4
626 * if it's a TCP/UDP pkt
627 *
628 * Returns:
629 * -1: error happens during parsing
630 * 0: protocol headers parsed, but too big to be copied
631 * 1: protocol headers parsed and copied
632 *
633 * Other effects:
634 * 1. related *ctx fields are updated.
635 * 2. ctx->copy_size is # of bytes copied
636 * 3. the portion copied is guaranteed to be in the linear part
637 *
638 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {
		/* TSO: copy L2/L3 headers plus the whole TCP header;
		 * doff is the TCP data offset in 32-bit words.
		 */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		unsigned int pull_size;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_transport_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP) {
					/* ensure the fixed part of the TCP
					 * header is readable before using doff
					 */
					pull_size = ctx->eth_ip_hdr_size +
						    sizeof(struct tcphdr);

					if (unlikely(!pskb_may_pull(skb,
								pull_size))) {
						goto err;
					}
					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				} else if (iph->protocol == IPPROTO_UDP) {
					ctx->l4_hdr_size =
							sizeof(struct udphdr);
				} else {
					ctx->l4_hdr_size = 0;
				}
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			/* no csum offload requested: nothing to parse */
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		/* headers don't fit in a data-ring entry: copy nothing and
		 * let the caller DMA-map them instead (return 0)
		 */
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	/* stage the headers in the data ring slot matching next2fill */
	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
712
713
714static void
715vmxnet3_prepare_tso(struct sk_buff *skb,
716 struct vmxnet3_tx_ctx *ctx)
717{
718 struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
719 if (ctx->ipv4) {
720 struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
721 iph->check = 0;
722 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
723 IPPROTO_TCP, 0);
724 } else {
725 struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
726 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
727 IPPROTO_TCP, 0);
728 }
729}
730
731
732/*
733 * Transmits a pkt thru a given tq
734 * Returns:
735 * NETDEV_TX_OK: descriptors are setup successfully
 * NETDEV_TX_OK: error occurred, the pkt is dropped
737 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
738 *
739 * Side-effects:
740 * 1. tx ring may be changed
741 * 2. tq stats may be updated accordingly
742 * 3. shared->txNumDeferred may be updated
743 */
744
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	/* NOTE(review): __constant_ntohs(ETH_P_IP) byte-swaps the same way
	 * as __constant_htons(ETH_P_IP); htons would express the intent
	 * (host constant compared to wire-order skb->protocol) better —
	 * confirm before changing.
	 */
	ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		/* TSO rewrites the headers in place (vmxnet3_prepare_tso),
		 * so make sure we own the header area first.
		 */
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		/* ret == 0 means headers were too big to copy: then
		 * copy_size must have been reset to 0
		 */
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	/* serialize ring manipulation against the completion path */
	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

	/* setup the SOP desc */
	gdesc = ctx.sop_txd;
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		/* count one deferred tx unit per resulting TSO segment */
		tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
					     ctx.mss - 1) / ctx.mss;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		tq->shared->txNumDeferred++;
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* ensure all descriptor writes are visible to the device before
	 * ownership is handed over via the gen bit below
	 */
	wmb();

	/* finally flips the GEN bit of the SOP desc */
	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
		gdesc->dword[3]);

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	/* only kick the device once enough tx work has accumulated */
	if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
				       tq->tx_ring.next2fill);
	}
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
886
887
888static netdev_tx_t
889vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
890{
891 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
892 struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
893
894 return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
895}
896
897
898static void
899vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
900 struct sk_buff *skb,
901 union Vmxnet3_GenericDesc *gdesc)
902{
903 if (!gdesc->rcd.cnc && adapter->rxcsum) {
904 /* typical case: TCP/UDP over IP and both csums are correct */
905 if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
906 VMXNET3_RCD_CSUM_OK) {
907 skb->ip_summed = CHECKSUM_UNNECESSARY;
908 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
909 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
910 BUG_ON(gdesc->rcd.frg);
911 } else {
912 if (gdesc->rcd.csum) {
913 skb->csum = htons(gdesc->rcd.csum);
914 skb->ip_summed = CHECKSUM_PARTIAL;
915 } else {
916 skb->ip_summed = CHECKSUM_NONE;
917 }
918 }
919 } else {
920 skb->ip_summed = CHECKSUM_NONE;
921 }
922}
923
924
925static void
926vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
927 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
928{
929 rq->stats.drop_err++;
930 if (!rcd->fcs)
931 rq->stats.drop_fcs++;
932
933 rq->stats.drop_total++;
934
935 /*
936 * We do not unmap and chain the rx buffer to the skb.
937 * We basically pretend this buffer is not used and will be recycled
938 * by vmxnet3_rq_alloc_rx_buf()
939 */
940
941 /*
942 * ctx->skb may be NULL if this is the first and the only one
943 * desc for the pkt
944 */
945 if (ctx->skb)
946 dev_kfree_skb_irq(ctx->skb);
947
948 ctx->skb = NULL;
949}
950
951
/*
 * Process up to @quota rx completion descriptors: assemble packets,
 * hand them to the stack and refill rx buffers.  Returns the number of
 * completion descriptors consumed (NAPI work done).
 */
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;

	rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
	/* the gen bit flags completion descriptors the device handed back */
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;

		idx = rcd->rxdIdx;
		/* rqID equal to rq->qid selects ring 0, otherwise ring 1 */
		ring_idx = rcd->rqID == rq->qid ? 0 : 1;

		rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			/* take ownership of the skb; the ring slot is
			 * refilled by vmxnet3_rq_alloc_rx_buf() below
			 */
			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			/* last buffer of the packet: finalize and pass up */
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			adapter->netdev->last_rx = jiffies;
			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
	}

	return num_rxd;
}
1086
1087
1088static void
1089vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1090 struct vmxnet3_adapter *adapter)
1091{
1092 u32 i, ring_idx;
1093 struct Vmxnet3_RxDesc *rxd;
1094
1095 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1096 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1097 rxd = &rq->rx_ring[ring_idx].base[i].rxd;
1098
1099 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1100 rq->buf_info[ring_idx][i].skb) {
1101 pci_unmap_single(adapter->pdev, rxd->addr,
1102 rxd->len, PCI_DMA_FROMDEVICE);
1103 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1104 rq->buf_info[ring_idx][i].skb = NULL;
1105 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1106 rq->buf_info[ring_idx][i].page) {
1107 pci_unmap_page(adapter->pdev, rxd->addr,
1108 rxd->len, PCI_DMA_FROMDEVICE);
1109 put_page(rq->buf_info[ring_idx][i].page);
1110 rq->buf_info[ring_idx][i].page = NULL;
1111 }
1112 }
1113
1114 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1115 rq->rx_ring[ring_idx].next2fill =
1116 rq->rx_ring[ring_idx].next2comp = 0;
1117 rq->uncommitted[ring_idx] = 0;
1118 }
1119
1120 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1121 rq->comp_ring.next2proc = 0;
1122}
1123
1124
1125void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1126 struct vmxnet3_adapter *adapter)
1127{
1128 int i;
1129 int j;
1130
1131 /* all rx buffers must have already been freed */
1132 for (i = 0; i < 2; i++) {
1133 if (rq->buf_info[i]) {
1134 for (j = 0; j < rq->rx_ring[i].size; j++)
1135 BUG_ON(rq->buf_info[i][j].page != NULL);
1136 }
1137 }
1138
1139
1140 kfree(rq->buf_info[0]);
1141
1142 for (i = 0; i < 2; i++) {
1143 if (rq->rx_ring[i].base) {
1144 pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
1145 * sizeof(struct Vmxnet3_RxDesc),
1146 rq->rx_ring[i].base,
1147 rq->rx_ring[i].basePA);
1148 rq->rx_ring[i].base = NULL;
1149 }
1150 rq->buf_info[i] = NULL;
1151 }
1152
1153 if (rq->comp_ring.base) {
1154 pci_free_consistent(adapter->pdev, rq->comp_ring.size *
1155 sizeof(struct Vmxnet3_RxCompDesc),
1156 rq->comp_ring.base, rq->comp_ring.basePA);
1157 rq->comp_ring.base = NULL;
1158 }
1159}
1160
1161
/*
 * Reset the rx queue's rings and buf_info and pre-post rx buffers.
 * Returns 0 on success, -ENOMEM when not even one buffer could be
 * allocated for the first ring.  Stats are deliberately preserved.
 */
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	/* the second ring holds page (frag) buffers only */
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	/* ring 1 is optional: a failure here is tolerated */
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
1213
1214
1215static int
1216vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1217{
1218 int i;
1219 size_t sz;
1220 struct vmxnet3_rx_buf_info *bi;
1221
1222 for (i = 0; i < 2; i++) {
1223
1224 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1225 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
1226 &rq->rx_ring[i].basePA);
1227 if (!rq->rx_ring[i].base) {
1228 printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
1229 adapter->netdev->name, i);
1230 goto err;
1231 }
1232 }
1233
1234 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1235 rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
1236 &rq->comp_ring.basePA);
1237 if (!rq->comp_ring.base) {
1238 printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
1239 adapter->netdev->name);
1240 goto err;
1241 }
1242
1243 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1244 rq->rx_ring[1].size);
1245 bi = kmalloc(sz, GFP_KERNEL);
1246 if (!bi) {
1247 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
1248 adapter->netdev->name);
1249 goto err;
1250 }
1251 memset(bi, 0, sz);
1252 rq->buf_info[0] = bi;
1253 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1254
1255 return 0;
1256
1257err:
1258 vmxnet3_rq_destroy(rq, adapter);
1259 return -ENOMEM;
1260}
1261
1262
1263static int
1264vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1265{
1266 if (unlikely(adapter->shared->ecr))
1267 vmxnet3_process_events(adapter);
1268
1269 vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
1270 return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
1271}
1272
1273
1274static int
1275vmxnet3_poll(struct napi_struct *napi, int budget)
1276{
1277 struct vmxnet3_adapter *adapter = container_of(napi,
1278 struct vmxnet3_adapter, napi);
1279 int rxd_done;
1280
1281 rxd_done = vmxnet3_do_poll(adapter, budget);
1282
1283 if (rxd_done < budget) {
1284 napi_complete(napi);
1285 vmxnet3_enable_intr(adapter, 0);
1286 }
1287 return rxd_done;
1288}
1289
1290
1291/* Interrupt handler for vmxnet3 */
1292static irqreturn_t
1293vmxnet3_intr(int irq, void *dev_id)
1294{
1295 struct net_device *dev = dev_id;
1296 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1297
1298 if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
1299 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1300 if (unlikely(icr == 0))
1301 /* not ours */
1302 return IRQ_NONE;
1303 }
1304
1305
1306 /* disable intr if needed */
1307 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1308 vmxnet3_disable_intr(adapter, 0);
1309
1310 napi_schedule(&adapter->napi);
1311
1312 return IRQ_HANDLED;
1313}
1314
1315#ifdef CONFIG_NET_POLL_CONTROLLER
1316
1317
1318/* netpoll callback. */
1319static void
1320vmxnet3_netpoll(struct net_device *netdev)
1321{
1322 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1323 int irq;
1324
1325#ifdef CONFIG_PCI_MSI
1326 if (adapter->intr.type == VMXNET3_IT_MSIX)
1327 irq = adapter->intr.msix_entries[0].vector;
1328 else
1329#endif
1330 irq = adapter->pdev->irq;
1331
1332 disable_irq(irq);
1333 vmxnet3_intr(irq, netdev);
1334 enable_irq(irq);
1335}
1336#endif
1337
/*
 * Request the device interrupt according to the configured intr type
 * (single MSI-X vector, MSI, or shared legacy INTx) and initialize the
 * moderation levels and per-source intr indices.  Returns 0 on success
 * or the request_irq() error code.
 */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	int err;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		/* we only use 1 MSI-X vector */
		err = request_irq(adapter->intr.msix_entries[0].vector,
				  vmxnet3_intr, 0, adapter->netdev->name,
				  adapter->netdev);
	} else
#endif
	if (adapter->intr.type == VMXNET3_IT_MSI) {
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
		/* legacy INTx can be shared with other devices */
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
	}

	if (err)
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
			":%d\n", adapter->netdev->name, adapter->intr.type, err);


	if (!err) {
		int i;
		/* init our intr settings */
		for (i = 0; i < adapter->intr.num_intrs; i++)
			adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;

		/* next setup intr index for all intr sources */
		adapter->tx_queue.comp_ring.intr_idx = 0;
		adapter->rx_queue.comp_ring.intr_idx = 0;
		adapter->intr.event_intr_idx = 0;

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, adapter->intr.type,
		       adapter->intr.mask_mode, adapter->intr.num_intrs);
	}

	return err;
}
1383
1384
1385static void
1386vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1387{
1388 BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
1389 adapter->intr.num_intrs <= 0);
1390
1391 switch (adapter->intr.type) {
1392#ifdef CONFIG_PCI_MSI
1393 case VMXNET3_IT_MSIX:
1394 {
1395 int i;
1396
1397 for (i = 0; i < adapter->intr.num_intrs; i++)
1398 free_irq(adapter->intr.msix_entries[i].vector,
1399 adapter->netdev);
1400 break;
1401 }
1402#endif
1403 case VMXNET3_IT_MSI:
1404 free_irq(adapter->pdev->irq, adapter->netdev);
1405 break;
1406 case VMXNET3_IT_INTX:
1407 free_irq(adapter->pdev->irq, adapter->netdev);
1408 break;
1409 default:
1410 BUG_ON(true);
1411 }
1412}
1413
1414
/*
 * .ndo_vlan_rx_register hook: attach (grp != NULL) or detach the vlan
 * group and program the device's rx vlan stripping and filter table to
 * match.
 */
static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	if (grp) {
		/* add vlan rx stripping. */
		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
			int i;
			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
			adapter->vlan_grp = grp;

			/* update FEATURES to device */
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			/*
			 * Clear entire vfTable; then enable untagged pkts.
			 * Note: setting one entry in vfTable to non-zero turns
			 * on VLAN rx filtering.
			 */
			for (i = 0; i < VMXNET3_VFT_SIZE; i++)
				vfTable[i] = 0;

			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		} else {
			printk(KERN_ERR "%s: vlan_rx_register when device has "
			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
		}
	} else {
		/* remove vlan rx stripping. */
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;

		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
			int i;

			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
				/* clear entire vfTable; this also disables
				 * VLAN rx filtering
				 */
				vfTable[i] = 0;
			}
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);

			/* update FEATURES to device */
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
	}
}
1472
1473
1474static void
1475vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1476{
1477 if (adapter->vlan_grp) {
1478 u16 vid;
1479 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1480 bool activeVlan = false;
1481
1482 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1483 if (vlan_group_get_device(adapter->vlan_grp, vid)) {
1484 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1485 activeVlan = true;
1486 }
1487 }
1488 if (activeVlan) {
1489 /* continue to allow untagged pkts */
1490 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1491 }
1492 }
1493}
1494
1495
1496static void
1497vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1498{
1499 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1500 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1501
1502 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1503 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1504 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1505}
1506
1507
1508static void
1509vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1510{
1511 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1512 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1513
1514 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1515 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1516 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1517}
1518
1519
1520static u8 *
1521vmxnet3_copy_mc(struct net_device *netdev)
1522{
1523 u8 *buf = NULL;
1524 u32 sz = netdev->mc_count * ETH_ALEN;
1525
1526 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1527 if (sz <= 0xffff) {
1528 /* We may be called with BH disabled */
1529 buf = kmalloc(sz, GFP_ATOMIC);
1530 if (buf) {
1531 int i;
1532 struct dev_mc_list *mc = netdev->mc_list;
1533
1534 for (i = 0; i < netdev->mc_count; i++) {
1535 BUG_ON(!mc);
1536 memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
1537 ETH_ALEN);
1538 mc = mc->next;
1539 }
1540 }
1541 }
1542 return buf;
1543}
1544
1545
1546static void
1547vmxnet3_set_mc(struct net_device *netdev)
1548{
1549 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1550 struct Vmxnet3_RxFilterConf *rxConf =
1551 &adapter->shared->devRead.rxFilterConf;
1552 u8 *new_table = NULL;
1553 u32 new_mode = VMXNET3_RXM_UCAST;
1554
1555 if (netdev->flags & IFF_PROMISC)
1556 new_mode |= VMXNET3_RXM_PROMISC;
1557
1558 if (netdev->flags & IFF_BROADCAST)
1559 new_mode |= VMXNET3_RXM_BCAST;
1560
1561 if (netdev->flags & IFF_ALLMULTI)
1562 new_mode |= VMXNET3_RXM_ALL_MULTI;
1563 else
1564 if (netdev->mc_count > 0) {
1565 new_table = vmxnet3_copy_mc(netdev);
1566 if (new_table) {
1567 new_mode |= VMXNET3_RXM_MCAST;
1568 rxConf->mfTableLen = netdev->mc_count *
1569 ETH_ALEN;
1570 rxConf->mfTablePA = virt_to_phys(new_table);
1571 } else {
1572 printk(KERN_INFO "%s: failed to copy mcast list"
1573 ", setting ALL_MULTI\n", netdev->name);
1574 new_mode |= VMXNET3_RXM_ALL_MULTI;
1575 }
1576 }
1577
1578
1579 if (!(new_mode & VMXNET3_RXM_MCAST)) {
1580 rxConf->mfTableLen = 0;
1581 rxConf->mfTablePA = 0;
1582 }
1583
1584 if (new_mode != rxConf->rxMode) {
1585 rxConf->rxMode = new_mode;
1586 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1587 VMXNET3_CMD_UPDATE_RX_MODE);
1588 }
1589
1590 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1591 VMXNET3_CMD_UPDATE_MAC_FILTERS);
1592
1593 kfree(new_table);
1594}
1595
1596
1597/*
1598 * Set up driver_shared based on settings in adapter.
1599 */
1600
static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;

	/* driver data area, for the device to report back to us */
	devRead->misc.ddPA = virt_to_phys(adapter);
	devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);

	/* set up feature flags */
	if (adapter->rxcsum)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->lro) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
	}
	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
			&& adapter->vlan_grp) {
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
	}

	devRead->misc.mtu = adapter->netdev->mtu;
	devRead->misc.queueDescPA = adapter->queue_desc_pa;
	devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
				     sizeof(struct Vmxnet3_RxQueueDesc);

	/* tx queue settings */
	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);

	devRead->misc.numTxQueues = 1;
	tqc = &adapter->tqd_start->conf;
	tqc->txRingBasePA   = adapter->tx_queue.tx_ring.basePA;
	tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
	tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
	tqc->ddPA           = virt_to_phys(adapter->tx_queue.buf_info);
	tqc->txRingSize     = adapter->tx_queue.tx_ring.size;
	tqc->dataRingSize   = adapter->tx_queue.data_ring.size;
	tqc->compRingSize   = adapter->tx_queue.comp_ring.size;
	tqc->ddLen          = sizeof(struct vmxnet3_tx_buf_info) *
			      tqc->txRingSize;
	tqc->intrIdx        = adapter->tx_queue.comp_ring.intr_idx;

	/* rx queue settings */
	devRead->misc.numRxQueues = 1;
	rqc = &adapter->rqd_start->conf;
	rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
	rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
	rqc->compRingBasePA  = adapter->rx_queue.comp_ring.basePA;
	rqc->ddPA            = virt_to_phys(adapter->rx_queue.buf_info);
	rqc->rxRingSize[0]   = adapter->rx_queue.rx_ring[0].size;
	rqc->rxRingSize[1]   = adapter->rx_queue.rx_ring[1].size;
	rqc->compRingSize    = adapter->rx_queue.comp_ring.size;
	rqc->ddLen           = sizeof(struct vmxnet3_rx_buf_info) *
			       (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
	rqc->intrIdx         = adapter->rx_queue.comp_ring.intr_idx;

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	/* the rest are already zeroed */
}
1686
1687
/*
 * Bring the device up: init rings, request irqs, publish the shared area
 * and issue ACTIVATE_DEV.  Returns 0 on success or a negative errno; on
 * failure all resources acquired here are released again.
 */
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err;
	u32 ret;

	dev_dbg(&adapter->netdev->dev,
		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
		adapter->rx_queue.rx_ring[0].size,
		adapter->rx_queue.rx_ring[1].size);

	vmxnet3_tq_init(&adapter->tx_queue, adapter);
	err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	/* tell the device where the shared area lives */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(adapter->shared_pa));

	/* ACTIVATE_DEV returns its status in the CMD register (0 == ok) */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}
	/* hand the pre-filled rx buffers to the device */
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
			       adapter->rx_queue.rx_ring[0].next2fill);
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
			       adapter->rx_queue.rx_ring[1].next2fill);

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter);

	napi_enable(&adapter->napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	/* withdraw the shared area from the device and undo irq setup */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	return err;
}
1762
1763
/* Issue a device reset command through BAR1. */
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
}
1769
1770
/*
 * Stop the device: send QUIESCE_DEV, shut off interrupts, NAPI and the
 * tx queue, then tear down ring contents and irqs.  Idempotent: returns
 * immediately if the QUIESCED bit was already set.
 */
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	/* already quiesced -- nothing to do */
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;


	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	vmxnet3_disable_all_intrs(adapter);

	napi_disable(&adapter->napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	/* rings are drained now; release their buffers and the irqs */
	vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}
1792
1793
/*
 * Program the 6-byte MAC address into the device: the first four bytes
 * go to MACL as one 32-bit word, the last two to MACH.
 */
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	/*
	 * NOTE(review): the u32 load reuses the byte array's in-memory
	 * layout; assumes little-endian register format -- confirm on
	 * big-endian before reuse.
	 */
	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
1805
1806
1807static int
1808vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
1809{
1810 struct sockaddr *addr = p;
1811 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1812
1813 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1814 vmxnet3_write_mac_addr(adapter, addr->sa_data);
1815
1816 return 0;
1817}
1818
1819
1820/* ==================== initialization and cleanup routines ============ */
1821
/*
 * Enable the PCI device, pick a DMA mask (64-bit preferred, 32-bit
 * fallback), claim BARs 0 and 1 and map both.  *dma64 reports which
 * mask was chosen.  Returns 0 or a negative errno; on failure the error
 * labels unwind in reverse acquisition order.
 */
static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n",	pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	/* (1 << 2) - 1 == bitmask selecting BARs 0 and 1 */
	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}
1893
1894
/*
 * Undo vmxnet3_alloc_pci_resources(): unmap both BARs, release the
 * claimed regions (BARs 0 and 1) and disable the PCI device.
 */
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}
1905
1906
1907static void
1908vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
1909{
1910 size_t sz;
1911
1912 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
1913 VMXNET3_MAX_ETH_HDR_SIZE) {
1914 adapter->skb_buf_size = adapter->netdev->mtu +
1915 VMXNET3_MAX_ETH_HDR_SIZE;
1916 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
1917 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
1918
1919 adapter->rx_buf_per_pkt = 1;
1920 } else {
1921 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
1922 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
1923 VMXNET3_MAX_ETH_HDR_SIZE;
1924 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
1925 }
1926
1927 /*
1928 * for simplicity, force the ring0 size to be a multiple of
1929 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
1930 */
1931 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
1932 adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
1933 sz - 1) / sz * sz;
1934 adapter->rx_queue.rx_ring[0].size = min_t(u32,
1935 adapter->rx_queue.rx_ring[0].size,
1936 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
1937}
1938
1939
/*
 * Allocate the single tx queue and single rx queue with the requested
 * ring sizes (rx sizes may be adjusted for alignment/MTU).  Returns 0
 * or a negative errno; on rx failure the tx queue is destroyed again.
 */
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err;

	adapter->tx_queue.tx_ring.size   = tx_ring_size;
	adapter->tx_queue.data_ring.size = tx_ring_size;
	adapter->tx_queue.comp_ring.size = tx_ring_size;
	adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
	adapter->tx_queue.stopped = true;
	err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
	if (err)
		return err;

	adapter->rx_queue.rx_ring[0].size = rx_ring_size;
	adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	/* completion ring must hold entries for both rx rings */
	adapter->rx_queue.comp_ring.size  = adapter->rx_queue.rx_ring[0].size +
					    adapter->rx_queue.rx_ring[1].size;
	adapter->rx_queue.qid  = 0;
	adapter->rx_queue.qid2 = 1;
	adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
	err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
	if (err)
		vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	return err;
}
1969
/*
 * ndo_open handler: create the tx/rx queues with default ring sizes and
 * activate the device.  Queues are destroyed again if activation fails.
 */
static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err;

	adapter = netdev_priv(netdev);

	spin_lock_init(&adapter->tx_queue.tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
queue_err:
	return err;
}
1998
1999
/*
 * ndo_stop handler: serialize with the reset worker via the RESETTING
 * bit, quiesce the device and free the queues.
 */
static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);


	return 0;
}
2022
2023
/*
 * Close the interface from an error path.  Caller must not hold the
 * RESETTING bit, otherwise the vmxnet3_close() invoked by dev_close()
 * would spin forever waiting for it.
 */
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	napi_enable(&adapter->napi);
	dev_close(adapter->netdev);
}
2037
2038
/*
 * ndo_change_mtu handler.  Validates the new MTU, then -- if the device
 * is running -- quiesces and resets it, rebuilds the rx queue sized for
 * the new MTU and re-activates.  On any re-setup failure the interface
 * is force-closed.  Returns 0 or a negative errno.
 */
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	/* MTUs above 1500 require the jumbo-frame feature */
	if (new_mtu > 1500 && !adapter->jumbo_frame)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		adapter->rx_queue.comp_ring.size  =
					adapter->rx_queue.rx_ring[0].size +
					adapter->rx_queue.rx_ring[1].size;
		err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queue,"
				" error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
				"Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
2092
2093
/*
 * Advertise the netdev feature set (checksum offload, VLAN offload,
 * TSO, LRO; HIGHDMA when a 64-bit DMA mask was set) and record the
 * matching software flags on the adapter.  The printk continuations
 * build a single feature line in the log.
 */
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->features = NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER |
		NETIF_F_TSO |
		NETIF_F_TSO6 |
		NETIF_F_LRO;

	printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");

	adapter->rxcsum = true;
	adapter->jumbo_frame = true;
	adapter->lro = true;

	if (dma64) {
		netdev->features |= NETIF_F_HIGHDMA;
		printk(" highDMA");
	}

	netdev->vlan_features = netdev->features;
	printk("\n");
}
2122
2123
/*
 * Read the 6-byte MAC address from the device: MACL supplies the first
 * four bytes as one 32-bit word, MACH the last two.  Mirror image of
 * vmxnet3_write_mac_addr().
 */
static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	/* NOTE(review): u32 store into the byte array assumes LE layout */
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}
2136
2137
/*
 * Query the device for the preferred interrupt type and mask mode, then
 * try MSI-X, fall back to MSI, and finally to legacy INTx.  Sets
 * adapter->intr.{type,mask_mode,num_intrs}.
 */
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;

	/* intr settings: low 2 bits = type, next 2 bits = mask mode */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		int err;

#ifdef CONFIG_PCI_MSI
		adapter->intr.msix_entries[0].entry = 0;
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      VMXNET3_LINUX_MAX_MSIX_VECT);
		if (!err) {
			adapter->intr.num_intrs = 1;
			adapter->intr.type = VMXNET3_IT_MSIX;
			return;
		}
#endif

		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->intr.num_intrs = 1;
			adapter->intr.type = VMXNET3_IT_MSI;
			return;
		}
	}

	/* MSI-X/MSI unavailable (or not requested): use legacy INTx */
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}
2177
2178
2179static void
2180vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2181{
2182 if (adapter->intr.type == VMXNET3_IT_MSIX)
2183 pci_disable_msix(adapter->pdev);
2184 else if (adapter->intr.type == VMXNET3_IT_MSI)
2185 pci_disable_msi(adapter->pdev);
2186 else
2187 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2188}
2189
2190
/*
 * ndo_tx_timeout handler: count the hang and schedule the reset worker
 * to recover the device.
 */
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
}
2200
2201
/*
 * Workqueue handler scheduled from vmxnet3_tx_timeout(): quiesce, reset
 * and re-activate the device.  The RESETTING bit serializes against
 * close/change_mtu and against a concurrently-running reset.
 */
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
2225
2226
/*
 * PCI probe: allocate the netdev and the DMA-coherent shared/queue
 * descriptor areas, map the BARs, verify device and UPT version
 * compatibility, read the MAC, set up netdev ops and register the
 * interface.  Error labels unwind in reverse acquisition order.
 */
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_get_stats = vmxnet3_get_stats,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_multicast_list = vmxnet3_set_mc,
		.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];

	netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
	if (!netdev) {
		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
			"%s\n",	pci_name(pdev));
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	/* driver-shared area the device reads/writes over DMA */
	adapter->shared = pci_alloc_consistent(adapter->pdev,
			  sizeof(struct Vmxnet3_DriverShared),
			  &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	/* one allocation holds the tx queue desc followed by the rx one */
	adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
			     sizeof(struct Vmxnet3_TxQueueDesc) +
			     sizeof(struct Vmxnet3_RxQueueDesc),
			     &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
							    + 1);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_pm;
	}

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	/* negotiate device revision: bit 0 = revision 1 supported */
	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n",	ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	/* negotiate UPT (passthru stats) version the same way */
	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);
	vmxnet3_alloc_intr_resources(adapter);

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr,  mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	vmxnet3_set_ethtool_ops(netdev);

	INIT_WORK(&adapter->work, vmxnet3_reset_work);

	netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
			pci_name(pdev));
		goto err_register;
	}

	/* start in quiesced state; ndo_open activates the device */
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
2368
2369
/*
 * PCI remove: flush any pending reset work, unregister the interface
 * and release everything vmxnet3_probe_device() allocated, in reverse
 * order.
 */
static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/* make sure vmxnet3_reset_work() is not running afterwards */
	flush_scheduled_work();

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
	kfree(adapter->pm_conf);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}
2390
2391
2392#ifdef CONFIG_PM
2393
2394static int
2395vmxnet3_suspend(struct device *device)
2396{
2397 struct pci_dev *pdev = to_pci_dev(device);
2398 struct net_device *netdev = pci_get_drvdata(pdev);
2399 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2400 struct Vmxnet3_PMConf *pmConf;
2401 struct ethhdr *ehdr;
2402 struct arphdr *ahdr;
2403 u8 *arpreq;
2404 struct in_device *in_dev;
2405 struct in_ifaddr *ifa;
2406 int i = 0;
2407
2408 if (!netif_running(netdev))
2409 return 0;
2410
2411 vmxnet3_disable_all_intrs(adapter);
2412 vmxnet3_free_irqs(adapter);
2413 vmxnet3_free_intr_resources(adapter);
2414
2415 netif_device_detach(netdev);
2416 netif_stop_queue(netdev);
2417
2418 /* Create wake-up filters. */
2419 pmConf = adapter->pm_conf;
2420 memset(pmConf, 0, sizeof(*pmConf));
2421
2422 if (adapter->wol & WAKE_UCAST) {
2423 pmConf->filters[i].patternSize = ETH_ALEN;
2424 pmConf->filters[i].maskSize = 1;
2425 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
2426 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
2427
2428 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
2429 i++;
2430 }
2431
2432 if (adapter->wol & WAKE_ARP) {
2433 in_dev = in_dev_get(netdev);
2434 if (!in_dev)
2435 goto skip_arp;
2436
2437 ifa = (struct in_ifaddr *)in_dev->ifa_list;
2438 if (!ifa)
2439 goto skip_arp;
2440
2441 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
2442 sizeof(struct arphdr) + /* ARP header */
2443 2 * ETH_ALEN + /* 2 Ethernet addresses*/
2444 2 * sizeof(u32); /*2 IPv4 addresses */
2445 pmConf->filters[i].maskSize =
2446 (pmConf->filters[i].patternSize - 1) / 8 + 1;
2447
2448 /* ETH_P_ARP in Ethernet header. */
2449 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
2450 ehdr->h_proto = htons(ETH_P_ARP);
2451
2452 /* ARPOP_REQUEST in ARP header. */
2453 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
2454 ahdr->ar_op = htons(ARPOP_REQUEST);
2455 arpreq = (u8 *)(ahdr + 1);
2456
2457 /* The Unicast IPv4 address in 'tip' field. */
2458 arpreq += 2 * ETH_ALEN + sizeof(u32);
2459 *(u32 *)arpreq = ifa->ifa_address;
2460
2461 /* The mask for the relevant bits. */
2462 pmConf->filters[i].mask[0] = 0x00;
2463 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
2464 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
2465 pmConf->filters[i].mask[3] = 0x00;
2466 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
2467 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
2468 in_dev_put(in_dev);
2469
2470 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
2471 i++;
2472 }
2473
2474skip_arp:
2475 if (adapter->wol & WAKE_MAGIC)
2476 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
2477
2478 pmConf->numFilters = i;
2479
2480 adapter->shared->devRead.pmConfDesc.confVer = 1;
2481 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
2482 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
2483
2484 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2485 VMXNET3_CMD_UPDATE_PMCFG);
2486
2487 pci_save_state(pdev);
2488 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
2489 adapter->wol);
2490 pci_disable_device(pdev);
2491 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
2492
2493 return 0;
2494}
2495
2496
/*
 * PM resume handler: clear the wake-up filters, re-power and restore
 * the PCI device, push the (now empty) PM config and re-establish
 * interrupts.
 *
 * NOTE(review): netif_device_attach() runs before
 * pci_enable_device_mem()/pci_restore_state(); confirm this ordering is
 * intended -- the stack could see the device before it is powered.
 */
static int
vmxnet3_resume(struct device *device)
{
	int err;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = 1;
	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}
2534
/* Power-management callbacks wired into the PCI driver below. */
static struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
2539#endif
2540
/* PCI driver descriptor: probe/remove entry points and PM hooks. */
static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};
2550
2551
/* Module init: announce the driver version and register with PCI. */
static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}
2559
2560module_init(vmxnet3_init_module);
2561
2562
/* Module exit: unregister from the PCI core. */
static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}
2568
2569module_exit(vmxnet3_exit_module);
2570
2571MODULE_AUTHOR("VMware, Inc.");
2572MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
2573MODULE_LICENSE("GPL v2");
2574MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
new file mode 100644
index 000000000000..c2c15e4cafc7
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -0,0 +1,566 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27
28#include "vmxnet3_int.h"
29
/* Maps an ethtool stat name to its byte offset within a stats struct. */
struct vmxnet3_stat_desc {
	char desc[ETH_GSTRING_LEN];
	int offset;
};
34
35
36static u32
37vmxnet3_get_rx_csum(struct net_device *netdev)
38{
39 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
40 return adapter->rxcsum;
41}
42
43
/*
 * ethtool set_rx_csum: record the new setting and, if the device is
 * running, flip the UPT feature bit in the shared area and tell the
 * device to re-read its feature configuration.  Always returns 0.
 */
static int
vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->rxcsum != val) {
		adapter->rxcsum = val;
		if (netif_running(netdev)) {
			if (val)
				adapter->shared->devRead.misc.uptFeatures |=
								UPT1_F_RXCSUM;
			else
				adapter->shared->devRead.misc.uptFeatures &=
								~UPT1_F_RXCSUM;

			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
	}
	return 0;
}
65
66
/* per tq stats maintained by the device (read out of UPT1_TxStats) */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
	/* description,         offset */
	{ "TSO pkts tx",	offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
	{ "TSO bytes tx",	offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
	{ "ucast pkts tx",	offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
	{ "ucast bytes tx",	offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
	{ "mcast pkts tx",	offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
	{ "mcast bytes tx",	offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
	{ "bcast pkts tx",	offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
	{ "bcast bytes tx",	offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
	{ "pkts tx err",	offsetof(struct UPT1_TxStats, pktsTxError) },
	{ "pkts tx discard",	offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};
82
/* per tq stats maintained by the driver; names with a leading space
 * display as sub-items of "drv dropped tx total" in ethtool output */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
	/* description,         offset */
	{"drv dropped tx total",	offsetof(struct vmxnet3_tq_driver_stats,
					drop_total) },
	{ "   too many frags",	offsetof(struct vmxnet3_tq_driver_stats,
					drop_too_many_frags) },
	{ "   giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
					drop_oversized_hdr) },
	{ "   hdr err",		offsetof(struct vmxnet3_tq_driver_stats,
					drop_hdr_inspect_err) },
	{ "   tso",		offsetof(struct vmxnet3_tq_driver_stats,
					drop_tso) },
	{ "ring full",		offsetof(struct vmxnet3_tq_driver_stats,
					tx_ring_full) },
	{ "pkts linearized",	offsetof(struct vmxnet3_tq_driver_stats,
					linearized) },
	{ "hdr cloned",		offsetof(struct vmxnet3_tq_driver_stats,
					copy_skb_header) },
	{ "giant hdr",		offsetof(struct vmxnet3_tq_driver_stats,
					oversized_hdr) },
};
106
/* per rq stats maintained by the device (read out of UPT1_RxStats) */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
	/* description,         offset */
	/* NOTE(review): "LRO byte rx" (singular) is inconsistent with the
	 * other "bytes" names but is user-visible ethtool output --
	 * renaming could break parsers, so it is left as-is. */
	{ "LRO pkts rx",	offsetof(struct UPT1_RxStats, LROPktsRxOK) },
	{ "LRO byte rx",	offsetof(struct UPT1_RxStats, LROBytesRxOK) },
	{ "ucast pkts rx",	offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
	{ "ucast bytes rx",	offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
	{ "mcast pkts rx",	offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
	{ "mcast bytes rx",	offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
	{ "bcast pkts rx",	offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
	{ "bcast bytes rx",	offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
	{ "pkts rx out of buf",	offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
	{ "pkts rx err",	offsetof(struct UPT1_RxStats, pktsRxError) },
};
121
/* per rq stats maintained by the driver; leading-space names display as
 * sub-items of "drv dropped rx total" */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
	/* description,         offset */
	{ "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
					   drop_total) },
	{ "   err",		offsetof(struct vmxnet3_rq_driver_stats,
					 drop_err) },
	{ "   fcs",		offsetof(struct vmxnet3_rq_driver_stats,
					 drop_fcs) },
	{ "rx buf alloc fail",	offsetof(struct vmxnet3_rq_driver_stats,
					 rx_buf_alloc_failure) },
};
135
/* global stats maintained by the driver (adapter-wide counters) */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
	/* description,         offset */
	{ "tx timeout count",	offsetof(struct vmxnet3_adapter,
					 tx_timeout_count) }
};
143
144
/*
 * ndo_get_stats handler: ask the device to refresh its counters into
 * the shared area, then aggregate device tx/rx stats and driver drop
 * counts into netdev->stats.  Assumes a single tx and rx queue.
 */
struct net_device_stats *
vmxnet3_get_stats(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	struct vmxnet3_tq_driver_stats *drvTxStats;
	struct vmxnet3_rq_driver_stats *drvRxStats;
	struct UPT1_TxStats *devTxStats;
	struct UPT1_RxStats *devRxStats;
	struct net_device_stats *net_stats = &netdev->stats;

	adapter = netdev_priv(netdev);

	/* Collect the dev stats into the shared area */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	/* Assuming that we have a single queue device */
	devTxStats = &adapter->tqd_start->stats;
	devRxStats = &adapter->rqd_start->stats;

	/* Get access to the driver stats per queue */
	drvTxStats = &adapter->tx_queue.stats;
	drvRxStats = &adapter->rx_queue.stats;

	memset(net_stats, 0, sizeof(*net_stats));

	net_stats->rx_packets = devRxStats->ucastPktsRxOK +
				devRxStats->mcastPktsRxOK +
				devRxStats->bcastPktsRxOK;

	net_stats->tx_packets = devTxStats->ucastPktsTxOK +
				devTxStats->mcastPktsTxOK +
				devTxStats->bcastPktsTxOK;

	net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
			      devRxStats->mcastBytesRxOK +
			      devRxStats->bcastBytesRxOK;

	net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
			      devTxStats->mcastBytesTxOK +
			      devTxStats->bcastBytesTxOK;

	net_stats->rx_errors = devRxStats->pktsRxError;
	net_stats->tx_errors = devTxStats->pktsTxError;
	net_stats->rx_dropped = drvRxStats->drop_total;
	net_stats->tx_dropped = drvTxStats->drop_total;
	net_stats->multicast =  devRxStats->mcastPktsRxOK;

	return net_stats;
}
194
195static int
196vmxnet3_get_sset_count(struct net_device *netdev, int sset)
197{
198 switch (sset) {
199 case ETH_SS_STATS:
200 return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
201 ARRAY_SIZE(vmxnet3_tq_driver_stats) +
202 ARRAY_SIZE(vmxnet3_rq_dev_stats) +
203 ARRAY_SIZE(vmxnet3_rq_driver_stats) +
204 ARRAY_SIZE(vmxnet3_global_stats);
205 default:
206 return -EOPNOTSUPP;
207 }
208}
209
210
/* Size in bytes of the register dump produced by vmxnet3_get_regs():
 * 20 u32 words (five groups of four). */
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
	return 20 * sizeof(u32);
}
216
217
218static void
219vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
220{
221 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
222
223 strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
224 drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
225
226 strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
227 sizeof(drvinfo->version));
228 drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
229
230 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
231 drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
232
233 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
234 ETHTOOL_BUSINFO_LEN);
235 drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
236 drvinfo->testinfo_len = 0;
237 drvinfo->eedump_len = 0;
238 drvinfo->regdump_len = vmxnet3_get_regs_len(netdev);
239}
240
241
242static void
243vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
244{
245 if (stringset == ETH_SS_STATS) {
246 int i;
247
248 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
249 memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
250 ETH_GSTRING_LEN);
251 buf += ETH_GSTRING_LEN;
252 }
253 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
254 memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
255 ETH_GSTRING_LEN);
256 buf += ETH_GSTRING_LEN;
257 }
258 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
259 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
260 ETH_GSTRING_LEN);
261 buf += ETH_GSTRING_LEN;
262 }
263 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
264 memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
265 ETH_GSTRING_LEN);
266 buf += ETH_GSTRING_LEN;
267 }
268 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
269 memcpy(buf, vmxnet3_global_stats[i].desc,
270 ETH_GSTRING_LEN);
271 buf += ETH_GSTRING_LEN;
272 }
273 }
274}
275
276static u32
277vmxnet3_get_flags(struct net_device *netdev) {
278 return netdev->features;
279}
280
281static int
282vmxnet3_set_flags(struct net_device *netdev, u32 data) {
283 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
284 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
285 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
286
287 if (lro_requested ^ lro_present) {
288 /* toggle the LRO feature*/
289 netdev->features ^= NETIF_F_LRO;
290
291 /* update harware LRO capability accordingly */
292 if (lro_requested)
293 adapter->shared->devRead.misc.uptFeatures &= UPT1_F_LRO;
294 else
295 adapter->shared->devRead.misc.uptFeatures &=
296 ~UPT1_F_LRO;
297 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
298 VMXNET3_CMD_UPDATE_FEATURE);
299 }
300 return 0;
301}
302
303static void
304vmxnet3_get_ethtool_stats(struct net_device *netdev,
305 struct ethtool_stats *stats, u64 *buf)
306{
307 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
308 u8 *base;
309 int i;
310
311 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
312
313 /* this does assume each counter is 64-bit wide */
314
315 base = (u8 *)&adapter->tqd_start->stats;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
318
319 base = (u8 *)&adapter->tx_queue.stats;
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
322
323 base = (u8 *)&adapter->rqd_start->stats;
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326
327 base = (u8 *)&adapter->rx_queue.stats;
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
330
331 base = (u8 *)adapter;
332 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
333 *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
334}
335
336
337static void
338vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p;
342
343 memset(p, 0, vmxnet3_get_regs_len(netdev));
344
345 regs->version = 1;
346
347 /* Update vmxnet3_get_regs_len if we want to dump more registers */
348
349 /* make each ring use multiple of 16 bytes */
350 buf[0] = adapter->tx_queue.tx_ring.next2fill;
351 buf[1] = adapter->tx_queue.tx_ring.next2comp;
352 buf[2] = adapter->tx_queue.tx_ring.gen;
353 buf[3] = 0;
354
355 buf[4] = adapter->tx_queue.comp_ring.next2proc;
356 buf[5] = adapter->tx_queue.comp_ring.gen;
357 buf[6] = adapter->tx_queue.stopped;
358 buf[7] = 0;
359
360 buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
361 buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
362 buf[10] = adapter->rx_queue.rx_ring[0].gen;
363 buf[11] = 0;
364
365 buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
366 buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
367 buf[14] = adapter->rx_queue.rx_ring[1].gen;
368 buf[15] = 0;
369
370 buf[16] = adapter->rx_queue.comp_ring.next2proc;
371 buf[17] = adapter->rx_queue.comp_ring.gen;
372 buf[18] = 0;
373 buf[19] = 0;
374}
375
376
377static void
378vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
379{
380 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
381
382 wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
383 wol->wolopts = adapter->wol;
384}
385
386
387static int
388vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
389{
390 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
391
392 if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
393 WAKE_MAGICSECURE)) {
394 return -EOPNOTSUPP;
395 }
396
397 adapter->wol = wol->wolopts;
398
399 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
400
401 return 0;
402}
403
404
405static int
406vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
407{
408 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
409
410 ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
411 SUPPORTED_TP;
412 ecmd->advertising = ADVERTISED_TP;
413 ecmd->port = PORT_TP;
414 ecmd->transceiver = XCVR_INTERNAL;
415
416 if (adapter->link_speed) {
417 ecmd->speed = adapter->link_speed;
418 ecmd->duplex = DUPLEX_FULL;
419 } else {
420 ecmd->speed = -1;
421 ecmd->duplex = -1;
422 }
423 return 0;
424}
425
426
427static void
428vmxnet3_get_ringparam(struct net_device *netdev,
429 struct ethtool_ringparam *param)
430{
431 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
432
433 param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
434 param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
435 param->rx_mini_max_pending = 0;
436 param->rx_jumbo_max_pending = 0;
437
438 param->rx_pending = adapter->rx_queue.rx_ring[0].size;
439 param->tx_pending = adapter->tx_queue.tx_ring.size;
440 param->rx_mini_pending = 0;
441 param->rx_jumbo_pending = 0;
442}
443
444
/*
 * ethtool .set_ringparam: resize the tx and rx rings.
 *
 * Requested sizes are rounded up to the hardware-required multiples.
 * If the device is running it is quiesced and reset, the queues are
 * destroyed and recreated with the new sizes, and the device is
 * re-activated.  On allocation failure the default sizes are retried;
 * if even that fails (or re-activation fails) the device is
 * force-closed.  Returns 0 on success or a negative errno.
 */
static int
vmxnet3_set_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_ring_size, new_rx_ring_size;
	u32 sz;
	int err = 0;

	if (param->tx_pending == 0 || param->tx_pending >
						VMXNET3_TX_RING_MAX_SIZE)
		return -EINVAL;

	if (param->rx_pending == 0 || param->rx_pending >
						VMXNET3_RX_RING_MAX_SIZE)
		return -EINVAL;


	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
							~VMXNET3_RING_SIZE_MASK;
	new_tx_ring_size = min_t(u32, new_tx_ring_size,
				 VMXNET3_TX_RING_MAX_SIZE);
	/* NOTE(review): after the rounding and min_t clamp above, both of
	 * these conditions look unreachable — presumably defensive only. */
	if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
						VMXNET3_RING_SIZE_ALIGN) != 0)
		return -EINVAL;

	/* ring0 has to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
	new_rx_ring_size = min_t(u32, new_rx_ring_size,
				 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
	if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
							    sz) != 0)
		return -EINVAL;

	/* nothing to do if the rounded sizes are already in effect */
	if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
	    new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
		return 0;
	}

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	/* NOTE(review): if the device is NOT running, the new sizes are
	 * silently not applied anywhere — confirm whether that is intended. */
	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* recreate the rx queue and the tx queue based on the
		 * new sizes */
		vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
		vmxnet3_rq_destroy(&adapter->rx_queue, adapter);

		err = vmxnet3_create_queues(adapter, new_tx_ring_size,
			new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
		if (err) {
			/* failed, most likely because of OOM, try default
			 * size */
			printk(KERN_ERR "%s: failed to apply new sizes, try the"
			       " default ones\n", netdev->name);
			err = vmxnet3_create_queues(adapter,
						    VMXNET3_DEF_TX_RING_SIZE,
						    VMXNET3_DEF_RX_RING_SIZE,
						    VMXNET3_DEF_RX_RING_SIZE);
			if (err) {
				printk(KERN_ERR "%s: failed to create queues "
				       "with default sizes. Closing it\n",
				       netdev->name);
				goto out;
			}
		}

		err = vmxnet3_activate_dev(adapter);
		if (err)
			printk(KERN_ERR "%s: failed to re-activate, error %d."
				" Closing it\n", netdev->name, err);
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
536
537
/* ethtool operations table: rx-csum, stats, regs, wol and ring handlers
 * are driver-specific; tx-csum/sg/tso use the generic ethtool_op_*
 * helpers. */
static struct ethtool_ops vmxnet3_ethtool_ops = {
	.get_settings = vmxnet3_get_settings,
	.get_drvinfo = vmxnet3_get_drvinfo,
	.get_regs_len = vmxnet3_get_regs_len,
	.get_regs = vmxnet3_get_regs,
	.get_wol = vmxnet3_get_wol,
	.set_wol = vmxnet3_set_wol,
	.get_link = ethtool_op_get_link,
	.get_rx_csum = vmxnet3_get_rx_csum,
	.set_rx_csum = vmxnet3_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_strings = vmxnet3_get_strings,
	.get_flags = vmxnet3_get_flags,
	.set_flags = vmxnet3_set_flags,
	.get_sset_count = vmxnet3_get_sset_count,
	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
	.get_ringparam = vmxnet3_get_ringparam,
	.set_ringparam = vmxnet3_set_ringparam,
};
562
/* Attach the vmxnet3 ethtool_ops table to @netdev. */
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
new file mode 100644
index 000000000000..3c0d70d58111
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -0,0 +1,389 @@
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24 *
25 */
26
27#ifndef _VMXNET3_INT_H
28#define _VMXNET3_INT_H
29
30#include <linux/types.h>
31#include <linux/ethtool.h>
32#include <linux/delay.h>
33#include <linux/device.h>
34#include <linux/netdevice.h>
35#include <linux/pci.h>
36#include <linux/ethtool.h>
37#include <linux/compiler.h>
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/slab.h>
41#include <linux/spinlock.h>
42#include <linux/ioport.h>
43#include <linux/highmem.h>
44#include <linux/init.h>
45#include <linux/timer.h>
46#include <linux/skbuff.h>
47#include <linux/interrupt.h>
48#include <linux/workqueue.h>
49#include <linux/uaccess.h>
50#include <asm/dma.h>
51#include <asm/page.h>
52
53#include <linux/tcp.h>
54#include <linux/udp.h>
55#include <linux/ip.h>
56#include <linux/ipv6.h>
57#include <linux/in.h>
58#include <linux/etherdevice.h>
59#include <asm/checksum.h>
60#include <linux/if_vlan.h>
61#include <linux/if_arp.h>
62#include <linux/inetdevice.h>
63
64#include "vmxnet3_defs.h"
65
66#ifdef DEBUG
67# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
68#else
69# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
70#endif
71
72
73/*
74 * Version numbers
75 */
76#define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k"
77
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
79#define VMXNET3_DRIVER_VERSION_NUM 0x01000500
80
81
82/*
83 * Capabilities
84 */
85
/* NOTE(review): these VMNET_CAP_* bits are not referenced by any code
 * visible in this file; presumably inherited from earlier vmxnet device
 * generations — confirm against the rest of the driver before relying
 * on or removing them. */
enum {
	VMNET_CAP_SG = 0x0001,	/* Can do scatter-gather transmits. */
	VMNET_CAP_IP4_CSUM = 0x0002,	/* Can checksum only TCP/UDP over
					 * IPv4 */
	VMNET_CAP_HW_CSUM = 0x0004,	/* Can checksum all packets. */
	VMNET_CAP_HIGH_DMA = 0x0008,	/* Can DMA to high memory. */
	VMNET_CAP_TOE = 0x0010,	/* Supports TCP/IP offload. */
	VMNET_CAP_TSO = 0x0020,	/* Supports TCP Segmentation
				 * offload */
	VMNET_CAP_SW_TSO = 0x0040,	/* Supports SW TCP Segmentation */
	VMNET_CAP_VMXNET_APROM = 0x0080,	/* Vmxnet APROM support */
	VMNET_CAP_HW_TX_VLAN = 0x0100,	/* Can we do VLAN tagging in HW */
	VMNET_CAP_HW_RX_VLAN = 0x0200,	/* Can we do VLAN untagging in HW */
	VMNET_CAP_SW_VLAN = 0x0400,	/* VLAN tagging/untagging in SW */
	VMNET_CAP_WAKE_PCKT_RCV = 0x0800,	/* Can wake on network packet recv? */
	VMNET_CAP_ENABLE_INT_INLINE = 0x1000,	/* Enable Interrupt Inline */
	VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,	/* copy header for vmkernel */
	VMNET_CAP_TX_CHAIN = 0x4000,	/* Guest can use multiple tx entries
					 * for a pkt */
	VMNET_CAP_RX_CHAIN = 0x8000,	/* pkt can span multiple rx entries */
	VMNET_CAP_LPD = 0x10000,	/* large pkt delivery */
	VMNET_CAP_BPF = 0x20000,	/* BPF Support in VMXNET Virtual HW*/
	VMNET_CAP_SG_SPAN_PAGES = 0x40000,	/* Scatter-gather can span multiple*/
						/* pages transmits */
	VMNET_CAP_IP6_CSUM = 0x80000,	/* Can do IPv6 csum offload. */
	VMNET_CAP_TSO6 = 0x100000,	/* TSO seg. offload for IPv6 pkts. */
	VMNET_CAP_TSO256k = 0x200000,	/* Can do TSO seg offload for */
					/* pkts up to 256kB. */
	VMNET_CAP_UPT = 0x400000	/* Support UPT */
};
116
117/*
118 * PCI vendor and device IDs.
119 */
120#define PCI_VENDOR_ID_VMWARE 0x15AD
121#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
122#define MAX_ETHERNET_CARDS 10
123#define MAX_PCI_PASSTHRU_DEVICE 6
124
/* A descriptor ring shared with the device; used for both the tx ring
 * and the two rx buffer rings. */
struct vmxnet3_cmd_ring {
	union Vmxnet3_GenericDesc *base;	/* virtual address of ring base */
	u32 size;				/* # of descriptors in the ring */
	u32 next2fill;				/* next descriptor to fill (producer) */
	u32 next2comp;				/* next descriptor to complete (consumer) */
	u8 gen;					/* generation bit, flipped on wrap */
	dma_addr_t basePA;			/* DMA address of ring base */
};
133
134static inline void
135vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
136{
137 ring->next2fill++;
138 if (unlikely(ring->next2fill == ring->size)) {
139 ring->next2fill = 0;
140 VMXNET3_FLIP_RING_GEN(ring->gen);
141 }
142}
143
/* Advance the consumer index of @ring — index only, the generation bit
 * is untouched (presumably wraps at ring->size; see
 * VMXNET3_INC_RING_IDX_ONLY, defined elsewhere). */
static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}
149
150static inline int
151vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
152{
153 return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
154 ring->next2comp - ring->next2fill - 1;
155}
156
/* A completion ring: the device writes completion descriptors here. */
struct vmxnet3_comp_ring {
	union Vmxnet3_GenericDesc *base;	/* virtual address of ring base */
	u32 size;				/* # of descriptors */
	u32 next2proc;				/* next descriptor to process */
	u8 gen;					/* expected generation bit */
	u8 intr_idx;				/* interrupt vector index */
	dma_addr_t basePA;			/* DMA address of ring base */
};
165
166static inline void
167vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
168{
169 ring->next2proc++;
170 if (unlikely(ring->next2proc == ring->size)) {
171 ring->next2proc = 0;
172 VMXNET3_FLIP_RING_GEN(ring->gen);
173 }
174}
175
/* Ring of Vmxnet3_TxDataDesc entries; bytes copied out of tx skbs land
 * here (see vmxnet3_tx_ctx.copy_size). */
struct vmxnet3_tx_data_ring {
	struct Vmxnet3_TxDataDesc *base;	/* virtual base address */
	u32 size;				/* # of entries */
	dma_addr_t basePA;			/* DMA base address */
};
181
/* How a tx buffer was DMA-mapped, so completion can unmap it correctly.
 * NOTE(review): SINGLE/PAGE presumably correspond to dma_map_single()
 * vs. dma_map_page() — the unmap code is not visible here; confirm. */
enum vmxnet3_buf_map_type {
	VMXNET3_MAP_INVALID = 0,	/* slot not in use */
	VMXNET3_MAP_NONE,		/* no DMA mapping to undo */
	VMXNET3_MAP_SINGLE,
	VMXNET3_MAP_PAGE,
};
188
/* Per-descriptor bookkeeping for a transmit buffer. */
struct vmxnet3_tx_buf_info {
	u32 map_type;		/* a vmxnet3_buf_map_type value */
	u16 len;		/* length of the mapped region */
	u16 sop_idx;		/* start-of-packet descriptor index
				 * (NOTE(review): inferred from "sop") */
	dma_addr_t dma_addr;	/* DMA address of the buffer */
	struct sk_buff *skb;	/* associated skb, if any */
};
196
/* Per-tx-queue statistics maintained by the driver (the device keeps
 * its own counters in UPT1_TxStats). */
struct vmxnet3_tq_driver_stats {
	u64 drop_total;     /* # of pkts dropped by the driver, the
			     * counters below track droppings due to
			     * different reasons
			     */
	u64 drop_too_many_frags;
	u64 drop_oversized_hdr;
	u64 drop_hdr_inspect_err;
	u64 drop_tso;

	u64 tx_ring_full;	/* # of times the tx ring was found full */
	u64 linearized;		/* # of pkts linearized */
	u64 copy_skb_header;	/* # of times we have to copy skb header */
	u64 oversized_hdr;
};
212
/* Scratch state used while parsing and queueing one tx packet. */
struct vmxnet3_tx_ctx {
	bool ipv4;		/* NOTE(review): presumably "packet is IPv4";
				 * confirm in the tx path */
	u16 mss;		/* TSO segment size */
	u32 eth_ip_hdr_size;	/* only valid for pkts requesting tso or csum
				 * offloading
				 */
	u32 l4_hdr_size;	/* only valid if mss != 0 */
	u32 copy_size;		/* # of bytes copied into the data ring */
	union Vmxnet3_GenericDesc *sop_txd;	/* start-of-packet descriptor */
	union Vmxnet3_GenericDesc *eop_txd;	/* end-of-packet descriptor */
};
224
/* One transmit queue (this driver version uses a single tx queue). */
struct vmxnet3_tx_queue {
	spinlock_t tx_lock;			/* serializes queue access */
	struct vmxnet3_cmd_ring tx_ring;	/* descriptor ring shared w/ device */
	struct vmxnet3_tx_buf_info *buf_info;	/* per-descriptor buffer bookkeeping */
	struct vmxnet3_tx_data_ring data_ring;	/* staging area for copied bytes */
	struct vmxnet3_comp_ring comp_ring;	/* tx completion ring */
	struct Vmxnet3_TxQueueCtrl *shared;	/* control block shared w/ device */
	struct vmxnet3_tq_driver_stats stats;	/* driver-side counters */
	bool stopped;				/* is the queue currently stopped? */
	int num_stop;				/* # of times the queue is
						 * stopped */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
237
/* What backs an rx buffer: nothing, an skb, or a page. */
enum vmxnet3_rx_buf_type {
	VMXNET3_RX_BUF_NONE = 0,
	VMXNET3_RX_BUF_SKB = 1,
	VMXNET3_RX_BUF_PAGE = 2
};
243
/* Bookkeeping for one rx buffer handed to the device. */
struct vmxnet3_rx_buf_info {
	enum vmxnet3_rx_buf_type buf_type;
	u16 len;		/* buffer length */
	union {			/* backing storage, selected by buf_type */
		struct sk_buff *skb;
		struct page *page;
	};
	dma_addr_t dma_addr;	/* DMA address of the buffer */
};
253
/* State carried across the completion entries of one rx packet
 * (NOTE(review): presumably for multi-buffer packets; confirm in the
 * rx path, which is not visible here). */
struct vmxnet3_rx_ctx {
	struct sk_buff *skb;	/* skb for the packet in progress */
	u32 sop_idx;		/* index of the start-of-packet descriptor */
};
258
/* Per-rx-queue statistics maintained by the driver (exposed via
 * ethtool as "drv dropped rx total" etc.). */
struct vmxnet3_rq_driver_stats {
	u64 drop_total;			/* total pkts dropped by the driver */
	u64 drop_err;			/* dropped due to error */
	u64 drop_fcs;			/* dropped due to bad frame checksum */
	u64 rx_buf_alloc_failure;	/* failed rx buffer allocations */
};
265
/* One receive queue: two buffer rings feeding a single completion ring. */
struct vmxnet3_rx_queue {
	struct vmxnet3_cmd_ring rx_ring[2];	/* buffer rings given to the device */
	struct vmxnet3_comp_ring comp_ring;	/* rx completion ring */
	struct vmxnet3_rx_ctx rx_ctx;		/* in-progress packet state */
	u32 qid;		/* rqID in RCD for buffer from 1st ring */
	u32 qid2;		/* rqID in RCD for buffer from 2nd ring */
	u32 uncommitted[2];	/* # of buffers allocated since last RXPROD
				 * update */
	struct vmxnet3_rx_buf_info *buf_info[2]; /* bookkeeping, one array per ring */
	struct Vmxnet3_RxQueueCtrl *shared;	/* control block shared w/ device */
	struct vmxnet3_rq_driver_stats stats;	/* driver-side counters */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
278
/* This driver version uses at most one MSI-X vector. */
#define VMXNET3_LINUX_MAX_MSIX_VECT     1

/* Interrupt configuration and state for the device. */
struct vmxnet3_intr {
	enum vmxnet3_intr_mask_mode  mask_mode;
	enum vmxnet3_intr_type       type;	/* MSI-X, MSI, or INTx? */
	u8  num_intrs;			/* # of intr vectors */
	u8  event_intr_idx;		/* idx of the intr vector for event */
	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
#ifdef CONFIG_PCI_MSI
	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};
291
/* Bits in vmxnet3_adapter.state; RESETTING also serializes ethtool
 * reconfiguration against the reset work (see vmxnet3_set_ringparam). */
#define VMXNET3_STATE_BIT_RESETTING   0
#define VMXNET3_STATE_BIT_QUIESCED    1

/* Per-device driver state: one tx queue and one rx queue. */
struct vmxnet3_adapter {
	struct vmxnet3_tx_queue tx_queue;
	struct vmxnet3_rx_queue rx_queue;
	struct napi_struct napi;		/* NAPI polling context */
	struct vlan_group *vlan_grp;		/* registered VLAN group, if any */

	struct vmxnet3_intr intr;		/* interrupt configuration */

	struct Vmxnet3_DriverShared *shared;	/* area shared with the device */
	struct Vmxnet3_PMConf *pm_conf;		/* power-management configuration */
	struct Vmxnet3_TxQueueDesc *tqd_start;	/* first tx queue desc */
	struct Vmxnet3_RxQueueDesc *rqd_start;	/* first rx queue desc */
	struct net_device *netdev;
	struct pci_dev *pdev;

	u8 *hw_addr0;	/* for BAR 0 */
	u8 *hw_addr1;	/* for BAR 1 */

	/* feature control */
	bool rxcsum;
	bool lro;
	bool jumbo_frame;

	/* rx buffer related */
	unsigned skb_buf_size;			/* size of skb-backed rx buffers */
	int rx_buf_per_pkt;			/* only apply to the 1st ring */
	dma_addr_t shared_pa;			/* DMA address of *shared */
	dma_addr_t queue_desc_pa;		/* DMA address of the queue descs */

	/* Wake-on-LAN */
	u32 wol;				/* enabled WAKE_* option bits */

	/* Link speed */
	u32 link_speed;				/* in mbps; 0 means link down */

	u64 tx_timeout_count;			/* # of tx watchdog timeouts */
	struct work_struct work;		/* reset work item */

	unsigned long state;			/* VMXNET3_STATE_BIT_xxx */

	int dev_number;				/* index of this device instance */
};
336
/* 32-bit MMIO register accessors for the two PCI BARs (mapped at
 * hw_addr0 / hw_addr1 in struct vmxnet3_adapter). */
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
	readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
	readl((adapter)->hw_addr1 + (reg))

/* NOTE(review): thresholds below are inferred from their names — the
 * call sites are not visible here; confirm their exact semantics. */
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
	((rq)->rx_ring[ring_idx].size >> 3)

/* split a dma_addr_t into the low/high 32-bit halves used by descriptors */
#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    256

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
360
/*
 * Entry points shared between the driver core and the ethtool code;
 * implemented elsewhere in the driver (not visible in this header).
 */

/* Stop device activity (rings drained, intrs disabled); 0 on success. */
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

/* (Re-)start the device; 0 on success, negative errno on failure. */
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

/* Close the device after an unrecoverable error. */
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

/* Issue a device reset. */
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

/* Tear down a tx queue (rings, buffers, DMA mappings). */
void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter);

/* Tear down an rx queue (rings, buffers, DMA mappings). */
void
vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter);

/* Allocate the tx queue and both rx rings with the given sizes. */
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);

extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);

extern char vmxnet3_driver_name[];
389#endif
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 9693b0fd323d..0bd898c94759 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/capability.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/string.h> 22#include <linux/string.h>
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 66360a2a14c2..e2c33c06190b 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -76,6 +76,7 @@
76 76
77#include <linux/module.h> 77#include <linux/module.h>
78#include <linux/kernel.h> 78#include <linux/kernel.h>
79#include <linux/sched.h>
79#include <linux/slab.h> 80#include <linux/slab.h>
80#include <linux/poll.h> 81#include <linux/poll.h>
81#include <linux/fs.h> 82#include <linux/fs.h>
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 2573c18b6aa5..cd8cb95c5bd7 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -84,6 +84,7 @@
84#include <linux/kernel.h> /* printk(), and other useful stuff */ 84#include <linux/kernel.h> /* printk(), and other useful stuff */
85#include <linux/module.h> 85#include <linux/module.h>
86#include <linux/string.h> /* inline memset(), etc. */ 86#include <linux/string.h> /* inline memset(), etc. */
87#include <linux/sched.h>
87#include <linux/slab.h> /* kmalloc(), kfree() */ 88#include <linux/slab.h> /* kmalloc(), kfree() */
88#include <linux/stddef.h> /* offsetof(), etc. */ 89#include <linux/stddef.h> /* offsetof(), etc. */
89#include <linux/wanrouter.h> /* WAN router definitions */ 90#include <linux/wanrouter.h> /* WAN router definitions */
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 81c8aec9df92..07d00b4cf48a 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -81,6 +81,7 @@
81 */ 81 */
82 82
83#include <linux/module.h> 83#include <linux/module.h>
84#include <linux/sched.h>
84#include <linux/types.h> 85#include <linux/types.h>
85#include <linux/errno.h> 86#include <linux/errno.h>
86#include <linux/list.h> 87#include <linux/list.h>
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3e90eb816181..beda387f2fc7 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/version.h> 20#include <linux/version.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/sched.h>
22#include <linux/ioport.h> 23#include <linux/ioport.h>
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/if.h> 25#include <linux/if.h>
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index cf5fd17ad707..f1bff98acd1f 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -58,8 +58,7 @@ struct cisco_state {
58 spinlock_t lock; 58 spinlock_t lock;
59 unsigned long last_poll; 59 unsigned long last_poll;
60 int up; 60 int up;
61 int request_sent; 61 u32 txseq; /* TX sequence number, 0 = none */
62 u32 txseq; /* TX sequence number */
63 u32 rxseq; /* RX sequence number */ 62 u32 rxseq; /* RX sequence number */
64}; 63};
65 64
@@ -163,6 +162,7 @@ static int cisco_rx(struct sk_buff *skb)
163 struct cisco_packet *cisco_data; 162 struct cisco_packet *cisco_data;
164 struct in_device *in_dev; 163 struct in_device *in_dev;
165 __be32 addr, mask; 164 __be32 addr, mask;
165 u32 ack;
166 166
167 if (skb->len < sizeof(struct hdlc_header)) 167 if (skb->len < sizeof(struct hdlc_header))
168 goto rx_error; 168 goto rx_error;
@@ -223,8 +223,10 @@ static int cisco_rx(struct sk_buff *skb)
223 case CISCO_KEEPALIVE_REQ: 223 case CISCO_KEEPALIVE_REQ:
224 spin_lock(&st->lock); 224 spin_lock(&st->lock);
225 st->rxseq = ntohl(cisco_data->par1); 225 st->rxseq = ntohl(cisco_data->par1);
226 if (st->request_sent && 226 ack = ntohl(cisco_data->par2);
227 ntohl(cisco_data->par2) == st->txseq) { 227 if (ack && (ack == st->txseq ||
228 /* our current REQ may be in transit */
229 ack == st->txseq - 1)) {
228 st->last_poll = jiffies; 230 st->last_poll = jiffies;
229 if (!st->up) { 231 if (!st->up) {
230 u32 sec, min, hrs, days; 232 u32 sec, min, hrs, days;
@@ -275,7 +277,6 @@ static void cisco_timer(unsigned long arg)
275 277
276 cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), 278 cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
277 htonl(st->rxseq)); 279 htonl(st->rxseq));
278 st->request_sent = 1;
279 spin_unlock(&st->lock); 280 spin_unlock(&st->lock);
280 281
281 st->timer.expires = jiffies + st->settings.interval * HZ; 282 st->timer.expires = jiffies + st->settings.interval * HZ;
@@ -293,9 +294,7 @@ static void cisco_start(struct net_device *dev)
293 unsigned long flags; 294 unsigned long flags;
294 295
295 spin_lock_irqsave(&st->lock, flags); 296 spin_lock_irqsave(&st->lock, flags);
296 st->up = 0; 297 st->up = st->txseq = st->rxseq = 0;
297 st->request_sent = 0;
298 st->txseq = st->rxseq = 0;
299 spin_unlock_irqrestore(&st->lock, flags); 298 spin_unlock_irqrestore(&st->lock, flags);
300 299
301 init_timer(&st->timer); 300 init_timer(&st->timer);
@@ -317,8 +316,7 @@ static void cisco_stop(struct net_device *dev)
317 316
318 spin_lock_irqsave(&st->lock, flags); 317 spin_lock_irqsave(&st->lock, flags);
319 netif_dormant_on(dev); 318 netif_dormant_on(dev);
320 st->up = 0; 319 st->up = st->txseq = 0;
321 st->request_sent = 0;
322 spin_unlock_irqrestore(&st->lock, flags); 320 spin_unlock_irqrestore(&st->lock, flags);
323} 321}
324 322
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 83da596e2052..58c66819f39b 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/capability.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/fcntl.h> 24#include <linux/fcntl.h>
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index a52f29c72c33..f1340faaf022 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/capability.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/fcntl.h> 22#include <linux/fcntl.h>
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 49ea9c92b7e6..d7a764a2fc1a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -31,13 +31,12 @@ config STRIP
31 ---help--- 31 ---help---
32 Say Y if you have a Metricom radio and intend to use Starmode Radio 32 Say Y if you have a Metricom radio and intend to use Starmode Radio
33 IP. STRIP is a radio protocol developed for the MosquitoNet project 33 IP. STRIP is a radio protocol developed for the MosquitoNet project
34 (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet 34 to send Internet traffic using Metricom radios. Metricom radios are
35 traffic using Metricom radios. Metricom radios are small, battery 35 small, battery powered, 100kbit/sec packet radio transceivers, about
36 powered, 100kbit/sec packet radio transceivers, about the size and 36 the size and weight of a cellular telephone. (You may also have heard
37 weight of a cellular telephone. (You may also have heard them called 37 them called "Metricom modems" but we avoid the term "modem" because
38 "Metricom modems" but we avoid the term "modem" because it misleads 38 it misleads many people into thinking that you can plug a Metricom
39 many people into thinking that you can plug a Metricom modem into a 39 modem into a phone line and use it as a modem.)
40 phone line and use it as a modem.)
41 40
42 You can use STRIP on any Linux machine with a serial port, although 41 You can use STRIP on any Linux machine with a serial port, although
43 it is obviously most useful for people with laptop computers. If you 42 it is obviously most useful for people with laptop computers. If you
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index 4f6ab1322189..b07e4d3a6b4d 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -266,7 +266,7 @@ do { \
266#define ADM8211_SYNCTL_CS1 (1 << 28) 266#define ADM8211_SYNCTL_CS1 (1 << 28)
267#define ADM8211_SYNCTL_CAL (1 << 27) 267#define ADM8211_SYNCTL_CAL (1 << 27)
268#define ADM8211_SYNCTL_SELCAL (1 << 26) 268#define ADM8211_SYNCTL_SELCAL (1 << 26)
269#define ADM8211_SYNCTL_RFtype ((1 << 24) || (1 << 23) || (1 << 22)) 269#define ADM8211_SYNCTL_RFtype ((1 << 24) | (1 << 23) | (1 << 22))
270#define ADM8211_SYNCTL_RFMD (1 << 22) 270#define ADM8211_SYNCTL_RFMD (1 << 22)
271#define ADM8211_SYNCTL_GENERAL (0x7 << 22) 271#define ADM8211_SYNCTL_GENERAL (0x7 << 22)
272/* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */ 272/* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7116a1aa20ce..abf896a7390e 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4790,9 +4790,8 @@ static int proc_stats_rid_open( struct inode *inode,
4790static int get_dec_u16( char *buffer, int *start, int limit ) { 4790static int get_dec_u16( char *buffer, int *start, int limit ) {
4791 u16 value; 4791 u16 value;
4792 int valid = 0; 4792 int valid = 0;
4793 for( value = 0; buffer[*start] >= '0' && 4793 for (value = 0; *start < limit && buffer[*start] >= '0' &&
4794 buffer[*start] <= '9' && 4794 buffer[*start] <= '9'; (*start)++) {
4795 *start < limit; (*start)++ ) {
4796 valid = 1; 4795 valid = 1;
4797 value *= 10; 4796 value *= 10;
4798 value += buffer[*start] - '0'; 4797 value += buffer[*start] - '0';
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c
index 2ab1d59870f4..a8b689635a3b 100644
--- a/drivers/net/wireless/arlan-proc.c
+++ b/drivers/net/wireless/arlan-proc.c
@@ -402,7 +402,7 @@ static int arlan_setup_card_by_book(struct net_device *dev)
402 402
403static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0"; 403static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
404 404
405static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp, 405static int arlan_sysctl_info(ctl_table * ctl, int write,
406 void __user *buffer, size_t * lenp, loff_t *ppos) 406 void __user *buffer, size_t * lenp, loff_t *ppos)
407{ 407{
408 int i; 408 int i;
@@ -629,7 +629,7 @@ final:
629 *lenp = pos; 629 *lenp = pos;
630 630
631 if (!write) 631 if (!write)
632 retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); 632 retv = proc_dostring(ctl, write, buffer, lenp, ppos);
633 else 633 else
634 { 634 {
635 *lenp = 0; 635 *lenp = 0;
@@ -639,7 +639,7 @@ final:
639} 639}
640 640
641 641
642static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp, 642static int arlan_sysctl_info161719(ctl_table * ctl, int write,
643 void __user *buffer, size_t * lenp, loff_t *ppos) 643 void __user *buffer, size_t * lenp, loff_t *ppos)
644{ 644{
645 int i; 645 int i;
@@ -669,11 +669,11 @@ static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp
669 669
670final: 670final:
671 *lenp = pos; 671 *lenp = pos;
672 retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); 672 retv = proc_dostring(ctl, write, buffer, lenp, ppos);
673 return retv; 673 return retv;
674} 674}
675 675
676static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp, 676static int arlan_sysctl_infotxRing(ctl_table * ctl, int write,
677 void __user *buffer, size_t * lenp, loff_t *ppos) 677 void __user *buffer, size_t * lenp, loff_t *ppos)
678{ 678{
679 int i; 679 int i;
@@ -698,11 +698,11 @@ static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp
698 SARLBNpln(u_char, txBuffer, 0x800); 698 SARLBNpln(u_char, txBuffer, 0x800);
699final: 699final:
700 *lenp = pos; 700 *lenp = pos;
701 retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); 701 retv = proc_dostring(ctl, write, buffer, lenp, ppos);
702 return retv; 702 return retv;
703} 703}
704 704
705static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp, 705static int arlan_sysctl_inforxRing(ctl_table * ctl, int write,
706 void __user *buffer, size_t * lenp, loff_t *ppos) 706 void __user *buffer, size_t * lenp, loff_t *ppos)
707{ 707{
708 int i; 708 int i;
@@ -726,11 +726,11 @@ static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp
726 SARLBNpln(u_char, rxBuffer, 0x800); 726 SARLBNpln(u_char, rxBuffer, 0x800);
727final: 727final:
728 *lenp = pos; 728 *lenp = pos;
729 retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); 729 retv = proc_dostring(ctl, write, buffer, lenp, ppos);
730 return retv; 730 return retv;
731} 731}
732 732
733static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp, 733static int arlan_sysctl_info18(ctl_table * ctl, int write,
734 void __user *buffer, size_t * lenp, loff_t *ppos) 734 void __user *buffer, size_t * lenp, loff_t *ppos)
735{ 735{
736 int i; 736 int i;
@@ -756,7 +756,7 @@ static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
756 756
757final: 757final:
758 *lenp = pos; 758 *lenp = pos;
759 retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos); 759 retv = proc_dostring(ctl, write, buffer, lenp, ppos);
760 return retv; 760 return retv;
761} 761}
762 762
@@ -766,7 +766,7 @@ final:
766 766
767static char conf_reset_result[200]; 767static char conf_reset_result[200];
768 768
769static int arlan_configure(ctl_table * ctl, int write, struct file *filp, 769static int arlan_configure(ctl_table * ctl, int write,
770 void __user *buffer, size_t * lenp, loff_t *ppos) 770 void __user *buffer, size_t * lenp, loff_t *ppos)
771{ 771{
772 int pos = 0; 772 int pos = 0;
@@ -788,10 +788,10 @@ static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
788 return -1; 788 return -1;
789 789
790 *lenp = pos; 790 *lenp = pos;
791 return proc_dostring(ctl, write, filp, buffer, lenp, ppos); 791 return proc_dostring(ctl, write, buffer, lenp, ppos);
792} 792}
793 793
794static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp, 794static int arlan_sysctl_reset(ctl_table * ctl, int write,
795 void __user *buffer, size_t * lenp, loff_t *ppos) 795 void __user *buffer, size_t * lenp, loff_t *ppos)
796{ 796{
797 int pos = 0; 797 int pos = 0;
@@ -811,7 +811,7 @@ static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
811 } else 811 } else
812 return -1; 812 return -1;
813 *lenp = pos + 3; 813 *lenp = pos + 3;
814 return proc_dostring(ctl, write, filp, buffer, lenp, ppos); 814 return proc_dostring(ctl, write, buffer, lenp, ppos);
815} 815}
816 816
817 817
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index b3e5cf3735b0..dbd488da18b1 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1141 u8 vpds[2][AR5416_PD_GAIN_ICEPTS]; 1141 u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
1142 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS]; 1142 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
1143 int chain, idx, i; 1143 int chain, idx, i;
1144 u8 f; 1144 u32 phy_data = 0;
1145 u8 f, tmp;
1145 1146
1146 switch (channel->band) { 1147 switch (channel->band) {
1147 case IEEE80211_BAND_2GHZ: 1148 case IEEE80211_BAND_2GHZ:
@@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1208 } 1209 }
1209 1210
1210 for (i = 0; i < 76; i++) { 1211 for (i = 0; i < 76; i++) {
1211 u32 phy_data;
1212 u8 tmp;
1213
1214 if (i < 25) { 1212 if (i < 25) {
1215 tmp = ar9170_interpolate_val(i, &pwrs[0][0], 1213 tmp = ar9170_interpolate_val(i, &pwrs[0][0],
1216 &vpds[0][0]); 1214 &vpds[0][0]);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0138ac8bf50..e974e5829e1a 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -64,6 +64,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
64 { USB_DEVICE(0x0cf3, 0x9170) }, 64 { USB_DEVICE(0x0cf3, 0x9170) },
65 /* Atheros TG121N */ 65 /* Atheros TG121N */
66 { USB_DEVICE(0x0cf3, 0x1001) }, 66 { USB_DEVICE(0x0cf3, 0x1001) },
67 /* TP-Link TL-WN821N v2 */
68 { USB_DEVICE(0x0cf3, 0x1002) },
67 /* Cace Airpcap NX */ 69 /* Cace Airpcap NX */
68 { USB_DEVICE(0xcace, 0x0300) }, 70 { USB_DEVICE(0xcace, 0x0300) },
69 /* D-Link DWA 160A */ 71 /* D-Link DWA 160A */
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 3234995e8881..0ad6d0b76e9e 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -609,14 +609,24 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
609 AR_PHY_CH1_EXT_CCA, 609 AR_PHY_CH1_EXT_CCA,
610 AR_PHY_CH2_EXT_CCA 610 AR_PHY_CH2_EXT_CCA
611 }; 611 };
612 u8 chainmask; 612 u8 chainmask, rx_chain_status;
613 613
614 rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
614 if (AR_SREV_9285(ah)) 615 if (AR_SREV_9285(ah))
615 chainmask = 0x9; 616 chainmask = 0x9;
616 else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) 617 else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
617 chainmask = 0x1B; 618 if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
618 else 619 chainmask = 0x1B;
619 chainmask = 0x3F; 620 else
621 chainmask = 0x09;
622 } else {
623 if (rx_chain_status & 0x4)
624 chainmask = 0x3F;
625 else if (rx_chain_status & 0x2)
626 chainmask = 0x1B;
627 else
628 chainmask = 0x09;
629 }
620 630
621 h = ah->nfCalHist; 631 h = ah->nfCalHist;
622 632
@@ -697,6 +707,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
697 noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE; 707 noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
698 else if (AR_SREV_9285(ah)) 708 else if (AR_SREV_9285(ah))
699 noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE; 709 noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
710 else if (AR_SREV_9287(ah))
711 noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
700 else 712 else
701 noise_floor = AR_PHY_CCA_MAX_AR5416_GOOD_VALUE; 713 noise_floor = AR_PHY_CCA_MAX_AR5416_GOOD_VALUE;
702 714
@@ -924,6 +936,7 @@ static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
924 regVal |= (1 << (19 + i)); 936 regVal |= (1 << (19 + i));
925 REG_WRITE(ah, 0x7834, regVal); 937 REG_WRITE(ah, 0x7834, regVal);
926 udelay(1); 938 udelay(1);
939 regVal = REG_READ(ah, 0x7834);
927 regVal &= (~(0x1 << (19 + i))); 940 regVal &= (~(0x1 << (19 + i)));
928 reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9); 941 reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
929 regVal |= (reg_field << (19 + i)); 942 regVal |= (reg_field << (19 + i));
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 019bcbba40ed..9028ab193e42 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -28,6 +28,7 @@ extern const struct ath9k_percal_data adc_init_dc_cal;
28#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85 28#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
29#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112 29#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
30#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118 30#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
31#define AR_PHY_CCA_MAX_AR9287_GOOD_VALUE -118
31#define AR_PHY_CCA_MAX_HIGH_VALUE -62 32#define AR_PHY_CCA_MAX_HIGH_VALUE -62
32#define AR_PHY_CCA_MIN_BAD_VALUE -140 33#define AR_PHY_CCA_MIN_BAD_VALUE -140
33#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3 34#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index ae7fb5dcb266..4071fc91da0a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -509,6 +509,8 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
509 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 509 REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
510 eep->baseEepHeader.dacLpMode); 510 eep->baseEepHeader.dacLpMode);
511 511
512 udelay(100);
513
512 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP, 514 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP,
513 pModal->miscBits >> 2); 515 pModal->miscBits >> 2);
514 516
@@ -902,7 +904,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
902 u16 powerLimit) 904 u16 powerLimit)
903{ 905{
904#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ 906#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
905#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */ 907#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
906 908
907 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 909 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
908 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; 910 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index b6c6cca07812..ca7694caf364 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -842,7 +842,7 @@ static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
842 842
843static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) 843static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
844{ 844{
845 if (AR_SREV_9287_11(ah)) 845 if (AR_SREV_9287_11_OR_LATER(ah))
846 INIT_INI_ARRAY(&ah->iniModesRxGain, 846 INIT_INI_ARRAY(&ah->iniModesRxGain,
847 ar9287Modes_rx_gain_9287_1_1, 847 ar9287Modes_rx_gain_9287_1_1,
848 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6); 848 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
@@ -853,7 +853,7 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
853 else if (AR_SREV_9280_20(ah)) 853 else if (AR_SREV_9280_20(ah))
854 ath9k_hw_init_rxgain_ini(ah); 854 ath9k_hw_init_rxgain_ini(ah);
855 855
856 if (AR_SREV_9287_11(ah)) { 856 if (AR_SREV_9287_11_OR_LATER(ah)) {
857 INIT_INI_ARRAY(&ah->iniModesTxGain, 857 INIT_INI_ARRAY(&ah->iniModesTxGain,
858 ar9287Modes_tx_gain_9287_1_1, 858 ar9287Modes_tx_gain_9287_1_1,
859 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6); 859 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
@@ -965,7 +965,7 @@ int ath9k_hw_init(struct ath_hw *ah)
965 ath9k_hw_init_mode_regs(ah); 965 ath9k_hw_init_mode_regs(ah);
966 966
967 if (ah->is_pciexpress) 967 if (ah->is_pciexpress)
968 ath9k_hw_configpcipowersave(ah, 0); 968 ath9k_hw_configpcipowersave(ah, 0, 0);
969 else 969 else
970 ath9k_hw_disablepcie(ah); 970 ath9k_hw_disablepcie(ah);
971 971
@@ -1273,6 +1273,15 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
1273 */ 1273 */
1274 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 1274 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
1275 1275
1276 if (AR_SREV_9280_10_OR_LATER(ah)) {
1277 val = REG_READ(ah, AR_PCU_MISC_MODE2) &
1278 (~AR_PCU_MISC_MODE2_HWWAR1);
1279
1280 if (AR_SREV_9287_10_OR_LATER(ah))
1281 val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
1282
1283 REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
1284 }
1276 1285
1277 if (!AR_SREV_5416_20_OR_LATER(ah) || 1286 if (!AR_SREV_5416_20_OR_LATER(ah) ||
1278 AR_SREV_9280_10_OR_LATER(ah)) 1287 AR_SREV_9280_10_OR_LATER(ah))
@@ -1784,7 +1793,7 @@ static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
1784static bool ath9k_hw_chip_reset(struct ath_hw *ah, 1793static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1785 struct ath9k_channel *chan) 1794 struct ath9k_channel *chan)
1786{ 1795{
1787 if (OLC_FOR_AR9280_20_LATER) { 1796 if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) {
1788 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) 1797 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
1789 return false; 1798 return false;
1790 } else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) 1799 } else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
@@ -2338,6 +2347,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2338 struct ath9k_channel *curchan = ah->curchan; 2347 struct ath9k_channel *curchan = ah->curchan;
2339 u32 saveDefAntenna; 2348 u32 saveDefAntenna;
2340 u32 macStaId1; 2349 u32 macStaId1;
2350 u64 tsf = 0;
2341 int i, rx_chainmask, r; 2351 int i, rx_chainmask, r;
2342 2352
2343 ah->extprotspacing = sc->ht_extprotspacing; 2353 ah->extprotspacing = sc->ht_extprotspacing;
@@ -2347,7 +2357,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2347 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 2357 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
2348 return -EIO; 2358 return -EIO;
2349 2359
2350 if (curchan) 2360 if (curchan && !ah->chip_fullsleep)
2351 ath9k_hw_getnf(ah, curchan); 2361 ath9k_hw_getnf(ah, curchan);
2352 2362
2353 if (bChannelChange && 2363 if (bChannelChange &&
@@ -2356,8 +2366,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2356 (chan->channel != ah->curchan->channel) && 2366 (chan->channel != ah->curchan->channel) &&
2357 ((chan->channelFlags & CHANNEL_ALL) == 2367 ((chan->channelFlags & CHANNEL_ALL) ==
2358 (ah->curchan->channelFlags & CHANNEL_ALL)) && 2368 (ah->curchan->channelFlags & CHANNEL_ALL)) &&
2359 (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) && 2369 !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
2360 !IS_CHAN_A_5MHZ_SPACED(ah->curchan)))) { 2370 IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
2361 2371
2362 if (ath9k_hw_channel_change(ah, chan, sc->tx_chan_width)) { 2372 if (ath9k_hw_channel_change(ah, chan, sc->tx_chan_width)) {
2363 ath9k_hw_loadnf(ah, ah->curchan); 2373 ath9k_hw_loadnf(ah, ah->curchan);
@@ -2372,6 +2382,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2372 2382
2373 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; 2383 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
2374 2384
2385 /* For chips on which RTC reset is done, save TSF before it gets cleared */
2386 if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
2387 tsf = ath9k_hw_gettsf64(ah);
2388
2375 saveLedState = REG_READ(ah, AR_CFG_LED) & 2389 saveLedState = REG_READ(ah, AR_CFG_LED) &
2376 (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL | 2390 (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
2377 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW); 2391 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
@@ -2398,6 +2412,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2398 udelay(50); 2412 udelay(50);
2399 } 2413 }
2400 2414
2415 /* Restore TSF */
2416 if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
2417 ath9k_hw_settsf64(ah, tsf);
2418
2401 if (AR_SREV_9280_10_OR_LATER(ah)) 2419 if (AR_SREV_9280_10_OR_LATER(ah))
2402 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); 2420 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
2403 2421
@@ -3005,9 +3023,10 @@ void ath9k_ps_restore(struct ath_softc *sc)
3005 * Programming the SerDes must go through the same 288 bit serial shift 3023 * Programming the SerDes must go through the same 288 bit serial shift
3006 * register as the other analog registers. Hence the 9 writes. 3024 * register as the other analog registers. Hence the 9 writes.
3007 */ 3025 */
3008void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore) 3026void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
3009{ 3027{
3010 u8 i; 3028 u8 i;
3029 u32 val;
3011 3030
3012 if (ah->is_pciexpress != true) 3031 if (ah->is_pciexpress != true)
3013 return; 3032 return;
@@ -3017,84 +3036,113 @@ void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore)
3017 return; 3036 return;
3018 3037
3019 /* Nothing to do on restore for 11N */ 3038 /* Nothing to do on restore for 11N */
3020 if (restore) 3039 if (!restore) {
3021 return; 3040 if (AR_SREV_9280_20_OR_LATER(ah)) {
3041 /*
3042 * AR9280 2.0 or later chips use SerDes values from the
3043 * initvals.h initialized depending on chipset during
3044 * ath9k_hw_init()
3045 */
3046 for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
3047 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
3048 INI_RA(&ah->iniPcieSerdes, i, 1));
3049 }
3050 } else if (AR_SREV_9280(ah) &&
3051 (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
3052 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
3053 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3054
3055 /* RX shut off when elecidle is asserted */
3056 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
3057 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
3058 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
3059
3060 /* Shut off CLKREQ active in L1 */
3061 if (ah->config.pcie_clock_req)
3062 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
3063 else
3064 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
3022 3065
3023 if (AR_SREV_9280_20_OR_LATER(ah)) { 3066 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3024 /* 3067 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3025 * AR9280 2.0 or later chips use SerDes values from the 3068 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
3026 * initvals.h initialized depending on chipset during
3027 * ath9k_hw_init()
3028 */
3029 for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
3030 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
3031 INI_RA(&ah->iniPcieSerdes, i, 1));
3032 }
3033 } else if (AR_SREV_9280(ah) &&
3034 (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
3035 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
3036 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3037 3069
3038 /* RX shut off when elecidle is asserted */ 3070 /* Load the new settings */
3039 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019); 3071 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3040 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
3041 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
3042 3072
3043 /* Shut off CLKREQ active in L1 */ 3073 } else {
3044 if (ah->config.pcie_clock_req) 3074 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
3045 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc); 3075 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3046 else
3047 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
3048
3049 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3050 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3051 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
3052 3076
3053 /* Load the new settings */ 3077 /* RX shut off when elecidle is asserted */
3054 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 3078 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
3079 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
3080 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
3055 3081
3056 } else { 3082 /*
3057 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); 3083 * Ignore ah->ah_config.pcie_clock_req setting for
3058 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); 3084 * pre-AR9280 11n
3085 */
3086 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
3059 3087
3060 /* RX shut off when elecidle is asserted */ 3088 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3061 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039); 3089 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3062 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824); 3090 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
3063 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
3064 3091
3065 /* 3092 /* Load the new settings */
3066 * Ignore ah->ah_config.pcie_clock_req setting for 3093 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3067 * pre-AR9280 11n 3094 }
3068 */
3069 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
3070 3095
3071 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); 3096 udelay(1000);
3072 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3073 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
3074 3097
3075 /* Load the new settings */ 3098 /* set bit 19 to allow forcing of pcie core into L1 state */
3076 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 3099 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
3077 }
3078 3100
3079 udelay(1000); 3101 /* Several PCIe massages to ensure proper behaviour */
3102 if (ah->config.pcie_waen) {
3103 val = ah->config.pcie_waen;
3104 if (!power_off)
3105 val &= (~AR_WA_D3_L1_DISABLE);
3106 } else {
3107 if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
3108 AR_SREV_9287(ah)) {
3109 val = AR9285_WA_DEFAULT;
3110 if (!power_off)
3111 val &= (~AR_WA_D3_L1_DISABLE);
3112 } else if (AR_SREV_9280(ah)) {
3113 /*
3114 * On AR9280 chips bit 22 of 0x4004 needs to be
3115 * set otherwise card may disappear.
3116 */
3117 val = AR9280_WA_DEFAULT;
3118 if (!power_off)
3119 val &= (~AR_WA_D3_L1_DISABLE);
3120 } else
3121 val = AR_WA_DEFAULT;
3122 }
3080 3123
3081 /* set bit 19 to allow forcing of pcie core into L1 state */ 3124 REG_WRITE(ah, AR_WA, val);
3082 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); 3125 }
3083 3126
3084 /* Several PCIe massages to ensure proper behaviour */ 3127 if (power_off) {
3085 if (ah->config.pcie_waen) {
3086 REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
3087 } else {
3088 if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah))
3089 REG_WRITE(ah, AR_WA, AR9285_WA_DEFAULT);
3090 /* 3128 /*
3091 * On AR9280 chips bit 22 of 0x4004 needs to be set to 3129 * Set PCIe workaround bits
3092 * otherwise card may disappear. 3130 * bit 14 in WA register (disable L1) should only
3131 * be set when device enters D3 and be cleared
3132 * when device comes back to D0.
3093 */ 3133 */
3094 else if (AR_SREV_9280(ah)) 3134 if (ah->config.pcie_waen) {
3095 REG_WRITE(ah, AR_WA, AR9280_WA_DEFAULT); 3135 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
3096 else 3136 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
3097 REG_WRITE(ah, AR_WA, AR_WA_DEFAULT); 3137 } else {
3138 if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
3139 AR_SREV_9287(ah)) &&
3140 (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
3141 (AR_SREV_9280(ah) &&
3142 (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
3143 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
3144 }
3145 }
3098 } 3146 }
3099} 3147}
3100 3148
@@ -3652,15 +3700,7 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3652 } 3700 }
3653#endif 3701#endif
3654 3702
3655 if ((ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || 3703 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3656 (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) ||
3657 (ah->hw_version.macVersion == AR_SREV_VERSION_9160) ||
3658 (ah->hw_version.macVersion == AR_SREV_VERSION_9100) ||
3659 (ah->hw_version.macVersion == AR_SREV_VERSION_9280) ||
3660 (ah->hw_version.macVersion == AR_SREV_VERSION_9285))
3661 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3662 else
3663 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
3664 3704
3665 if (AR_SREV_9280(ah) || AR_SREV_9285(ah)) 3705 if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
3666 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS; 3706 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9106a0b537dd..b89234571829 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -106,7 +106,7 @@
106#define AH_TSF_WRITE_TIMEOUT 100 /* (us) */ 106#define AH_TSF_WRITE_TIMEOUT 100 /* (us) */
107#define AH_TIME_QUANTUM 10 107#define AH_TIME_QUANTUM 10
108#define AR_KEYTABLE_SIZE 128 108#define AR_KEYTABLE_SIZE 128
109#define POWER_UP_TIME 200000 109#define POWER_UP_TIME 10000
110#define SPUR_RSSI_THRESH 40 110#define SPUR_RSSI_THRESH 40
111 111
112#define CAB_TIMEOUT_VAL 10 112#define CAB_TIMEOUT_VAL 10
@@ -650,7 +650,7 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
650 const struct ath9k_beacon_state *bs); 650 const struct ath9k_beacon_state *bs);
651bool ath9k_hw_setpower(struct ath_hw *ah, 651bool ath9k_hw_setpower(struct ath_hw *ah,
652 enum ath9k_power_mode mode); 652 enum ath9k_power_mode mode);
653void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore); 653void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
654 654
655/* Interrupt Handling */ 655/* Interrupt Handling */
656bool ath9k_hw_intrpend(struct ath_hw *ah); 656bool ath9k_hw_intrpend(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3dc7b5a13e64..52bed89063d4 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1131,7 +1131,7 @@ void ath_radio_enable(struct ath_softc *sc)
1131 int r; 1131 int r;
1132 1132
1133 ath9k_ps_wakeup(sc); 1133 ath9k_ps_wakeup(sc);
1134 ath9k_hw_configpcipowersave(ah, 0); 1134 ath9k_hw_configpcipowersave(ah, 0, 0);
1135 1135
1136 if (!ah->curchan) 1136 if (!ah->curchan)
1137 ah->curchan = ath_get_curchannel(sc, sc->hw); 1137 ah->curchan = ath_get_curchannel(sc, sc->hw);
@@ -1202,7 +1202,7 @@ void ath_radio_disable(struct ath_softc *sc)
1202 spin_unlock_bh(&sc->sc_resetlock); 1202 spin_unlock_bh(&sc->sc_resetlock);
1203 1203
1204 ath9k_hw_phy_disable(ah); 1204 ath9k_hw_phy_disable(ah);
1205 ath9k_hw_configpcipowersave(ah, 1); 1205 ath9k_hw_configpcipowersave(ah, 1, 1);
1206 ath9k_ps_restore(sc); 1206 ath9k_ps_restore(sc);
1207 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 1207 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1208} 1208}
@@ -1226,11 +1226,6 @@ static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
1226 bool blocked = !!ath_is_rfkill_set(sc); 1226 bool blocked = !!ath_is_rfkill_set(sc);
1227 1227
1228 wiphy_rfkill_set_hw_state(hw->wiphy, blocked); 1228 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1229
1230 if (blocked)
1231 ath_radio_disable(sc);
1232 else
1233 ath_radio_enable(sc);
1234} 1229}
1235 1230
1236static void ath_start_rfkill_poll(struct ath_softc *sc) 1231static void ath_start_rfkill_poll(struct ath_softc *sc)
@@ -1260,6 +1255,7 @@ void ath_detach(struct ath_softc *sc)
1260 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n"); 1255 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
1261 1256
1262 ath_deinit_leds(sc); 1257 ath_deinit_leds(sc);
1258 wiphy_rfkill_stop_polling(sc->hw->wiphy);
1263 1259
1264 for (i = 0; i < sc->num_sec_wiphy; i++) { 1260 for (i = 0; i < sc->num_sec_wiphy; i++) {
1265 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 1261 struct ath_wiphy *aphy = sc->sec_wiphy[i];
@@ -1942,7 +1938,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
1942 init_channel = ath_get_curchannel(sc, hw); 1938 init_channel = ath_get_curchannel(sc, hw);
1943 1939
1944 /* Reset SERDES registers */ 1940 /* Reset SERDES registers */
1945 ath9k_hw_configpcipowersave(sc->sc_ah, 0); 1941 ath9k_hw_configpcipowersave(sc->sc_ah, 0, 0);
1946 1942
1947 /* 1943 /*
1948 * The basic interface to setting the hardware in a good 1944 * The basic interface to setting the hardware in a good
@@ -2166,11 +2162,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2166 } else 2162 } else
2167 sc->rx.rxlink = NULL; 2163 sc->rx.rxlink = NULL;
2168 2164
2169 wiphy_rfkill_stop_polling(sc->hw->wiphy);
2170
2171 /* disable HAL and put h/w to sleep */ 2165 /* disable HAL and put h/w to sleep */
2172 ath9k_hw_disable(sc->sc_ah); 2166 ath9k_hw_disable(sc->sc_ah);
2173 ath9k_hw_configpcipowersave(sc->sc_ah, 1); 2167 ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
2174 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 2168 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
2175 2169
2176 sc->sc_flags |= SC_OP_INVALID; 2170 sc->sc_flags |= SC_OP_INVALID;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index e5c29eb86e80..d83b77f821e9 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -676,8 +676,9 @@
676#define AR_RC_HOSTIF 0x00000100 676#define AR_RC_HOSTIF 0x00000100
677 677
678#define AR_WA 0x4004 678#define AR_WA 0x4004
679#define AR_WA_D3_L1_DISABLE (1 << 14)
679#define AR9285_WA_DEFAULT 0x004a05cb 680#define AR9285_WA_DEFAULT 0x004a05cb
680#define AR9280_WA_DEFAULT 0x0040073f 681#define AR9280_WA_DEFAULT 0x0040073b
681#define AR_WA_DEFAULT 0x0000073f 682#define AR_WA_DEFAULT 0x0000073f
682 683
683 684
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 83e38134accb..54ea61c15d8b 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -61,11 +61,28 @@ config B43_PCMCIA
61 61
62 If unsure, say N. 62 If unsure, say N.
63 63
64config B43_SDIO
65 bool "Broadcom 43xx SDIO device support (EXPERIMENTAL)"
66 depends on B43 && SSB_SDIOHOST_POSSIBLE && EXPERIMENTAL
67 select SSB_SDIOHOST
68 ---help---
69 Broadcom 43xx device support for Soft-MAC SDIO devices.
70
71 With this config option you can drive Soft-MAC b43 cards with a
72 Secure Digital I/O interface.
73 This includes the WLAN daughter card found on the Nintendo Wii
74 video game console.
75 Note that this does not support Broadcom 43xx Full-MAC devices.
76
77 It's safe to select Y here, even if you don't have a B43 SDIO device.
78
79 If unsure, say N.
80
64# Data transfers to the device via PIO 81# Data transfers to the device via PIO
65# This is only needed on PCMCIA devices. All others can do DMA properly. 82# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
66config B43_PIO 83config B43_PIO
67 bool 84 bool
68 depends on B43 && (B43_PCMCIA || B43_FORCE_PIO) 85 depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
69 select SSB_BLOCKIO 86 select SSB_BLOCKIO
70 default y 87 default y
71 88
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index da379f4b0c3a..84772a2542dc 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -16,6 +16,7 @@ b43-$(CONFIG_B43_PIO) += pio.o
16b43-y += rfkill.o 16b43-y += rfkill.o
17b43-$(CONFIG_B43_LEDS) += leds.o 17b43-$(CONFIG_B43_LEDS) += leds.o
18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o 18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
19b43-$(CONFIG_B43_SDIO) += sdio.o
19b43-$(CONFIG_B43_DEBUG) += debugfs.o 20b43-$(CONFIG_B43_DEBUG) += debugfs.o
20 21
21obj-$(CONFIG_B43) += b43.o 22obj-$(CONFIG_B43) += b43.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 09cfe68537b6..660716214d49 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -607,86 +607,7 @@ struct b43_qos_params {
607 struct ieee80211_tx_queue_params p; 607 struct ieee80211_tx_queue_params p;
608}; 608};
609 609
610struct b43_wldev; 610struct b43_wl;
611
612/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
613struct b43_wl {
614 /* Pointer to the active wireless device on this chip */
615 struct b43_wldev *current_dev;
616 /* Pointer to the ieee80211 hardware data structure */
617 struct ieee80211_hw *hw;
618
619 /* Global driver mutex. Every operation must run with this mutex locked. */
620 struct mutex mutex;
621 /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ
622 * handler, only. This basically is just the IRQ mask register. */
623 spinlock_t hardirq_lock;
624
625 /* The number of queues that were registered with the mac80211 subsystem
626 * initially. This is a backup copy of hw->queues in case hw->queues has
627 * to be dynamically lowered at runtime (Firmware does not support QoS).
628 * hw->queues has to be restored to the original value before unregistering
629 * from the mac80211 subsystem. */
630 u16 mac80211_initially_registered_queues;
631
632 /* R/W lock for data transmission.
633 * Transmissions on 2+ queues can run concurrently, but somebody else
634 * might sync with TX by write_lock_irqsave()'ing. */
635 rwlock_t tx_lock;
636 /* Lock for LEDs access. */
637 spinlock_t leds_lock;
638
639 /* We can only have one operating interface (802.11 core)
640 * at a time. General information about this interface follows.
641 */
642
643 struct ieee80211_vif *vif;
644 /* The MAC address of the operating interface. */
645 u8 mac_addr[ETH_ALEN];
646 /* Current BSSID */
647 u8 bssid[ETH_ALEN];
648 /* Interface type. (NL80211_IFTYPE_XXX) */
649 int if_type;
650 /* Is the card operating in AP, STA or IBSS mode? */
651 bool operating;
652 /* filter flags */
653 unsigned int filter_flags;
654 /* Stats about the wireless interface */
655 struct ieee80211_low_level_stats ieee_stats;
656
657#ifdef CONFIG_B43_HWRNG
658 struct hwrng rng;
659 bool rng_initialized;
660 char rng_name[30 + 1];
661#endif /* CONFIG_B43_HWRNG */
662
663 /* List of all wireless devices on this chip */
664 struct list_head devlist;
665 u8 nr_devs;
666
667 bool radiotap_enabled;
668 bool radio_enabled;
669
670 /* The beacon we are currently using (AP or IBSS mode). */
671 struct sk_buff *current_beacon;
672 bool beacon0_uploaded;
673 bool beacon1_uploaded;
674 bool beacon_templates_virgin; /* Never wrote the templates? */
675 struct work_struct beacon_update_trigger;
676
677 /* The current QOS parameters for the 4 queues. */
678 struct b43_qos_params qos_params[4];
679
680 /* Work for adjustment of the transmission power.
681 * This is scheduled when we determine that the actual TX output
682 * power doesn't match what we want. */
683 struct work_struct txpower_adjust_work;
684
685 /* Packet transmit work */
686 struct work_struct tx_work;
687 /* Queue of packets to be transmitted. */
688 struct sk_buff_head tx_queue;
689};
690 611
691/* The type of the firmware file. */ 612/* The type of the firmware file. */
692enum b43_firmware_file_type { 613enum b43_firmware_file_type {
@@ -768,13 +689,10 @@ struct b43_wldev {
768 /* The device initialization status. 689 /* The device initialization status.
769 * Use b43_status() to query. */ 690 * Use b43_status() to query. */
770 atomic_t __init_status; 691 atomic_t __init_status;
771 /* Saved init status for handling suspend. */
772 int suspend_init_status;
773 692
774 bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */ 693 bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */
775 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */ 694 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */
776 bool radio_hw_enable; /* saved state of radio hardware enabled state */ 695 bool radio_hw_enable; /* saved state of radio hardware enabled state */
777 bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
778 bool qos_enabled; /* TRUE, if QoS is used. */ 696 bool qos_enabled; /* TRUE, if QoS is used. */
779 bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */ 697 bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
780 698
@@ -794,12 +712,6 @@ struct b43_wldev {
794 /* Various statistics about the physical device. */ 712 /* Various statistics about the physical device. */
795 struct b43_stats stats; 713 struct b43_stats stats;
796 714
797 /* The device LEDs. */
798 struct b43_led led_tx;
799 struct b43_led led_rx;
800 struct b43_led led_assoc;
801 struct b43_led led_radio;
802
803 /* Reason code of the last interrupt. */ 715 /* Reason code of the last interrupt. */
804 u32 irq_reason; 716 u32 irq_reason;
805 u32 dma_reason[6]; 717 u32 dma_reason[6];
@@ -830,9 +742,104 @@ struct b43_wldev {
830 /* Debugging stuff follows. */ 742 /* Debugging stuff follows. */
831#ifdef CONFIG_B43_DEBUG 743#ifdef CONFIG_B43_DEBUG
832 struct b43_dfsentry *dfsentry; 744 struct b43_dfsentry *dfsentry;
745 unsigned int irq_count;
746 unsigned int irq_bit_count[32];
747 unsigned int tx_count;
748 unsigned int rx_count;
833#endif 749#endif
834}; 750};
835 751
752/*
753 * Include goes here to avoid a dependency problem.
754 * A better fix would be to integrate xmit.h into b43.h.
755 */
756#include "xmit.h"
757
758/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
759struct b43_wl {
760 /* Pointer to the active wireless device on this chip */
761 struct b43_wldev *current_dev;
762 /* Pointer to the ieee80211 hardware data structure */
763 struct ieee80211_hw *hw;
764
765 /* Global driver mutex. Every operation must run with this mutex locked. */
766 struct mutex mutex;
767 /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ
768 * handler, only. This basically is just the IRQ mask register. */
769 spinlock_t hardirq_lock;
770
771 /* The number of queues that were registered with the mac80211 subsystem
772 * initially. This is a backup copy of hw->queues in case hw->queues has
773 * to be dynamically lowered at runtime (Firmware does not support QoS).
774 * hw->queues has to be restored to the original value before unregistering
775 * from the mac80211 subsystem. */
776 u16 mac80211_initially_registered_queues;
777
778 /* We can only have one operating interface (802.11 core)
779 * at a time. General information about this interface follows.
780 */
781
782 struct ieee80211_vif *vif;
783 /* The MAC address of the operating interface. */
784 u8 mac_addr[ETH_ALEN];
785 /* Current BSSID */
786 u8 bssid[ETH_ALEN];
787 /* Interface type. (NL80211_IFTYPE_XXX) */
788 int if_type;
789 /* Is the card operating in AP, STA or IBSS mode? */
790 bool operating;
791 /* filter flags */
792 unsigned int filter_flags;
793 /* Stats about the wireless interface */
794 struct ieee80211_low_level_stats ieee_stats;
795
796#ifdef CONFIG_B43_HWRNG
797 struct hwrng rng;
798 bool rng_initialized;
799 char rng_name[30 + 1];
800#endif /* CONFIG_B43_HWRNG */
801
802 /* List of all wireless devices on this chip */
803 struct list_head devlist;
804 u8 nr_devs;
805
806 bool radiotap_enabled;
807 bool radio_enabled;
808
809 /* The beacon we are currently using (AP or IBSS mode). */
810 struct sk_buff *current_beacon;
811 bool beacon0_uploaded;
812 bool beacon1_uploaded;
813 bool beacon_templates_virgin; /* Never wrote the templates? */
814 struct work_struct beacon_update_trigger;
815
816 /* The current QOS parameters for the 4 queues. */
817 struct b43_qos_params qos_params[4];
818
819 /* Work for adjustment of the transmission power.
820 * This is scheduled when we determine that the actual TX output
821 * power doesn't match what we want. */
822 struct work_struct txpower_adjust_work;
823
824 /* Packet transmit work */
825 struct work_struct tx_work;
826 /* Queue of packets to be transmitted. */
827 struct sk_buff_head tx_queue;
828
829 /* The device LEDs. */
830 struct b43_leds leds;
831
832#ifdef CONFIG_B43_PIO
833 /*
834 * RX/TX header/tail buffers used by the frame transmit functions.
835 */
836 struct b43_rxhdr_fw4 rxhdr;
837 struct b43_txhdr txhdr;
838 u8 rx_tail[4];
839 u8 tx_tail[4];
840#endif /* CONFIG_B43_PIO */
841};
842
836static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) 843static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
837{ 844{
838 return hw->priv; 845 return hw->priv;
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 8f64943e3f60..80b19a44a407 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -689,6 +689,7 @@ static void b43_add_dynamic_debug(struct b43_wldev *dev)
689 add_dyn_dbg("debug_lo", B43_DBG_LO, 0); 689 add_dyn_dbg("debug_lo", B43_DBG_LO, 0);
690 add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0); 690 add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0);
691 add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0); 691 add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0);
692 add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, 0);
692 693
693#undef add_dyn_dbg 694#undef add_dyn_dbg
694} 695}
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index e47b4b488b04..822aad8842f4 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -13,6 +13,7 @@ enum b43_dyndbg { /* Dynamic debugging features */
13 B43_DBG_LO, 13 B43_DBG_LO,
14 B43_DBG_FIRMWARE, 14 B43_DBG_FIRMWARE,
15 B43_DBG_KEYS, 15 B43_DBG_KEYS,
16 B43_DBG_VERBOSESTATS,
16 __B43_NR_DYNDBG, 17 __B43_NR_DYNDBG,
17}; 18};
18 19
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index a467ee260a19..8701034569fa 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1428,9 +1428,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1428 ring->nr_failed_tx_packets++; 1428 ring->nr_failed_tx_packets++;
1429 ring->nr_total_packet_tries += status->frame_count; 1429 ring->nr_total_packet_tries += status->frame_count;
1430#endif /* DEBUG */ 1430#endif /* DEBUG */
1431 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb); 1431 ieee80211_tx_status(dev->wl->hw, meta->skb);
1432 1432
1433 /* skb is freed by ieee80211_tx_status_irqsafe() */ 1433 /* skb is freed by ieee80211_tx_status() */
1434 meta->skb = NULL; 1434 meta->skb = NULL;
1435 } else { 1435 } else {
1436 /* No need to call free_descriptor_buffer here, as 1436 /* No need to call free_descriptor_buffer here, as
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index c8b317094c31..1e8dba488004 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -34,57 +34,88 @@
34static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index, 34static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index,
35 bool activelow) 35 bool activelow)
36{ 36{
37 struct b43_wl *wl = dev->wl;
38 unsigned long flags;
39 u16 ctl; 37 u16 ctl;
40 38
41 spin_lock_irqsave(&wl->leds_lock, flags);
42 ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); 39 ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
43 if (activelow) 40 if (activelow)
44 ctl &= ~(1 << led_index); 41 ctl &= ~(1 << led_index);
45 else 42 else
46 ctl |= (1 << led_index); 43 ctl |= (1 << led_index);
47 b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); 44 b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
48 spin_unlock_irqrestore(&wl->leds_lock, flags);
49} 45}
50 46
51static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index, 47static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index,
52 bool activelow) 48 bool activelow)
53{ 49{
54 struct b43_wl *wl = dev->wl;
55 unsigned long flags;
56 u16 ctl; 50 u16 ctl;
57 51
58 spin_lock_irqsave(&wl->leds_lock, flags);
59 ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); 52 ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
60 if (activelow) 53 if (activelow)
61 ctl |= (1 << led_index); 54 ctl |= (1 << led_index);
62 else 55 else
63 ctl &= ~(1 << led_index); 56 ctl &= ~(1 << led_index);
64 b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); 57 b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
65 spin_unlock_irqrestore(&wl->leds_lock, flags);
66} 58}
67 59
68/* Callback from the LED subsystem. */ 60static void b43_led_update(struct b43_wldev *dev,
69static void b43_led_brightness_set(struct led_classdev *led_dev, 61 struct b43_led *led)
70 enum led_brightness brightness)
71{ 62{
72 struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
73 struct b43_wldev *dev = led->dev;
74 bool radio_enabled; 63 bool radio_enabled;
64 bool turn_on;
75 65
76 if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED)) 66 if (!led->wl)
77 return; 67 return;
78 68
79 /* Checking the radio-enabled status here is slightly racy,
80 * but we want to avoid the locking overhead and we don't care
81 * whether the LED has the wrong state for a second. */
82 radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable); 69 radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable);
83 70
84 if (brightness == LED_OFF || !radio_enabled) 71 /* The led->state read is racy, but we don't care. In case we raced
85 b43_led_turn_off(dev, led->index, led->activelow); 72 * with the brightness_set handler, we will be called again soon
73 * to fixup our state. */
74 if (radio_enabled)
75 turn_on = atomic_read(&led->state) != LED_OFF;
86 else 76 else
77 turn_on = 0;
78 if (turn_on == led->hw_state)
79 return;
80 led->hw_state = turn_on;
81
82 if (turn_on)
87 b43_led_turn_on(dev, led->index, led->activelow); 83 b43_led_turn_on(dev, led->index, led->activelow);
84 else
85 b43_led_turn_off(dev, led->index, led->activelow);
86}
87
88static void b43_leds_work(struct work_struct *work)
89{
90 struct b43_leds *leds = container_of(work, struct b43_leds, work);
91 struct b43_wl *wl = container_of(leds, struct b43_wl, leds);
92 struct b43_wldev *dev;
93
94 mutex_lock(&wl->mutex);
95 dev = wl->current_dev;
96 if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED))
97 goto out_unlock;
98
99 b43_led_update(dev, &wl->leds.led_tx);
100 b43_led_update(dev, &wl->leds.led_rx);
101 b43_led_update(dev, &wl->leds.led_radio);
102 b43_led_update(dev, &wl->leds.led_assoc);
103
104out_unlock:
105 mutex_unlock(&wl->mutex);
106}
107
108/* Callback from the LED subsystem. */
109static void b43_led_brightness_set(struct led_classdev *led_dev,
110 enum led_brightness brightness)
111{
112 struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
113 struct b43_wl *wl = led->wl;
114
115 if (likely(!wl->leds.stop)) {
116 atomic_set(&led->state, brightness);
117 ieee80211_queue_work(wl->hw, &wl->leds.work);
118 }
88} 119}
89 120
90static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, 121static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
@@ -93,15 +124,15 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
93{ 124{
94 int err; 125 int err;
95 126
96 b43_led_turn_off(dev, led_index, activelow); 127 if (led->wl)
97 if (led->dev)
98 return -EEXIST; 128 return -EEXIST;
99 if (!default_trigger) 129 if (!default_trigger)
100 return -EINVAL; 130 return -EINVAL;
101 led->dev = dev; 131 led->wl = dev->wl;
102 led->index = led_index; 132 led->index = led_index;
103 led->activelow = activelow; 133 led->activelow = activelow;
104 strncpy(led->name, name, sizeof(led->name)); 134 strncpy(led->name, name, sizeof(led->name));
135 atomic_set(&led->state, 0);
105 136
106 led->led_dev.name = led->name; 137 led->led_dev.name = led->name;
107 led->led_dev.default_trigger = default_trigger; 138 led->led_dev.default_trigger = default_trigger;
@@ -110,19 +141,19 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
110 err = led_classdev_register(dev->dev->dev, &led->led_dev); 141 err = led_classdev_register(dev->dev->dev, &led->led_dev);
111 if (err) { 142 if (err) {
112 b43warn(dev->wl, "LEDs: Failed to register %s\n", name); 143 b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
113 led->dev = NULL; 144 led->wl = NULL;
114 return err; 145 return err;
115 } 146 }
147
116 return 0; 148 return 0;
117} 149}
118 150
119static void b43_unregister_led(struct b43_led *led) 151static void b43_unregister_led(struct b43_led *led)
120{ 152{
121 if (!led->dev) 153 if (!led->wl)
122 return; 154 return;
123 led_classdev_unregister(&led->led_dev); 155 led_classdev_unregister(&led->led_dev);
124 b43_led_turn_off(led->dev, led->index, led->activelow); 156 led->wl = NULL;
125 led->dev = NULL;
126} 157}
127 158
128static void b43_map_led(struct b43_wldev *dev, 159static void b43_map_led(struct b43_wldev *dev,
@@ -137,24 +168,20 @@ static void b43_map_led(struct b43_wldev *dev,
137 * generic LED triggers. */ 168 * generic LED triggers. */
138 switch (behaviour) { 169 switch (behaviour) {
139 case B43_LED_INACTIVE: 170 case B43_LED_INACTIVE:
140 break;
141 case B43_LED_OFF: 171 case B43_LED_OFF:
142 b43_led_turn_off(dev, led_index, activelow);
143 break;
144 case B43_LED_ON: 172 case B43_LED_ON:
145 b43_led_turn_on(dev, led_index, activelow);
146 break; 173 break;
147 case B43_LED_ACTIVITY: 174 case B43_LED_ACTIVITY:
148 case B43_LED_TRANSFER: 175 case B43_LED_TRANSFER:
149 case B43_LED_APTRANSFER: 176 case B43_LED_APTRANSFER:
150 snprintf(name, sizeof(name), 177 snprintf(name, sizeof(name),
151 "b43-%s::tx", wiphy_name(hw->wiphy)); 178 "b43-%s::tx", wiphy_name(hw->wiphy));
152 b43_register_led(dev, &dev->led_tx, name, 179 b43_register_led(dev, &dev->wl->leds.led_tx, name,
153 ieee80211_get_tx_led_name(hw), 180 ieee80211_get_tx_led_name(hw),
154 led_index, activelow); 181 led_index, activelow);
155 snprintf(name, sizeof(name), 182 snprintf(name, sizeof(name),
156 "b43-%s::rx", wiphy_name(hw->wiphy)); 183 "b43-%s::rx", wiphy_name(hw->wiphy));
157 b43_register_led(dev, &dev->led_rx, name, 184 b43_register_led(dev, &dev->wl->leds.led_rx, name,
158 ieee80211_get_rx_led_name(hw), 185 ieee80211_get_rx_led_name(hw),
159 led_index, activelow); 186 led_index, activelow);
160 break; 187 break;
@@ -164,18 +191,15 @@ static void b43_map_led(struct b43_wldev *dev,
164 case B43_LED_MODE_BG: 191 case B43_LED_MODE_BG:
165 snprintf(name, sizeof(name), 192 snprintf(name, sizeof(name),
166 "b43-%s::radio", wiphy_name(hw->wiphy)); 193 "b43-%s::radio", wiphy_name(hw->wiphy));
167 b43_register_led(dev, &dev->led_radio, name, 194 b43_register_led(dev, &dev->wl->leds.led_radio, name,
168 ieee80211_get_radio_led_name(hw), 195 ieee80211_get_radio_led_name(hw),
169 led_index, activelow); 196 led_index, activelow);
170 /* Sync the RF-kill LED state with radio and switch states. */
171 if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev))
172 b43_led_turn_on(dev, led_index, activelow);
173 break; 197 break;
174 case B43_LED_WEIRD: 198 case B43_LED_WEIRD:
175 case B43_LED_ASSOC: 199 case B43_LED_ASSOC:
176 snprintf(name, sizeof(name), 200 snprintf(name, sizeof(name),
177 "b43-%s::assoc", wiphy_name(hw->wiphy)); 201 "b43-%s::assoc", wiphy_name(hw->wiphy));
178 b43_register_led(dev, &dev->led_assoc, name, 202 b43_register_led(dev, &dev->wl->leds.led_assoc, name,
179 ieee80211_get_assoc_led_name(hw), 203 ieee80211_get_assoc_led_name(hw),
180 led_index, activelow); 204 led_index, activelow);
181 break; 205 break;
@@ -186,58 +210,150 @@ static void b43_map_led(struct b43_wldev *dev,
186 } 210 }
187} 211}
188 212
189void b43_leds_init(struct b43_wldev *dev) 213static void b43_led_get_sprominfo(struct b43_wldev *dev,
214 unsigned int led_index,
215 enum b43_led_behaviour *behaviour,
216 bool *activelow)
190{ 217{
191 struct ssb_bus *bus = dev->dev->bus; 218 struct ssb_bus *bus = dev->dev->bus;
192 u8 sprom[4]; 219 u8 sprom[4];
193 int i;
194 enum b43_led_behaviour behaviour;
195 bool activelow;
196 220
197 sprom[0] = bus->sprom.gpio0; 221 sprom[0] = bus->sprom.gpio0;
198 sprom[1] = bus->sprom.gpio1; 222 sprom[1] = bus->sprom.gpio1;
199 sprom[2] = bus->sprom.gpio2; 223 sprom[2] = bus->sprom.gpio2;
200 sprom[3] = bus->sprom.gpio3; 224 sprom[3] = bus->sprom.gpio3;
201 225
202 for (i = 0; i < 4; i++) { 226 if (sprom[led_index] == 0xFF) {
203 if (sprom[i] == 0xFF) { 227 /* There is no LED information in the SPROM
204 /* There is no LED information in the SPROM 228 * for this LED. Hardcode it here. */
205 * for this LED. Hardcode it here. */ 229 *activelow = 0;
206 activelow = 0; 230 switch (led_index) {
207 switch (i) { 231 case 0:
208 case 0: 232 *behaviour = B43_LED_ACTIVITY;
209 behaviour = B43_LED_ACTIVITY; 233 *activelow = 1;
210 activelow = 1; 234 if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
211 if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) 235 *behaviour = B43_LED_RADIO_ALL;
212 behaviour = B43_LED_RADIO_ALL; 236 break;
213 break; 237 case 1:
214 case 1: 238 *behaviour = B43_LED_RADIO_B;
215 behaviour = B43_LED_RADIO_B; 239 if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK)
216 if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK) 240 *behaviour = B43_LED_ASSOC;
217 behaviour = B43_LED_ASSOC; 241 break;
218 break; 242 case 2:
219 case 2: 243 *behaviour = B43_LED_RADIO_A;
220 behaviour = B43_LED_RADIO_A; 244 break;
221 break; 245 case 3:
222 case 3: 246 *behaviour = B43_LED_OFF;
223 behaviour = B43_LED_OFF; 247 break;
224 break; 248 default:
225 default: 249 B43_WARN_ON(1);
226 B43_WARN_ON(1); 250 return;
227 return; 251 }
228 } 252 } else {
253 *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
254 *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
255 }
256}
257
258void b43_leds_init(struct b43_wldev *dev)
259{
260 struct b43_led *led;
261 unsigned int i;
262 enum b43_led_behaviour behaviour;
263 bool activelow;
264
265 /* Sync the RF-kill LED state (if we have one) with radio and switch states. */
266 led = &dev->wl->leds.led_radio;
267 if (led->wl) {
268 if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
269 b43_led_turn_on(dev, led->index, led->activelow);
270 led->hw_state = 1;
271 atomic_set(&led->state, 1);
229 } else { 272 } else {
230 behaviour = sprom[i] & B43_LED_BEHAVIOUR; 273 b43_led_turn_off(dev, led->index, led->activelow);
231 activelow = !!(sprom[i] & B43_LED_ACTIVELOW); 274 led->hw_state = 0;
275 atomic_set(&led->state, 0);
232 } 276 }
233 b43_map_led(dev, i, behaviour, activelow);
234 } 277 }
278
279 /* Initialize TX/RX/ASSOC leds */
280 led = &dev->wl->leds.led_tx;
281 if (led->wl) {
282 b43_led_turn_off(dev, led->index, led->activelow);
283 led->hw_state = 0;
284 atomic_set(&led->state, 0);
285 }
286 led = &dev->wl->leds.led_rx;
287 if (led->wl) {
288 b43_led_turn_off(dev, led->index, led->activelow);
289 led->hw_state = 0;
290 atomic_set(&led->state, 0);
291 }
292 led = &dev->wl->leds.led_assoc;
293 if (led->wl) {
294 b43_led_turn_off(dev, led->index, led->activelow);
295 led->hw_state = 0;
296 atomic_set(&led->state, 0);
297 }
298
299 /* Initialize other LED states. */
300 for (i = 0; i < B43_MAX_NR_LEDS; i++) {
301 b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
302 switch (behaviour) {
303 case B43_LED_OFF:
304 b43_led_turn_off(dev, i, activelow);
305 break;
306 case B43_LED_ON:
307 b43_led_turn_on(dev, i, activelow);
308 break;
309 default:
310 /* Leave others as-is. */
311 break;
312 }
313 }
314
315 dev->wl->leds.stop = 0;
235} 316}
236 317
237void b43_leds_exit(struct b43_wldev *dev) 318void b43_leds_exit(struct b43_wldev *dev)
238{ 319{
239 b43_unregister_led(&dev->led_tx); 320 struct b43_leds *leds = &dev->wl->leds;
240 b43_unregister_led(&dev->led_rx); 321
241 b43_unregister_led(&dev->led_assoc); 322 b43_led_turn_off(dev, leds->led_tx.index, leds->led_tx.activelow);
242 b43_unregister_led(&dev->led_radio); 323 b43_led_turn_off(dev, leds->led_rx.index, leds->led_rx.activelow);
324 b43_led_turn_off(dev, leds->led_assoc.index, leds->led_assoc.activelow);
325 b43_led_turn_off(dev, leds->led_radio.index, leds->led_radio.activelow);
326}
327
328void b43_leds_stop(struct b43_wldev *dev)
329{
330 struct b43_leds *leds = &dev->wl->leds;
331
332 leds->stop = 1;
333 cancel_work_sync(&leds->work);
334}
335
336void b43_leds_register(struct b43_wldev *dev)
337{
338 unsigned int i;
339 enum b43_led_behaviour behaviour;
340 bool activelow;
341
342 INIT_WORK(&dev->wl->leds.work, b43_leds_work);
343
344 /* Register the LEDs to the LED subsystem. */
345 for (i = 0; i < B43_MAX_NR_LEDS; i++) {
346 b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
347 b43_map_led(dev, i, behaviour, activelow);
348 }
349}
350
351void b43_leds_unregister(struct b43_wl *wl)
352{
353 struct b43_leds *leds = &wl->leds;
354
355 b43_unregister_led(&leds->led_tx);
356 b43_unregister_led(&leds->led_rx);
357 b43_unregister_led(&leds->led_assoc);
358 b43_unregister_led(&leds->led_radio);
243} 359}
diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h
index b8b1dd521243..32b66d53cdac 100644
--- a/drivers/net/wireless/b43/leds.h
+++ b/drivers/net/wireless/b43/leds.h
@@ -1,18 +1,20 @@
1#ifndef B43_LEDS_H_ 1#ifndef B43_LEDS_H_
2#define B43_LEDS_H_ 2#define B43_LEDS_H_
3 3
4struct b43_wl;
4struct b43_wldev; 5struct b43_wldev;
5 6
6#ifdef CONFIG_B43_LEDS 7#ifdef CONFIG_B43_LEDS
7 8
8#include <linux/types.h> 9#include <linux/types.h>
9#include <linux/leds.h> 10#include <linux/leds.h>
11#include <linux/workqueue.h>
10 12
11 13
12#define B43_LED_MAX_NAME_LEN 31 14#define B43_LED_MAX_NAME_LEN 31
13 15
14struct b43_led { 16struct b43_led {
15 struct b43_wldev *dev; 17 struct b43_wl *wl;
16 /* The LED class device */ 18 /* The LED class device */
17 struct led_classdev led_dev; 19 struct led_classdev led_dev;
18 /* The index number of the LED. */ 20 /* The index number of the LED. */
@@ -22,8 +24,24 @@ struct b43_led {
22 bool activelow; 24 bool activelow;
23 /* The unique name string for this LED device. */ 25 /* The unique name string for this LED device. */
24 char name[B43_LED_MAX_NAME_LEN + 1]; 26 char name[B43_LED_MAX_NAME_LEN + 1];
27 /* The current status of the LED. This is updated locklessly. */
28 atomic_t state;
29 /* The active state in hardware. */
30 bool hw_state;
25}; 31};
26 32
33struct b43_leds {
34 struct b43_led led_tx;
35 struct b43_led led_rx;
36 struct b43_led led_radio;
37 struct b43_led led_assoc;
38
39 bool stop;
40 struct work_struct work;
41};
42
43#define B43_MAX_NR_LEDS 4
44
27#define B43_LED_BEHAVIOUR 0x7F 45#define B43_LED_BEHAVIOUR 0x7F
28#define B43_LED_ACTIVELOW 0x80 46#define B43_LED_ACTIVELOW 0x80
29/* LED behaviour values */ 47/* LED behaviour values */
@@ -42,23 +60,35 @@ enum b43_led_behaviour {
42 B43_LED_INACTIVE, 60 B43_LED_INACTIVE,
43}; 61};
44 62
63void b43_leds_register(struct b43_wldev *dev);
64void b43_leds_unregister(struct b43_wl *wl);
45void b43_leds_init(struct b43_wldev *dev); 65void b43_leds_init(struct b43_wldev *dev);
46void b43_leds_exit(struct b43_wldev *dev); 66void b43_leds_exit(struct b43_wldev *dev);
67void b43_leds_stop(struct b43_wldev *dev);
47 68
48 69
49#else /* CONFIG_B43_LEDS */ 70#else /* CONFIG_B43_LEDS */
50/* LED support disabled */ 71/* LED support disabled */
51 72
52struct b43_led { 73struct b43_leds {
53 /* empty */ 74 /* empty */
54}; 75};
55 76
77static inline void b43_leds_register(struct b43_wldev *dev)
78{
79}
80static inline void b43_leds_unregister(struct b43_wl *wl)
81{
82}
56static inline void b43_leds_init(struct b43_wldev *dev) 83static inline void b43_leds_init(struct b43_wldev *dev)
57{ 84{
58} 85}
59static inline void b43_leds_exit(struct b43_wldev *dev) 86static inline void b43_leds_exit(struct b43_wldev *dev)
60{ 87{
61} 88}
89static inline void b43_leds_stop(struct b43_wldev *dev)
90{
91}
62#endif /* CONFIG_B43_LEDS */ 92#endif /* CONFIG_B43_LEDS */
63 93
64#endif /* B43_LEDS_H_ */ 94#endif /* B43_LEDS_H_ */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index e789792a36bc..86f35827f008 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -8,6 +8,9 @@
8 Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> 8 Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
9 Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> 9 Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10 10
11 SDIO support
12 Copyright (c) 2009 Albert Herranz <albert_herranz@yahoo.es>
13
11 Some parts of the code in this file are derived from the ipw2200 14 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation. 15 driver Copyright(c) 2003 - 2004 Intel Corporation.
13 16
@@ -53,6 +56,8 @@
53#include "xmit.h" 56#include "xmit.h"
54#include "lo.h" 57#include "lo.h"
55#include "pcmcia.h" 58#include "pcmcia.h"
59#include "sdio.h"
60#include <linux/mmc/sdio_func.h>
56 61
57MODULE_DESCRIPTION("Broadcom B43 wireless driver"); 62MODULE_DESCRIPTION("Broadcom B43 wireless driver");
58MODULE_AUTHOR("Martin Langer"); 63MODULE_AUTHOR("Martin Langer");
@@ -1587,7 +1592,7 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
1587 mutex_lock(&wl->mutex); 1592 mutex_lock(&wl->mutex);
1588 dev = wl->current_dev; 1593 dev = wl->current_dev;
1589 if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) { 1594 if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
1590 if (0 /*FIXME dev->dev->bus->bustype == SSB_BUSTYPE_SDIO*/) { 1595 if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
1591 /* wl->mutex is enough. */ 1596 /* wl->mutex is enough. */
1592 b43_do_beacon_update_trigger_work(dev); 1597 b43_do_beacon_update_trigger_work(dev);
1593 mmiowb(); 1598 mmiowb();
@@ -1825,6 +1830,16 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1825 1830
1826 /* Re-enable interrupts on the device by restoring the current interrupt mask. */ 1831 /* Re-enable interrupts on the device by restoring the current interrupt mask. */
1827 b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); 1832 b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
1833
1834#if B43_DEBUG
1835 if (b43_debug(dev, B43_DBG_VERBOSESTATS)) {
1836 dev->irq_count++;
1837 for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) {
1838 if (reason & (1 << i))
1839 dev->irq_bit_count[i]++;
1840 }
1841 }
1842#endif
1828} 1843}
1829 1844
1830/* Interrupt thread handler. Handles device interrupts in thread context. */ 1845/* Interrupt thread handler. Handles device interrupts in thread context. */
@@ -1905,6 +1920,21 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
1905 return ret; 1920 return ret;
1906} 1921}
1907 1922
1923/* SDIO interrupt handler. This runs in process context. */
1924static void b43_sdio_interrupt_handler(struct b43_wldev *dev)
1925{
1926 struct b43_wl *wl = dev->wl;
1927 irqreturn_t ret;
1928
1929 mutex_lock(&wl->mutex);
1930
1931 ret = b43_do_interrupt(dev);
1932 if (ret == IRQ_WAKE_THREAD)
1933 b43_do_interrupt_thread(dev);
1934
1935 mutex_unlock(&wl->mutex);
1936}
1937
1908void b43_do_release_fw(struct b43_firmware_file *fw) 1938void b43_do_release_fw(struct b43_firmware_file *fw)
1909{ 1939{
1910 release_firmware(fw->data); 1940 release_firmware(fw->data);
@@ -2645,6 +2675,20 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
2645 cfp_pretbtt = 50; 2675 cfp_pretbtt = 50;
2646 } 2676 }
2647 b43_write16(dev, 0x612, cfp_pretbtt); 2677 b43_write16(dev, 0x612, cfp_pretbtt);
2678
2679 /* FIXME: We don't currently implement the PMQ mechanism,
2680 * so always disable it. If we want to implement PMQ,
2681 * we need to enable it here (clear DISCPMQ) in AP mode.
2682 */
2683 if (0 /* ctl & B43_MACCTL_AP */) {
2684 b43_write32(dev, B43_MMIO_MACCTL,
2685 b43_read32(dev, B43_MMIO_MACCTL)
2686 & ~B43_MACCTL_DISCPMQ);
2687 } else {
2688 b43_write32(dev, B43_MMIO_MACCTL,
2689 b43_read32(dev, B43_MMIO_MACCTL)
2690 | B43_MACCTL_DISCPMQ);
2691 }
2648} 2692}
2649 2693
2650static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm) 2694static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm)
@@ -2873,6 +2917,27 @@ static void b43_periodic_every15sec(struct b43_wldev *dev)
2873 2917
2874 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); 2918 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
2875 wmb(); 2919 wmb();
2920
2921#if B43_DEBUG
2922 if (b43_debug(dev, B43_DBG_VERBOSESTATS)) {
2923 unsigned int i;
2924
2925 b43dbg(dev->wl, "Stats: %7u IRQs/sec, %7u TX/sec, %7u RX/sec\n",
2926 dev->irq_count / 15,
2927 dev->tx_count / 15,
2928 dev->rx_count / 15);
2929 dev->irq_count = 0;
2930 dev->tx_count = 0;
2931 dev->rx_count = 0;
2932 for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) {
2933 if (dev->irq_bit_count[i]) {
2934 b43dbg(dev->wl, "Stats: %7u IRQ-%02u/sec (0x%08X)\n",
2935 dev->irq_bit_count[i] / 15, i, (1 << i));
2936 dev->irq_bit_count[i] = 0;
2937 }
2938 }
2939 }
2940#endif
2876} 2941}
2877 2942
2878static void do_periodic_work(struct b43_wldev *dev) 2943static void do_periodic_work(struct b43_wldev *dev)
@@ -3002,14 +3067,18 @@ static void b43_security_init(struct b43_wldev *dev)
3002static int b43_rng_read(struct hwrng *rng, u32 *data) 3067static int b43_rng_read(struct hwrng *rng, u32 *data)
3003{ 3068{
3004 struct b43_wl *wl = (struct b43_wl *)rng->priv; 3069 struct b43_wl *wl = (struct b43_wl *)rng->priv;
3070 struct b43_wldev *dev;
3071 int count = -ENODEV;
3005 3072
3006 /* FIXME: We need to take wl->mutex here to make sure the device 3073 mutex_lock(&wl->mutex);
3007 * is not going away from under our ass. However it could deadlock 3074 dev = wl->current_dev;
3008 * with hwrng internal locking. */ 3075 if (likely(dev && b43_status(dev) >= B43_STAT_INITIALIZED)) {
3009 3076 *data = b43_read16(dev, B43_MMIO_RNG);
3010 *data = b43_read16(wl->current_dev, B43_MMIO_RNG); 3077 count = sizeof(u16);
3078 }
3079 mutex_unlock(&wl->mutex);
3011 3080
3012 return (sizeof(u16)); 3081 return count;
3013} 3082}
3014#endif /* CONFIG_B43_HWRNG */ 3083#endif /* CONFIG_B43_HWRNG */
3015 3084
@@ -3068,6 +3137,9 @@ static void b43_tx_work(struct work_struct *work)
3068 dev_kfree_skb(skb); /* Drop it */ 3137 dev_kfree_skb(skb); /* Drop it */
3069 } 3138 }
3070 3139
3140#if B43_DEBUG
3141 dev->tx_count++;
3142#endif
3071 mutex_unlock(&wl->mutex); 3143 mutex_unlock(&wl->mutex);
3072} 3144}
3073 3145
@@ -3802,6 +3874,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
3802{ 3874{
3803 struct b43_wl *wl = dev->wl; 3875 struct b43_wl *wl = dev->wl;
3804 struct b43_wldev *orig_dev; 3876 struct b43_wldev *orig_dev;
3877 u32 mask;
3805 3878
3806redo: 3879redo:
3807 if (!dev || b43_status(dev) < B43_STAT_STARTED) 3880 if (!dev || b43_status(dev) < B43_STAT_STARTED)
@@ -3820,7 +3893,7 @@ redo:
3820 3893
3821 /* Disable interrupts on the device. */ 3894 /* Disable interrupts on the device. */
3822 b43_set_status(dev, B43_STAT_INITIALIZED); 3895 b43_set_status(dev, B43_STAT_INITIALIZED);
3823 if (0 /*FIXME dev->dev->bus->bustype == SSB_BUSTYPE_SDIO*/) { 3896 if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
3824 /* wl->mutex is locked. That is enough. */ 3897 /* wl->mutex is locked. That is enough. */
3825 b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); 3898 b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
3826 b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ 3899 b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
@@ -3830,10 +3903,15 @@ redo:
3830 b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ 3903 b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
3831 spin_unlock_irq(&wl->hardirq_lock); 3904 spin_unlock_irq(&wl->hardirq_lock);
3832 } 3905 }
3833 /* Synchronize the interrupt handlers. Unlock to avoid deadlocks. */ 3906 /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */
3834 orig_dev = dev; 3907 orig_dev = dev;
3835 mutex_unlock(&wl->mutex); 3908 mutex_unlock(&wl->mutex);
3836 synchronize_irq(dev->dev->irq); 3909 if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
3910 b43_sdio_free_irq(dev);
3911 } else {
3912 synchronize_irq(dev->dev->irq);
3913 free_irq(dev->dev->irq, dev);
3914 }
3837 mutex_lock(&wl->mutex); 3915 mutex_lock(&wl->mutex);
3838 dev = wl->current_dev; 3916 dev = wl->current_dev;
3839 if (!dev) 3917 if (!dev)
@@ -3843,14 +3921,15 @@ redo:
3843 goto redo; 3921 goto redo;
3844 return dev; 3922 return dev;
3845 } 3923 }
3846 B43_WARN_ON(b43_read32(dev, B43_MMIO_GEN_IRQ_MASK)); 3924 mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
3925 B43_WARN_ON(mask != 0xFFFFFFFF && mask);
3847 3926
3848 /* Drain the TX queue */ 3927 /* Drain the TX queue */
3849 while (skb_queue_len(&wl->tx_queue)) 3928 while (skb_queue_len(&wl->tx_queue))
3850 dev_kfree_skb(skb_dequeue(&wl->tx_queue)); 3929 dev_kfree_skb(skb_dequeue(&wl->tx_queue));
3851 3930
3852 b43_mac_suspend(dev); 3931 b43_mac_suspend(dev);
3853 free_irq(dev->dev->irq, dev); 3932 b43_leds_exit(dev);
3854 b43dbg(wl, "Wireless interface stopped\n"); 3933 b43dbg(wl, "Wireless interface stopped\n");
3855 3934
3856 return dev; 3935 return dev;
@@ -3864,12 +3943,20 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
3864 B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED); 3943 B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED);
3865 3944
3866 drain_txstatus_queue(dev); 3945 drain_txstatus_queue(dev);
3867 err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler, 3946 if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
3868 b43_interrupt_thread_handler, 3947 err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler);
3869 IRQF_SHARED, KBUILD_MODNAME, dev); 3948 if (err) {
3870 if (err) { 3949 b43err(dev->wl, "Cannot request SDIO IRQ\n");
3871 b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); 3950 goto out;
3872 goto out; 3951 }
3952 } else {
3953 err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
3954 b43_interrupt_thread_handler,
3955 IRQF_SHARED, KBUILD_MODNAME, dev);
3956 if (err) {
3957 b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq);
3958 goto out;
3959 }
3873 } 3960 }
3874 3961
3875 /* We are ready to run. */ 3962 /* We are ready to run. */
@@ -3882,8 +3969,10 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
3882 /* Start maintainance work */ 3969 /* Start maintainance work */
3883 b43_periodic_tasks_setup(dev); 3970 b43_periodic_tasks_setup(dev);
3884 3971
3972 b43_leds_init(dev);
3973
3885 b43dbg(dev->wl, "Wireless interface started\n"); 3974 b43dbg(dev->wl, "Wireless interface started\n");
3886 out: 3975out:
3887 return err; 3976 return err;
3888} 3977}
3889 3978
@@ -4160,10 +4249,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
4160 macctl |= B43_MACCTL_PSM_JMP0; 4249 macctl |= B43_MACCTL_PSM_JMP0;
4161 b43_write32(dev, B43_MMIO_MACCTL, macctl); 4250 b43_write32(dev, B43_MMIO_MACCTL, macctl);
4162 4251
4163 if (!dev->suspend_in_progress) {
4164 b43_leds_exit(dev);
4165 b43_rng_exit(dev->wl);
4166 }
4167 b43_dma_free(dev); 4252 b43_dma_free(dev);
4168 b43_pio_free(dev); 4253 b43_pio_free(dev);
4169 b43_chip_exit(dev); 4254 b43_chip_exit(dev);
@@ -4180,7 +4265,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
4180/* Initialize a wireless core */ 4265/* Initialize a wireless core */
4181static int b43_wireless_core_init(struct b43_wldev *dev) 4266static int b43_wireless_core_init(struct b43_wldev *dev)
4182{ 4267{
4183 struct b43_wl *wl = dev->wl;
4184 struct ssb_bus *bus = dev->dev->bus; 4268 struct ssb_bus *bus = dev->dev->bus;
4185 struct ssb_sprom *sprom = &bus->sprom; 4269 struct ssb_sprom *sprom = &bus->sprom;
4186 struct b43_phy *phy = &dev->phy; 4270 struct b43_phy *phy = &dev->phy;
@@ -4264,7 +4348,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4264 /* Maximum Contention Window */ 4348 /* Maximum Contention Window */
4265 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); 4349 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
4266 4350
4267 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || B43_FORCE_PIO) { 4351 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
4352 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
4353 B43_FORCE_PIO) {
4268 dev->__using_pio_transfers = 1; 4354 dev->__using_pio_transfers = 1;
4269 err = b43_pio_init(dev); 4355 err = b43_pio_init(dev);
4270 } else { 4356 } else {
@@ -4280,15 +4366,13 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4280 ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)); 4366 ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW));
4281 b43_upload_card_macaddress(dev); 4367 b43_upload_card_macaddress(dev);
4282 b43_security_init(dev); 4368 b43_security_init(dev);
4283 if (!dev->suspend_in_progress) 4369
4284 b43_rng_init(wl); 4370 ieee80211_wake_queues(dev->wl->hw);
4285 4371
4286 ieee80211_wake_queues(dev->wl->hw); 4372 ieee80211_wake_queues(dev->wl->hw);
4287 4373
4288 b43_set_status(dev, B43_STAT_INITIALIZED); 4374 b43_set_status(dev, B43_STAT_INITIALIZED);
4289 4375
4290 if (!dev->suspend_in_progress)
4291 b43_leds_init(dev);
4292out: 4376out:
4293 return err; 4377 return err;
4294 4378
@@ -4837,7 +4921,6 @@ static int b43_wireless_init(struct ssb_device *dev)
4837 4921
4838 /* Initialize struct b43_wl */ 4922 /* Initialize struct b43_wl */
4839 wl->hw = hw; 4923 wl->hw = hw;
4840 spin_lock_init(&wl->leds_lock);
4841 mutex_init(&wl->mutex); 4924 mutex_init(&wl->mutex);
4842 spin_lock_init(&wl->hardirq_lock); 4925 spin_lock_init(&wl->hardirq_lock);
4843 INIT_LIST_HEAD(&wl->devlist); 4926 INIT_LIST_HEAD(&wl->devlist);
@@ -4878,6 +4961,8 @@ static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id)
4878 err = ieee80211_register_hw(wl->hw); 4961 err = ieee80211_register_hw(wl->hw);
4879 if (err) 4962 if (err)
4880 goto err_one_core_detach; 4963 goto err_one_core_detach;
4964 b43_leds_register(wl->current_dev);
4965 b43_rng_init(wl);
4881 } 4966 }
4882 4967
4883 out: 4968 out:
@@ -4906,12 +4991,15 @@ static void b43_remove(struct ssb_device *dev)
4906 * might have modified it. Restoring is important, so the networking 4991 * might have modified it. Restoring is important, so the networking
4907 * stack can properly free resources. */ 4992 * stack can properly free resources. */
4908 wl->hw->queues = wl->mac80211_initially_registered_queues; 4993 wl->hw->queues = wl->mac80211_initially_registered_queues;
4994 b43_leds_stop(wldev);
4909 ieee80211_unregister_hw(wl->hw); 4995 ieee80211_unregister_hw(wl->hw);
4910 } 4996 }
4911 4997
4912 b43_one_core_detach(dev); 4998 b43_one_core_detach(dev);
4913 4999
4914 if (list_empty(&wl->devlist)) { 5000 if (list_empty(&wl->devlist)) {
5001 b43_rng_exit(wl);
5002 b43_leds_unregister(wl);
4915 /* Last core on the chip unregistered. 5003 /* Last core on the chip unregistered.
4916 * We can destroy common struct b43_wl. 5004 * We can destroy common struct b43_wl.
4917 */ 5005 */
@@ -4929,80 +5017,17 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason)
4929 ieee80211_queue_work(dev->wl->hw, &dev->restart_work); 5017 ieee80211_queue_work(dev->wl->hw, &dev->restart_work);
4930} 5018}
4931 5019
4932#ifdef CONFIG_PM
4933
4934static int b43_suspend(struct ssb_device *dev, pm_message_t state)
4935{
4936 struct b43_wldev *wldev = ssb_get_drvdata(dev);
4937 struct b43_wl *wl = wldev->wl;
4938
4939 b43dbg(wl, "Suspending...\n");
4940
4941 mutex_lock(&wl->mutex);
4942 wldev->suspend_in_progress = true;
4943 wldev->suspend_init_status = b43_status(wldev);
4944 if (wldev->suspend_init_status >= B43_STAT_STARTED)
4945 wldev = b43_wireless_core_stop(wldev);
4946 if (wldev && wldev->suspend_init_status >= B43_STAT_INITIALIZED)
4947 b43_wireless_core_exit(wldev);
4948 mutex_unlock(&wl->mutex);
4949
4950 b43dbg(wl, "Device suspended.\n");
4951
4952 return 0;
4953}
4954
4955static int b43_resume(struct ssb_device *dev)
4956{
4957 struct b43_wldev *wldev = ssb_get_drvdata(dev);
4958 struct b43_wl *wl = wldev->wl;
4959 int err = 0;
4960
4961 b43dbg(wl, "Resuming...\n");
4962
4963 mutex_lock(&wl->mutex);
4964 if (wldev->suspend_init_status >= B43_STAT_INITIALIZED) {
4965 err = b43_wireless_core_init(wldev);
4966 if (err) {
4967 b43err(wl, "Resume failed at core init\n");
4968 goto out;
4969 }
4970 }
4971 if (wldev->suspend_init_status >= B43_STAT_STARTED) {
4972 err = b43_wireless_core_start(wldev);
4973 if (err) {
4974 b43_leds_exit(wldev);
4975 b43_rng_exit(wldev->wl);
4976 b43_wireless_core_exit(wldev);
4977 b43err(wl, "Resume failed at core start\n");
4978 goto out;
4979 }
4980 }
4981 b43dbg(wl, "Device resumed.\n");
4982 out:
4983 wldev->suspend_in_progress = false;
4984 mutex_unlock(&wl->mutex);
4985 return err;
4986}
4987
4988#else /* CONFIG_PM */
4989# define b43_suspend NULL
4990# define b43_resume NULL
4991#endif /* CONFIG_PM */
4992
4993static struct ssb_driver b43_ssb_driver = { 5020static struct ssb_driver b43_ssb_driver = {
4994 .name = KBUILD_MODNAME, 5021 .name = KBUILD_MODNAME,
4995 .id_table = b43_ssb_tbl, 5022 .id_table = b43_ssb_tbl,
4996 .probe = b43_probe, 5023 .probe = b43_probe,
4997 .remove = b43_remove, 5024 .remove = b43_remove,
4998 .suspend = b43_suspend,
4999 .resume = b43_resume,
5000}; 5025};
5001 5026
5002static void b43_print_driverinfo(void) 5027static void b43_print_driverinfo(void)
5003{ 5028{
5004 const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "", 5029 const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
5005 *feat_leds = ""; 5030 *feat_leds = "", *feat_sdio = "";
5006 5031
5007#ifdef CONFIG_B43_PCI_AUTOSELECT 5032#ifdef CONFIG_B43_PCI_AUTOSELECT
5008 feat_pci = "P"; 5033 feat_pci = "P";
@@ -5016,11 +5041,14 @@ static void b43_print_driverinfo(void)
5016#ifdef CONFIG_B43_LEDS 5041#ifdef CONFIG_B43_LEDS
5017 feat_leds = "L"; 5042 feat_leds = "L";
5018#endif 5043#endif
5044#ifdef CONFIG_B43_SDIO
5045 feat_sdio = "S";
5046#endif
5019 printk(KERN_INFO "Broadcom 43xx driver loaded " 5047 printk(KERN_INFO "Broadcom 43xx driver loaded "
5020 "[ Features: %s%s%s%s, Firmware-ID: " 5048 "[ Features: %s%s%s%s%s, Firmware-ID: "
5021 B43_SUPPORTED_FIRMWARE_ID " ]\n", 5049 B43_SUPPORTED_FIRMWARE_ID " ]\n",
5022 feat_pci, feat_pcmcia, feat_nphy, 5050 feat_pci, feat_pcmcia, feat_nphy,
5023 feat_leds); 5051 feat_leds, feat_sdio);
5024} 5052}
5025 5053
5026static int __init b43_init(void) 5054static int __init b43_init(void)
@@ -5031,13 +5059,18 @@ static int __init b43_init(void)
5031 err = b43_pcmcia_init(); 5059 err = b43_pcmcia_init();
5032 if (err) 5060 if (err)
5033 goto err_dfs_exit; 5061 goto err_dfs_exit;
5034 err = ssb_driver_register(&b43_ssb_driver); 5062 err = b43_sdio_init();
5035 if (err) 5063 if (err)
5036 goto err_pcmcia_exit; 5064 goto err_pcmcia_exit;
5065 err = ssb_driver_register(&b43_ssb_driver);
5066 if (err)
5067 goto err_sdio_exit;
5037 b43_print_driverinfo(); 5068 b43_print_driverinfo();
5038 5069
5039 return err; 5070 return err;
5040 5071
5072err_sdio_exit:
5073 b43_sdio_exit();
5041err_pcmcia_exit: 5074err_pcmcia_exit:
5042 b43_pcmcia_exit(); 5075 b43_pcmcia_exit();
5043err_dfs_exit: 5076err_dfs_exit:
@@ -5048,6 +5081,7 @@ err_dfs_exit:
5048static void __exit b43_exit(void) 5081static void __exit b43_exit(void)
5049{ 5082{
5050 ssb_driver_unregister(&b43_ssb_driver); 5083 ssb_driver_unregister(&b43_ssb_driver);
5084 b43_sdio_exit();
5051 b43_pcmcia_exit(); 5085 b43_pcmcia_exit();
5052 b43_debugfs_exit(); 5086 b43_debugfs_exit();
5053} 5087}
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e02d969f683..1e318d815a5b 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -2228,6 +2228,16 @@ static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev,
2228 return B43_TXPWR_RES_DONE; 2228 return B43_TXPWR_RES_DONE;
2229} 2229}
2230 2230
2231void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on)
2232{
2233 if (on) {
2234 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8);
2235 } else {
2236 b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0x0007);
2237 b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x0007);
2238 }
2239}
2240
2231const struct b43_phy_operations b43_phyops_lp = { 2241const struct b43_phy_operations b43_phyops_lp = {
2232 .allocate = b43_lpphy_op_allocate, 2242 .allocate = b43_lpphy_op_allocate,
2233 .free = b43_lpphy_op_free, 2243 .free = b43_lpphy_op_free,
@@ -2239,7 +2249,7 @@ const struct b43_phy_operations b43_phyops_lp = {
2239 .radio_read = b43_lpphy_op_radio_read, 2249 .radio_read = b43_lpphy_op_radio_read,
2240 .radio_write = b43_lpphy_op_radio_write, 2250 .radio_write = b43_lpphy_op_radio_write,
2241 .software_rfkill = b43_lpphy_op_software_rfkill, 2251 .software_rfkill = b43_lpphy_op_software_rfkill,
2242 .switch_analog = b43_phyop_switch_analog_generic, 2252 .switch_analog = b43_lpphy_op_switch_analog,
2243 .switch_channel = b43_lpphy_op_switch_channel, 2253 .switch_channel = b43_lpphy_op_switch_channel,
2244 .get_default_chan = b43_lpphy_op_get_default_chan, 2254 .get_default_chan = b43_lpphy_op_get_default_chan,
2245 .set_rx_antenna = b43_lpphy_op_set_rx_antenna, 2255 .set_rx_antenna = b43_lpphy_op_set_rx_antenna,
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 3498b68385e7..9b9044400218 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -30,6 +30,7 @@
30#include "xmit.h" 30#include "xmit.h"
31 31
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/sched.h>
33 34
34 35
35static u16 generate_cookie(struct b43_pio_txqueue *q, 36static u16 generate_cookie(struct b43_pio_txqueue *q,
@@ -331,6 +332,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
331 unsigned int data_len) 332 unsigned int data_len)
332{ 333{
333 struct b43_wldev *dev = q->dev; 334 struct b43_wldev *dev = q->dev;
335 struct b43_wl *wl = dev->wl;
334 const u8 *data = _data; 336 const u8 *data = _data;
335 337
336 ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; 338 ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
@@ -343,7 +345,11 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
343 /* Write the last byte. */ 345 /* Write the last byte. */
344 ctl &= ~B43_PIO_TXCTL_WRITEHI; 346 ctl &= ~B43_PIO_TXCTL_WRITEHI;
345 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 347 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
346 b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]); 348 wl->tx_tail[0] = data[data_len - 1];
349 wl->tx_tail[1] = 0;
350 ssb_block_write(dev->dev, wl->tx_tail, 2,
351 q->mmio_base + B43_PIO_TXDATA,
352 sizeof(u16));
347 } 353 }
348 354
349 return ctl; 355 return ctl;
@@ -376,6 +382,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
376 unsigned int data_len) 382 unsigned int data_len)
377{ 383{
378 struct b43_wldev *dev = q->dev; 384 struct b43_wldev *dev = q->dev;
385 struct b43_wl *wl = dev->wl;
379 const u8 *data = _data; 386 const u8 *data = _data;
380 387
381 ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 | 388 ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
@@ -386,26 +393,33 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
386 q->mmio_base + B43_PIO8_TXDATA, 393 q->mmio_base + B43_PIO8_TXDATA,
387 sizeof(u32)); 394 sizeof(u32));
388 if (data_len & 3) { 395 if (data_len & 3) {
389 u32 value = 0; 396 wl->tx_tail[3] = 0;
390
391 /* Write the last few bytes. */ 397 /* Write the last few bytes. */
392 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | 398 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
393 B43_PIO8_TXCTL_24_31); 399 B43_PIO8_TXCTL_24_31);
394 data = &(data[data_len - 1]);
395 switch (data_len & 3) { 400 switch (data_len & 3) {
396 case 3: 401 case 3:
397 ctl |= B43_PIO8_TXCTL_16_23; 402 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
398 value |= (u32)(*data) << 16; 403 wl->tx_tail[0] = data[data_len - 3];
399 data--; 404 wl->tx_tail[1] = data[data_len - 2];
405 wl->tx_tail[2] = data[data_len - 1];
406 break;
400 case 2: 407 case 2:
401 ctl |= B43_PIO8_TXCTL_8_15; 408 ctl |= B43_PIO8_TXCTL_8_15;
402 value |= (u32)(*data) << 8; 409 wl->tx_tail[0] = data[data_len - 2];
403 data--; 410 wl->tx_tail[1] = data[data_len - 1];
411 wl->tx_tail[2] = 0;
412 break;
404 case 1: 413 case 1:
405 value |= (u32)(*data); 414 wl->tx_tail[0] = data[data_len - 1];
415 wl->tx_tail[1] = 0;
416 wl->tx_tail[2] = 0;
417 break;
406 } 418 }
407 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 419 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
408 b43_piotx_write32(q, B43_PIO8_TXDATA, value); 420 ssb_block_write(dev->dev, wl->tx_tail, 4,
421 q->mmio_base + B43_PIO8_TXDATA,
422 sizeof(u32));
409 } 423 }
410 424
411 return ctl; 425 return ctl;
@@ -435,8 +449,9 @@ static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
435static int pio_tx_frame(struct b43_pio_txqueue *q, 449static int pio_tx_frame(struct b43_pio_txqueue *q,
436 struct sk_buff *skb) 450 struct sk_buff *skb)
437{ 451{
452 struct b43_wldev *dev = q->dev;
453 struct b43_wl *wl = dev->wl;
438 struct b43_pio_txpacket *pack; 454 struct b43_pio_txpacket *pack;
439 struct b43_txhdr txhdr;
440 u16 cookie; 455 u16 cookie;
441 int err; 456 int err;
442 unsigned int hdrlen; 457 unsigned int hdrlen;
@@ -447,8 +462,8 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
447 struct b43_pio_txpacket, list); 462 struct b43_pio_txpacket, list);
448 463
449 cookie = generate_cookie(q, pack); 464 cookie = generate_cookie(q, pack);
450 hdrlen = b43_txhdr_size(q->dev); 465 hdrlen = b43_txhdr_size(dev);
451 err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb, 466 err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb,
452 info, cookie); 467 info, cookie);
453 if (err) 468 if (err)
454 return err; 469 return err;
@@ -456,15 +471,15 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
456 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { 471 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
457 /* Tell the firmware about the cookie of the last 472 /* Tell the firmware about the cookie of the last
458 * mcast frame, so it can clear the more-data bit in it. */ 473 * mcast frame, so it can clear the more-data bit in it. */
459 b43_shm_write16(q->dev, B43_SHM_SHARED, 474 b43_shm_write16(dev, B43_SHM_SHARED,
460 B43_SHM_SH_MCASTCOOKIE, cookie); 475 B43_SHM_SH_MCASTCOOKIE, cookie);
461 } 476 }
462 477
463 pack->skb = skb; 478 pack->skb = skb;
464 if (q->rev >= 8) 479 if (q->rev >= 8)
465 pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen); 480 pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
466 else 481 else
467 pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen); 482 pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
468 483
469 /* Remove it from the list of available packet slots. 484 /* Remove it from the list of available packet slots.
470 * It will be put back when we receive the status report. */ 485 * It will be put back when we receive the status report. */
@@ -574,7 +589,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
574 q->buffer_used -= total_len; 589 q->buffer_used -= total_len;
575 q->free_packet_slots += 1; 590 q->free_packet_slots += 1;
576 591
577 ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb); 592 ieee80211_tx_status(dev->wl->hw, pack->skb);
578 pack->skb = NULL; 593 pack->skb = NULL;
579 list_add(&pack->list, &q->packets_list); 594 list_add(&pack->list, &q->packets_list);
580 595
@@ -604,14 +619,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev,
604static bool pio_rx_frame(struct b43_pio_rxqueue *q) 619static bool pio_rx_frame(struct b43_pio_rxqueue *q)
605{ 620{
606 struct b43_wldev *dev = q->dev; 621 struct b43_wldev *dev = q->dev;
607 struct b43_rxhdr_fw4 rxhdr; 622 struct b43_wl *wl = dev->wl;
608 u16 len; 623 u16 len;
609 u32 macstat; 624 u32 macstat;
610 unsigned int i, padding; 625 unsigned int i, padding;
611 struct sk_buff *skb; 626 struct sk_buff *skb;
612 const char *err_msg = NULL; 627 const char *err_msg = NULL;
613 628
614 memset(&rxhdr, 0, sizeof(rxhdr)); 629 memset(&wl->rxhdr, 0, sizeof(wl->rxhdr));
615 630
616 /* Check if we have data and wait for it to get ready. */ 631 /* Check if we have data and wait for it to get ready. */
617 if (q->rev >= 8) { 632 if (q->rev >= 8) {
@@ -649,16 +664,16 @@ data_ready:
649 664
650 /* Get the preamble (RX header) */ 665 /* Get the preamble (RX header) */
651 if (q->rev >= 8) { 666 if (q->rev >= 8) {
652 ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr), 667 ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
653 q->mmio_base + B43_PIO8_RXDATA, 668 q->mmio_base + B43_PIO8_RXDATA,
654 sizeof(u32)); 669 sizeof(u32));
655 } else { 670 } else {
656 ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr), 671 ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
657 q->mmio_base + B43_PIO_RXDATA, 672 q->mmio_base + B43_PIO_RXDATA,
658 sizeof(u16)); 673 sizeof(u16));
659 } 674 }
660 /* Sanity checks. */ 675 /* Sanity checks. */
661 len = le16_to_cpu(rxhdr.frame_len); 676 len = le16_to_cpu(wl->rxhdr.frame_len);
662 if (unlikely(len > 0x700)) { 677 if (unlikely(len > 0x700)) {
663 err_msg = "len > 0x700"; 678 err_msg = "len > 0x700";
664 goto rx_error; 679 goto rx_error;
@@ -668,7 +683,7 @@ data_ready:
668 goto rx_error; 683 goto rx_error;
669 } 684 }
670 685
671 macstat = le32_to_cpu(rxhdr.mac_status); 686 macstat = le32_to_cpu(wl->rxhdr.mac_status);
672 if (macstat & B43_RX_MAC_FCSERR) { 687 if (macstat & B43_RX_MAC_FCSERR) {
673 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { 688 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
674 /* Drop frames with failed FCS. */ 689 /* Drop frames with failed FCS. */
@@ -693,21 +708,23 @@ data_ready:
693 q->mmio_base + B43_PIO8_RXDATA, 708 q->mmio_base + B43_PIO8_RXDATA,
694 sizeof(u32)); 709 sizeof(u32));
695 if (len & 3) { 710 if (len & 3) {
696 u32 value;
697 char *data;
698
699 /* Read the last few bytes. */ 711 /* Read the last few bytes. */
700 value = b43_piorx_read32(q, B43_PIO8_RXDATA); 712 ssb_block_read(dev->dev, wl->rx_tail, 4,
701 data = &(skb->data[len + padding - 1]); 713 q->mmio_base + B43_PIO8_RXDATA,
714 sizeof(u32));
702 switch (len & 3) { 715 switch (len & 3) {
703 case 3: 716 case 3:
704 *data = (value >> 16); 717 skb->data[len + padding - 3] = wl->rx_tail[0];
705 data--; 718 skb->data[len + padding - 2] = wl->rx_tail[1];
719 skb->data[len + padding - 1] = wl->rx_tail[2];
720 break;
706 case 2: 721 case 2:
707 *data = (value >> 8); 722 skb->data[len + padding - 2] = wl->rx_tail[0];
708 data--; 723 skb->data[len + padding - 1] = wl->rx_tail[1];
724 break;
709 case 1: 725 case 1:
710 *data = value; 726 skb->data[len + padding - 1] = wl->rx_tail[0];
727 break;
711 } 728 }
712 } 729 }
713 } else { 730 } else {
@@ -715,15 +732,15 @@ data_ready:
715 q->mmio_base + B43_PIO_RXDATA, 732 q->mmio_base + B43_PIO_RXDATA,
716 sizeof(u16)); 733 sizeof(u16));
717 if (len & 1) { 734 if (len & 1) {
718 u16 value;
719
720 /* Read the last byte. */ 735 /* Read the last byte. */
721 value = b43_piorx_read16(q, B43_PIO_RXDATA); 736 ssb_block_read(dev->dev, wl->rx_tail, 2,
722 skb->data[len + padding - 1] = value; 737 q->mmio_base + B43_PIO_RXDATA,
738 sizeof(u16));
739 skb->data[len + padding - 1] = wl->rx_tail[0];
723 } 740 }
724 } 741 }
725 742
726 b43_rx(q->dev, skb, &rxhdr); 743 b43_rx(q->dev, skb, &wl->rxhdr);
727 744
728 return 1; 745 return 1;
729 746
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 31e55999893f..ffdce6f3c909 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -28,12 +28,13 @@
28/* Returns TRUE, if the radio is enabled in hardware. */ 28/* Returns TRUE, if the radio is enabled in hardware. */
29bool b43_is_hw_radio_enabled(struct b43_wldev *dev) 29bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
30{ 30{
31 if (dev->phy.rev >= 3) { 31 if (dev->phy.rev >= 3 || dev->phy.type == B43_PHYTYPE_LP) {
32 if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI) 32 if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
33 & B43_MMIO_RADIO_HWENABLED_HI_MASK)) 33 & B43_MMIO_RADIO_HWENABLED_HI_MASK))
34 return 1; 34 return 1;
35 } else { 35 } else {
36 if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) 36 if (b43_status(dev) >= B43_STAT_STARTED &&
37 b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
37 & B43_MMIO_RADIO_HWENABLED_LO_MASK) 38 & B43_MMIO_RADIO_HWENABLED_LO_MASK)
38 return 1; 39 return 1;
39 } 40 }
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
new file mode 100644
index 000000000000..0d3ac64147a5
--- /dev/null
+++ b/drivers/net/wireless/b43/sdio.c
@@ -0,0 +1,202 @@
1/*
2 * Broadcom B43 wireless driver
3 *
4 * SDIO over Sonics Silicon Backplane bus glue for b43.
5 *
6 * Copyright (C) 2009 Albert Herranz
7 * Copyright (C) 2009 Michael Buesch <mb@bu3sch.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or (at
12 * your option) any later version.
13 */
14
15#include <linux/kernel.h>
16#include <linux/mmc/card.h>
17#include <linux/mmc/sdio_func.h>
18#include <linux/mmc/sdio_ids.h>
19#include <linux/ssb/ssb.h>
20
21#include "sdio.h"
22#include "b43.h"
23
24
25#define HNBU_CHIPID 0x01 /* vendor & device id */
26
27#define B43_SDIO_BLOCK_SIZE 64 /* rx fifo max size in bytes */
28
29
30static const struct b43_sdio_quirk {
31 u16 vendor;
32 u16 device;
33 unsigned int quirks;
34} b43_sdio_quirks[] = {
35 { 0x14E4, 0x4318, SSB_QUIRK_SDIO_READ_AFTER_WRITE32, },
36 { },
37};
38
39
40static unsigned int b43_sdio_get_quirks(u16 vendor, u16 device)
41{
42 const struct b43_sdio_quirk *q;
43
44 for (q = b43_sdio_quirks; q->quirks; q++) {
45 if (vendor == q->vendor && device == q->device)
46 return q->quirks;
47 }
48
49 return 0;
50}
51
52static void b43_sdio_interrupt_dispatcher(struct sdio_func *func)
53{
54 struct b43_sdio *sdio = sdio_get_drvdata(func);
55 struct b43_wldev *dev = sdio->irq_handler_opaque;
56
57 if (unlikely(b43_status(dev) < B43_STAT_STARTED))
58 return;
59
60 sdio_release_host(func);
61 sdio->irq_handler(dev);
62 sdio_claim_host(func);
63}
64
65int b43_sdio_request_irq(struct b43_wldev *dev,
66 void (*handler)(struct b43_wldev *dev))
67{
68 struct ssb_bus *bus = dev->dev->bus;
69 struct sdio_func *func = bus->host_sdio;
70 struct b43_sdio *sdio = sdio_get_drvdata(func);
71 int err;
72
73 sdio->irq_handler_opaque = dev;
74 sdio->irq_handler = handler;
75 sdio_claim_host(func);
76 err = sdio_claim_irq(func, b43_sdio_interrupt_dispatcher);
77 sdio_release_host(func);
78
79 return err;
80}
81
82void b43_sdio_free_irq(struct b43_wldev *dev)
83{
84 struct ssb_bus *bus = dev->dev->bus;
85 struct sdio_func *func = bus->host_sdio;
86 struct b43_sdio *sdio = sdio_get_drvdata(func);
87
88 sdio_claim_host(func);
89 sdio_release_irq(func);
90 sdio_release_host(func);
91 sdio->irq_handler_opaque = NULL;
92 sdio->irq_handler = NULL;
93}
94
95static int b43_sdio_probe(struct sdio_func *func,
96 const struct sdio_device_id *id)
97{
98 struct b43_sdio *sdio;
99 struct sdio_func_tuple *tuple;
100 u16 vendor = 0, device = 0;
101 int error;
102
103 /* Look for the card chip identifier. */
104 tuple = func->tuples;
105 while (tuple) {
106 switch (tuple->code) {
107 case 0x80:
108 switch (tuple->data[0]) {
109 case HNBU_CHIPID:
110 if (tuple->size != 5)
111 break;
112 vendor = tuple->data[1] | (tuple->data[2]<<8);
113 device = tuple->data[3] | (tuple->data[4]<<8);
114 dev_info(&func->dev, "Chip ID %04x:%04x\n",
115 vendor, device);
116 break;
117 default:
118 break;
119 }
120 break;
121 default:
122 break;
123 }
124 tuple = tuple->next;
125 }
126 if (!vendor || !device) {
127 error = -ENODEV;
128 goto out;
129 }
130
131 sdio_claim_host(func);
132 error = sdio_set_block_size(func, B43_SDIO_BLOCK_SIZE);
133 if (error) {
134 dev_err(&func->dev, "failed to set block size to %u bytes,"
135 " error %d\n", B43_SDIO_BLOCK_SIZE, error);
136 goto err_release_host;
137 }
138 error = sdio_enable_func(func);
139 if (error) {
140 dev_err(&func->dev, "failed to enable func, error %d\n", error);
141 goto err_release_host;
142 }
143 sdio_release_host(func);
144
145 sdio = kzalloc(sizeof(*sdio), GFP_KERNEL);
146 if (!sdio) {
147 error = -ENOMEM;
148 dev_err(&func->dev, "failed to allocate ssb bus\n");
149 goto err_disable_func;
150 }
151 error = ssb_bus_sdiobus_register(&sdio->ssb, func,
152 b43_sdio_get_quirks(vendor, device));
153 if (error) {
154 dev_err(&func->dev, "failed to register ssb sdio bus,"
155 " error %d\n", error);
156 goto err_free_ssb;
157 }
158 sdio_set_drvdata(func, sdio);
159
160 return 0;
161
162err_free_ssb:
163 kfree(sdio);
164err_disable_func:
165 sdio_disable_func(func);
166err_release_host:
167 sdio_release_host(func);
168out:
169 return error;
170}
171
172static void b43_sdio_remove(struct sdio_func *func)
173{
174 struct b43_sdio *sdio = sdio_get_drvdata(func);
175
176 ssb_bus_unregister(&sdio->ssb);
177 sdio_disable_func(func);
178 kfree(sdio);
179 sdio_set_drvdata(func, NULL);
180}
181
182static const struct sdio_device_id b43_sdio_ids[] = {
183 { SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */
184 { },
185};
186
187static struct sdio_driver b43_sdio_driver = {
188 .name = "b43-sdio",
189 .id_table = b43_sdio_ids,
190 .probe = b43_sdio_probe,
191 .remove = b43_sdio_remove,
192};
193
194int b43_sdio_init(void)
195{
196 return sdio_register_driver(&b43_sdio_driver);
197}
198
199void b43_sdio_exit(void)
200{
201 sdio_unregister_driver(&b43_sdio_driver);
202}
diff --git a/drivers/net/wireless/b43/sdio.h b/drivers/net/wireless/b43/sdio.h
new file mode 100644
index 000000000000..fb633094403a
--- /dev/null
+++ b/drivers/net/wireless/b43/sdio.h
@@ -0,0 +1,45 @@
1#ifndef B43_SDIO_H_
2#define B43_SDIO_H_
3
4#include <linux/ssb/ssb.h>
5
6struct b43_wldev;
7
8
9#ifdef CONFIG_B43_SDIO
10
11struct b43_sdio {
12 struct ssb_bus ssb;
13 void *irq_handler_opaque;
14 void (*irq_handler)(struct b43_wldev *dev);
15};
16
17int b43_sdio_request_irq(struct b43_wldev *dev,
18 void (*handler)(struct b43_wldev *dev));
19void b43_sdio_free_irq(struct b43_wldev *dev);
20
21int b43_sdio_init(void);
22void b43_sdio_exit(void);
23
24
25#else /* CONFIG_B43_SDIO */
26
27
28int b43_sdio_request_irq(struct b43_wldev *dev,
29 void (*handler)(struct b43_wldev *dev))
30{
31 return -ENODEV;
32}
33void b43_sdio_free_irq(struct b43_wldev *dev)
34{
35}
36static inline int b43_sdio_init(void)
37{
38 return 0;
39}
40static inline void b43_sdio_exit(void)
41{
42}
43
44#endif /* CONFIG_B43_SDIO */
45#endif /* B43_SDIO_H_ */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 14f541248b5c..f4e9695ec186 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -27,7 +27,7 @@
27 27
28*/ 28*/
29 29
30#include "xmit.h" 30#include "b43.h"
31#include "phy_common.h" 31#include "phy_common.h"
32#include "dma.h" 32#include "dma.h"
33#include "pio.h" 33#include "pio.h"
@@ -690,8 +690,14 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
690 } 690 }
691 691
692 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 692 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
693 ieee80211_rx_irqsafe(dev->wl->hw, skb);
694 693
694 local_bh_disable();
695 ieee80211_rx(dev->wl->hw, skb);
696 local_bh_enable();
697
698#if B43_DEBUG
699 dev->rx_count++;
700#endif
695 return; 701 return;
696drop: 702drop:
697 b43dbg(dev->wl, "RX: Packet dropped\n"); 703 b43dbg(dev->wl, "RX: Packet dropped\n");
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1d9223b3d4c4..4b60148a5e61 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -37,6 +37,7 @@
37#include <linux/firmware.h> 37#include <linux/firmware.h>
38#include <linux/wireless.h> 38#include <linux/wireless.h>
39#include <linux/workqueue.h> 39#include <linux/workqueue.h>
40#include <linux/sched.h>
40#include <linux/skbuff.h> 41#include <linux/skbuff.h>
41#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
42#include <net/dst.h> 43#include <net/dst.h>
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 11319ec2d64a..aaf227203a98 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -31,6 +31,7 @@
31 31
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/sched.h>
34#include <linux/types.h> 35#include <linux/types.h>
35 36
36#include "b43legacy.h" 37#include "b43legacy.h"
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 6fa14a4e4b53..4dfb40a84c96 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -1,6 +1,7 @@
1/* Host AP driver Info Frame processing (part of hostap.o module) */ 1/* Host AP driver Info Frame processing (part of hostap.o module) */
2 2
3#include <linux/if_arp.h> 3#include <linux/if_arp.h>
4#include <linux/sched.h>
4#include "hostap_wlan.h" 5#include "hostap_wlan.h"
5#include "hostap.h" 6#include "hostap.h"
6#include "hostap_ap.h" 7#include "hostap_ap.h"
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 3f2bda881a4f..9419cebca8a5 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1,6 +1,7 @@
1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */ 1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
2 2
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/sched.h>
4#include <linux/ethtool.h> 5#include <linux/ethtool.h>
5#include <linux/if_arp.h> 6#include <linux/if_arp.h>
6#include <net/lib80211.h> 7#include <net/lib80211.h>
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d58e6ed4e7d..827824d45de9 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -30,6 +30,7 @@
30 30
31******************************************************************************/ 31******************************************************************************/
32 32
33#include <linux/sched.h>
33#include "ipw2200.h" 34#include "ipw2200.h"
34 35
35 36
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index a95caa014143..2716b91ba9fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -99,6 +99,8 @@ static struct iwl_lib_ops iwl1000_lib = {
99 .setup_deferred_work = iwl5000_setup_deferred_work, 99 .setup_deferred_work = iwl5000_setup_deferred_work,
100 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 100 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
101 .load_ucode = iwl5000_load_ucode, 101 .load_ucode = iwl5000_load_ucode,
102 .dump_nic_event_log = iwl_dump_nic_event_log,
103 .dump_nic_error_log = iwl_dump_nic_error_log,
102 .init_alive_start = iwl5000_init_alive_start, 104 .init_alive_start = iwl5000_init_alive_start,
103 .alive_notify = iwl5000_alive_notify, 105 .alive_notify = iwl5000_alive_notify,
104 .send_tx_power = iwl5000_send_tx_power, 106 .send_tx_power = iwl5000_send_tx_power,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index a16bd4147eac..cbb0585083a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -702,7 +702,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
702 u8 sta_id = iwl_find_station(priv, hdr->addr1); 702 u8 sta_id = iwl_find_station(priv, hdr->addr1);
703 703
704 if (sta_id == IWL_INVALID_STATION) { 704 if (sta_id == IWL_INVALID_STATION) {
705 IWL_DEBUG_RATE(priv, "LQ: ADD station %pm\n", 705 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
706 hdr->addr1); 706 hdr->addr1);
707 sta_id = iwl_add_station(priv, hdr->addr1, false, 707 sta_id = iwl_add_station(priv, hdr->addr1, false,
708 CMD_ASYNC, NULL); 708 CMD_ASYNC, NULL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index e9a685d8e3a1..f059b49dc691 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -30,6 +30,7 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/sched.h>
33#include <linux/skbuff.h> 34#include <linux/skbuff.h>
34#include <linux/netdevice.h> 35#include <linux/netdevice.h>
35#include <linux/wireless.h> 36#include <linux/wireless.h>
@@ -610,7 +611,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
610 if (rx_status.band == IEEE80211_BAND_5GHZ) 611 if (rx_status.band == IEEE80211_BAND_5GHZ)
611 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; 612 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
612 613
613 rx_status.antenna = le16_to_cpu(rx_hdr->phy_flags & 614 rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
614 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; 615 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
615 616
616 /* set the preamble flag if appropriate */ 617 /* set the preamble flag if appropriate */
@@ -2839,6 +2840,8 @@ static struct iwl_lib_ops iwl3945_lib = {
2839 .txq_free_tfd = iwl3945_hw_txq_free_tfd, 2840 .txq_free_tfd = iwl3945_hw_txq_free_tfd,
2840 .txq_init = iwl3945_hw_tx_queue_init, 2841 .txq_init = iwl3945_hw_tx_queue_init,
2841 .load_ucode = iwl3945_load_bsm, 2842 .load_ucode = iwl3945_load_bsm,
2843 .dump_nic_event_log = iwl3945_dump_nic_event_log,
2844 .dump_nic_error_log = iwl3945_dump_nic_error_log,
2842 .apm_ops = { 2845 .apm_ops = {
2843 .init = iwl3945_apm_init, 2846 .init = iwl3945_apm_init,
2844 .reset = iwl3945_apm_reset, 2847 .reset = iwl3945_apm_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index f24036909916..21679bf3a1aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -209,6 +209,8 @@ extern int __must_check iwl3945_send_cmd(struct iwl_priv *priv,
209 struct iwl_host_cmd *cmd); 209 struct iwl_host_cmd *cmd);
210extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 210extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
211 struct ieee80211_hdr *hdr,int left); 211 struct ieee80211_hdr *hdr,int left);
212extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv);
213extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
212 214
213/* 215/*
214 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't 216 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index ca61d3796cef..6f703a041847 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -30,6 +30,7 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/sched.h>
33#include <linux/skbuff.h> 34#include <linux/skbuff.h>
34#include <linux/netdevice.h> 35#include <linux/netdevice.h>
35#include <linux/wireless.h> 36#include <linux/wireless.h>
@@ -2021,6 +2022,12 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2021 agg->frame_count, txq_id, idx); 2022 agg->frame_count, txq_id, idx);
2022 2023
2023 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 2024 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
2025 if (!hdr) {
2026 IWL_ERR(priv,
2027 "BUG_ON idx doesn't point to valid skb"
2028 " idx=%d, txq_id=%d\n", idx, txq_id);
2029 return -1;
2030 }
2024 2031
2025 sc = le16_to_cpu(hdr->seq_ctrl); 2032 sc = le16_to_cpu(hdr->seq_ctrl);
2026 if (idx != (SEQ_TO_SN(sc) & 0xff)) { 2033 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
@@ -2292,6 +2299,8 @@ static struct iwl_lib_ops iwl4965_lib = {
2292 .alive_notify = iwl4965_alive_notify, 2299 .alive_notify = iwl4965_alive_notify,
2293 .init_alive_start = iwl4965_init_alive_start, 2300 .init_alive_start = iwl4965_init_alive_start,
2294 .load_ucode = iwl4965_load_bsm, 2301 .load_ucode = iwl4965_load_bsm,
2302 .dump_nic_event_log = iwl_dump_nic_event_log,
2303 .dump_nic_error_log = iwl_dump_nic_error_log,
2295 .apm_ops = { 2304 .apm_ops = {
2296 .init = iwl4965_apm_init, 2305 .init = iwl4965_apm_init,
2297 .reset = iwl4965_apm_reset, 2306 .reset = iwl4965_apm_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 1d539e3b8db1..6e6f516ba404 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -29,6 +29,7 @@
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/sched.h>
32#include <linux/skbuff.h> 33#include <linux/skbuff.h>
33#include <linux/netdevice.h> 34#include <linux/netdevice.h>
34#include <linux/wireless.h> 35#include <linux/wireless.h>
@@ -317,7 +318,7 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
317 (s32)average_noise[i])) / 1500; 318 (s32)average_noise[i])) / 1500;
318 /* bound gain by 2 bits value max, 3rd bit is sign */ 319 /* bound gain by 2 bits value max, 3rd bit is sign */
319 data->delta_gain_code[i] = 320 data->delta_gain_code[i] =
320 min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE); 321 min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
321 322
322 if (delta_g < 0) 323 if (delta_g < 0)
323 /* set negative sign */ 324 /* set negative sign */
@@ -1163,6 +1164,12 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1163 agg->frame_count, txq_id, idx); 1164 agg->frame_count, txq_id, idx);
1164 1165
1165 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 1166 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
1167 if (!hdr) {
1168 IWL_ERR(priv,
1169 "BUG_ON idx doesn't point to valid skb"
1170 " idx=%d, txq_id=%d\n", idx, txq_id);
1171 return -1;
1172 }
1166 1173
1167 sc = le16_to_cpu(hdr->seq_ctrl); 1174 sc = le16_to_cpu(hdr->seq_ctrl);
1168 if (idx != (SEQ_TO_SN(sc) & 0xff)) { 1175 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
@@ -1529,6 +1536,8 @@ struct iwl_lib_ops iwl5000_lib = {
1529 .rx_handler_setup = iwl5000_rx_handler_setup, 1536 .rx_handler_setup = iwl5000_rx_handler_setup,
1530 .setup_deferred_work = iwl5000_setup_deferred_work, 1537 .setup_deferred_work = iwl5000_setup_deferred_work,
1531 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1538 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1539 .dump_nic_event_log = iwl_dump_nic_event_log,
1540 .dump_nic_error_log = iwl_dump_nic_error_log,
1532 .load_ucode = iwl5000_load_ucode, 1541 .load_ucode = iwl5000_load_ucode,
1533 .init_alive_start = iwl5000_init_alive_start, 1542 .init_alive_start = iwl5000_init_alive_start,
1534 .alive_notify = iwl5000_alive_notify, 1543 .alive_notify = iwl5000_alive_notify,
@@ -1579,6 +1588,8 @@ static struct iwl_lib_ops iwl5150_lib = {
1579 .rx_handler_setup = iwl5000_rx_handler_setup, 1588 .rx_handler_setup = iwl5000_rx_handler_setup,
1580 .setup_deferred_work = iwl5000_setup_deferred_work, 1589 .setup_deferred_work = iwl5000_setup_deferred_work,
1581 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1590 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1591 .dump_nic_event_log = iwl_dump_nic_event_log,
1592 .dump_nic_error_log = iwl_dump_nic_error_log,
1582 .load_ucode = iwl5000_load_ucode, 1593 .load_ucode = iwl5000_load_ucode,
1583 .init_alive_start = iwl5000_init_alive_start, 1594 .init_alive_start = iwl5000_init_alive_start,
1584 .alive_notify = iwl5000_alive_notify, 1595 .alive_notify = iwl5000_alive_notify,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 82b9c93dff54..c295b8ee9228 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -100,6 +100,8 @@ static struct iwl_lib_ops iwl6000_lib = {
100 .setup_deferred_work = iwl5000_setup_deferred_work, 100 .setup_deferred_work = iwl5000_setup_deferred_work,
101 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 101 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
102 .load_ucode = iwl5000_load_ucode, 102 .load_ucode = iwl5000_load_ucode,
103 .dump_nic_event_log = iwl_dump_nic_event_log,
104 .dump_nic_error_log = iwl_dump_nic_error_log,
103 .init_alive_start = iwl5000_init_alive_start, 105 .init_alive_start = iwl5000_init_alive_start,
104 .alive_notify = iwl5000_alive_notify, 106 .alive_notify = iwl5000_alive_notify,
105 .send_tx_power = iwl5000_send_tx_power, 107 .send_tx_power = iwl5000_send_tx_power,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 00457bff1ed1..eaafae091f5b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -33,6 +33,7 @@
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/sched.h>
36#include <linux/skbuff.h> 37#include <linux/skbuff.h>
37#include <linux/netdevice.h> 38#include <linux/netdevice.h>
38#include <linux/wireless.h> 39#include <linux/wireless.h>
@@ -1526,6 +1527,191 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1526 return ret; 1527 return ret;
1527} 1528}
1528 1529
1530#ifdef CONFIG_IWLWIFI_DEBUG
1531static const char *desc_lookup_text[] = {
1532 "OK",
1533 "FAIL",
1534 "BAD_PARAM",
1535 "BAD_CHECKSUM",
1536 "NMI_INTERRUPT_WDG",
1537 "SYSASSERT",
1538 "FATAL_ERROR",
1539 "BAD_COMMAND",
1540 "HW_ERROR_TUNE_LOCK",
1541 "HW_ERROR_TEMPERATURE",
1542 "ILLEGAL_CHAN_FREQ",
1543 "VCC_NOT_STABLE",
1544 "FH_ERROR",
1545 "NMI_INTERRUPT_HOST",
1546 "NMI_INTERRUPT_ACTION_PT",
1547 "NMI_INTERRUPT_UNKNOWN",
1548 "UCODE_VERSION_MISMATCH",
1549 "HW_ERROR_ABS_LOCK",
1550 "HW_ERROR_CAL_LOCK_FAIL",
1551 "NMI_INTERRUPT_INST_ACTION_PT",
1552 "NMI_INTERRUPT_DATA_ACTION_PT",
1553 "NMI_TRM_HW_ER",
1554 "NMI_INTERRUPT_TRM",
1555 "NMI_INTERRUPT_BREAK_POINT"
1556 "DEBUG_0",
1557 "DEBUG_1",
1558 "DEBUG_2",
1559 "DEBUG_3",
1560 "UNKNOWN"
1561};
1562
1563static const char *desc_lookup(int i)
1564{
1565 int max = ARRAY_SIZE(desc_lookup_text) - 1;
1566
1567 if (i < 0 || i > max)
1568 i = max;
1569
1570 return desc_lookup_text[i];
1571}
1572
1573#define ERROR_START_OFFSET (1 * sizeof(u32))
1574#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1575
1576void iwl_dump_nic_error_log(struct iwl_priv *priv)
1577{
1578 u32 data2, line;
1579 u32 desc, time, count, base, data1;
1580 u32 blink1, blink2, ilink1, ilink2;
1581
1582 if (priv->ucode_type == UCODE_INIT)
1583 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1584 else
1585 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1586
1587 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1588 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1589 return;
1590 }
1591
1592 count = iwl_read_targ_mem(priv, base);
1593
1594 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1595 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1596 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1597 priv->status, count);
1598 }
1599
1600 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1601 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1602 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1603 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1604 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1605 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1606 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1607 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1608 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1609
1610 IWL_ERR(priv, "Desc Time "
1611 "data1 data2 line\n");
1612 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1613 desc_lookup(desc), desc, time, data1, data2, line);
1614 IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
1615 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1616 ilink1, ilink2);
1617
1618}
1619
1620#define EVENT_START_OFFSET (4 * sizeof(u32))
1621
1622/**
1623 * iwl_print_event_log - Dump error event log to syslog
1624 *
1625 */
1626static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1627 u32 num_events, u32 mode)
1628{
1629 u32 i;
1630 u32 base; /* SRAM byte address of event log header */
1631 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1632 u32 ptr; /* SRAM byte address of log data */
1633 u32 ev, time, data; /* event log data */
1634
1635 if (num_events == 0)
1636 return;
1637 if (priv->ucode_type == UCODE_INIT)
1638 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1639 else
1640 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1641
1642 if (mode == 0)
1643 event_size = 2 * sizeof(u32);
1644 else
1645 event_size = 3 * sizeof(u32);
1646
1647 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1648
1649 /* "time" is actually "data" for mode 0 (no timestamp).
1650 * place event id # at far right for easier visual parsing. */
1651 for (i = 0; i < num_events; i++) {
1652 ev = iwl_read_targ_mem(priv, ptr);
1653 ptr += sizeof(u32);
1654 time = iwl_read_targ_mem(priv, ptr);
1655 ptr += sizeof(u32);
1656 if (mode == 0) {
1657 /* data, ev */
1658 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
1659 } else {
1660 data = iwl_read_targ_mem(priv, ptr);
1661 ptr += sizeof(u32);
1662 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1663 time, data, ev);
1664 }
1665 }
1666}
1667
1668void iwl_dump_nic_event_log(struct iwl_priv *priv)
1669{
1670 u32 base; /* SRAM byte address of event log header */
1671 u32 capacity; /* event log capacity in # entries */
1672 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1673 u32 num_wraps; /* # times uCode wrapped to top of log */
1674 u32 next_entry; /* index of next entry to be written by uCode */
1675 u32 size; /* # entries that we'll print */
1676
1677 if (priv->ucode_type == UCODE_INIT)
1678 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1679 else
1680 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1681
1682 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1683 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1684 return;
1685 }
1686
1687 /* event log header */
1688 capacity = iwl_read_targ_mem(priv, base);
1689 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1690 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1691 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1692
1693 size = num_wraps ? capacity : next_entry;
1694
1695 /* bail out if nothing in log */
1696 if (size == 0) {
1697 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1698 return;
1699 }
1700
1701 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
1702 size, num_wraps);
1703
1704 /* if uCode has wrapped back to top of log, start at the oldest entry,
1705 * i.e the next one that uCode would fill. */
1706 if (num_wraps)
1707 iwl_print_event_log(priv, next_entry,
1708 capacity - next_entry, mode);
1709 /* (then/else) start at top of log */
1710 iwl_print_event_log(priv, 0, next_entry, mode);
1711
1712}
1713#endif
1714
1529/** 1715/**
1530 * iwl_alive_start - called after REPLY_ALIVE notification received 1716 * iwl_alive_start - called after REPLY_ALIVE notification received
1531 * from protocol/runtime uCode (initialization uCode's 1717 * from protocol/runtime uCode (initialization uCode's
@@ -2920,8 +3106,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2920 out_pci_disable_device: 3106 out_pci_disable_device:
2921 pci_disable_device(pdev); 3107 pci_disable_device(pdev);
2922 out_ieee80211_free_hw: 3108 out_ieee80211_free_hw:
2923 ieee80211_free_hw(priv->hw);
2924 iwl_free_traffic_mem(priv); 3109 iwl_free_traffic_mem(priv);
3110 ieee80211_free_hw(priv->hw);
2925 out: 3111 out:
2926 return err; 3112 return err;
2927} 3113}
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 2c5c88fc38f5..4afaf773aeac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1154,7 +1154,7 @@ struct iwl_wep_cmd {
1154#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) 1154#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1155#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) 1155#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1156#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) 1156#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1157#define RX_RES_PHY_FLAGS_ANTENNA_MSK cpu_to_le16(0xf0) 1157#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
1158#define RX_RES_PHY_FLAGS_ANTENNA_POS 4 1158#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
1159 1159
1160#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) 1160#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index fd26c0dc9c54..2dc928755454 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/sched.h>
32#include <net/mac80211.h> 33#include <net/mac80211.h>
33 34
34#include "iwl-eeprom.h" 35#include "iwl-eeprom.h"
@@ -1309,189 +1310,6 @@ static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1309 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 1310 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1310 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 1311 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
1311} 1312}
1312
1313static const char *desc_lookup_text[] = {
1314 "OK",
1315 "FAIL",
1316 "BAD_PARAM",
1317 "BAD_CHECKSUM",
1318 "NMI_INTERRUPT_WDG",
1319 "SYSASSERT",
1320 "FATAL_ERROR",
1321 "BAD_COMMAND",
1322 "HW_ERROR_TUNE_LOCK",
1323 "HW_ERROR_TEMPERATURE",
1324 "ILLEGAL_CHAN_FREQ",
1325 "VCC_NOT_STABLE",
1326 "FH_ERROR",
1327 "NMI_INTERRUPT_HOST",
1328 "NMI_INTERRUPT_ACTION_PT",
1329 "NMI_INTERRUPT_UNKNOWN",
1330 "UCODE_VERSION_MISMATCH",
1331 "HW_ERROR_ABS_LOCK",
1332 "HW_ERROR_CAL_LOCK_FAIL",
1333 "NMI_INTERRUPT_INST_ACTION_PT",
1334 "NMI_INTERRUPT_DATA_ACTION_PT",
1335 "NMI_TRM_HW_ER",
1336 "NMI_INTERRUPT_TRM",
1337 "NMI_INTERRUPT_BREAK_POINT"
1338 "DEBUG_0",
1339 "DEBUG_1",
1340 "DEBUG_2",
1341 "DEBUG_3",
1342 "UNKNOWN"
1343};
1344
1345static const char *desc_lookup(int i)
1346{
1347 int max = ARRAY_SIZE(desc_lookup_text) - 1;
1348
1349 if (i < 0 || i > max)
1350 i = max;
1351
1352 return desc_lookup_text[i];
1353}
1354
1355#define ERROR_START_OFFSET (1 * sizeof(u32))
1356#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1357
1358static void iwl_dump_nic_error_log(struct iwl_priv *priv)
1359{
1360 u32 data2, line;
1361 u32 desc, time, count, base, data1;
1362 u32 blink1, blink2, ilink1, ilink2;
1363
1364 if (priv->ucode_type == UCODE_INIT)
1365 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1366 else
1367 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1368
1369 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1370 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1371 return;
1372 }
1373
1374 count = iwl_read_targ_mem(priv, base);
1375
1376 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1377 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1378 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1379 priv->status, count);
1380 }
1381
1382 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1383 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1384 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1385 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1386 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1387 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1388 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1389 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1390 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1391
1392 IWL_ERR(priv, "Desc Time "
1393 "data1 data2 line\n");
1394 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1395 desc_lookup(desc), desc, time, data1, data2, line);
1396 IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
1397 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1398 ilink1, ilink2);
1399
1400}
1401
1402#define EVENT_START_OFFSET (4 * sizeof(u32))
1403
1404/**
1405 * iwl_print_event_log - Dump error event log to syslog
1406 *
1407 */
1408static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1409 u32 num_events, u32 mode)
1410{
1411 u32 i;
1412 u32 base; /* SRAM byte address of event log header */
1413 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1414 u32 ptr; /* SRAM byte address of log data */
1415 u32 ev, time, data; /* event log data */
1416
1417 if (num_events == 0)
1418 return;
1419 if (priv->ucode_type == UCODE_INIT)
1420 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1421 else
1422 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1423
1424 if (mode == 0)
1425 event_size = 2 * sizeof(u32);
1426 else
1427 event_size = 3 * sizeof(u32);
1428
1429 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1430
1431 /* "time" is actually "data" for mode 0 (no timestamp).
1432 * place event id # at far right for easier visual parsing. */
1433 for (i = 0; i < num_events; i++) {
1434 ev = iwl_read_targ_mem(priv, ptr);
1435 ptr += sizeof(u32);
1436 time = iwl_read_targ_mem(priv, ptr);
1437 ptr += sizeof(u32);
1438 if (mode == 0) {
1439 /* data, ev */
1440 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
1441 } else {
1442 data = iwl_read_targ_mem(priv, ptr);
1443 ptr += sizeof(u32);
1444 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1445 time, data, ev);
1446 }
1447 }
1448}
1449
1450void iwl_dump_nic_event_log(struct iwl_priv *priv)
1451{
1452 u32 base; /* SRAM byte address of event log header */
1453 u32 capacity; /* event log capacity in # entries */
1454 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1455 u32 num_wraps; /* # times uCode wrapped to top of log */
1456 u32 next_entry; /* index of next entry to be written by uCode */
1457 u32 size; /* # entries that we'll print */
1458
1459 if (priv->ucode_type == UCODE_INIT)
1460 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1461 else
1462 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1463
1464 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1465 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1466 return;
1467 }
1468
1469 /* event log header */
1470 capacity = iwl_read_targ_mem(priv, base);
1471 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1472 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1473 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1474
1475 size = num_wraps ? capacity : next_entry;
1476
1477 /* bail out if nothing in log */
1478 if (size == 0) {
1479 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1480 return;
1481 }
1482
1483 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
1484 size, num_wraps);
1485
1486 /* if uCode has wrapped back to top of log, start at the oldest entry,
1487 * i.e the next one that uCode would fill. */
1488 if (num_wraps)
1489 iwl_print_event_log(priv, next_entry,
1490 capacity - next_entry, mode);
1491 /* (then/else) start at top of log */
1492 iwl_print_event_log(priv, 0, next_entry, mode);
1493
1494}
1495#endif 1313#endif
1496/** 1314/**
1497 * iwl_irq_handle_error - called for HW or SW error interrupt from card 1315 * iwl_irq_handle_error - called for HW or SW error interrupt from card
@@ -1506,8 +1324,8 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1506 1324
1507#ifdef CONFIG_IWLWIFI_DEBUG 1325#ifdef CONFIG_IWLWIFI_DEBUG
1508 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) { 1326 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) {
1509 iwl_dump_nic_error_log(priv); 1327 priv->cfg->ops->lib->dump_nic_error_log(priv);
1510 iwl_dump_nic_event_log(priv); 1328 priv->cfg->ops->lib->dump_nic_event_log(priv);
1511 iwl_print_rx_config_cmd(priv); 1329 iwl_print_rx_config_cmd(priv);
1512 } 1330 }
1513#endif 1331#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7ff9ffb2b702..e50103a956b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -166,6 +166,8 @@ struct iwl_lib_ops {
166 int (*is_valid_rtc_data_addr)(u32 addr); 166 int (*is_valid_rtc_data_addr)(u32 addr);
167 /* 1st ucode load */ 167 /* 1st ucode load */
168 int (*load_ucode)(struct iwl_priv *priv); 168 int (*load_ucode)(struct iwl_priv *priv);
169 void (*dump_nic_event_log)(struct iwl_priv *priv);
170 void (*dump_nic_error_log)(struct iwl_priv *priv);
169 /* power management */ 171 /* power management */
170 struct iwl_apm_ops apm_ops; 172 struct iwl_apm_ops apm_ops;
171 173
@@ -540,7 +542,19 @@ int iwl_pci_resume(struct pci_dev *pdev);
540/***************************************************** 542/*****************************************************
541* Error Handling Debugging 543* Error Handling Debugging
542******************************************************/ 544******************************************************/
545#ifdef CONFIG_IWLWIFI_DEBUG
543void iwl_dump_nic_event_log(struct iwl_priv *priv); 546void iwl_dump_nic_event_log(struct iwl_priv *priv);
547void iwl_dump_nic_error_log(struct iwl_priv *priv);
548#else
549static inline void iwl_dump_nic_event_log(struct iwl_priv *priv)
550{
551}
552
553static inline void iwl_dump_nic_error_log(struct iwl_priv *priv)
554{
555}
556#endif
557
544void iwl_clear_isr_stats(struct iwl_priv *priv); 558void iwl_clear_isr_stats(struct iwl_priv *priv);
545 559
546/***************************************************** 560/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index fb844859a443..a198bcf61022 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -410,7 +410,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
410 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 410 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
411 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, 411 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
412 buf_size - pos, 0); 412 buf_size - pos, 0);
413 pos += strlen(buf); 413 pos += strlen(buf + pos);
414 if (buf_size - pos > 0) 414 if (buf_size - pos > 0)
415 buf[pos++] = '\n'; 415 buf[pos++] = '\n';
416 } 416 }
@@ -436,7 +436,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
436 if (sscanf(buf, "%d", &event_log_flag) != 1) 436 if (sscanf(buf, "%d", &event_log_flag) != 1)
437 return -EFAULT; 437 return -EFAULT;
438 if (event_log_flag == 1) 438 if (event_log_flag == 1)
439 iwl_dump_nic_event_log(priv); 439 priv->cfg->ops->lib->dump_nic_event_log(priv);
440 440
441 return count; 441 return count;
442} 442}
@@ -909,7 +909,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
909 "0x%.4x ", ofs); 909 "0x%.4x ", ofs);
910 hex_dump_to_buffer(ptr + ofs, 16, 16, 2, 910 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
911 buf + pos, bufsz - pos, 0); 911 buf + pos, bufsz - pos, 0);
912 pos += strlen(buf); 912 pos += strlen(buf + pos);
913 if (bufsz - pos > 0) 913 if (bufsz - pos > 0)
914 buf[pos++] = '\n'; 914 buf[pos++] = '\n';
915 } 915 }
@@ -932,7 +932,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
932 "0x%.4x ", ofs); 932 "0x%.4x ", ofs);
933 hex_dump_to_buffer(ptr + ofs, 16, 16, 2, 933 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
934 buf + pos, bufsz - pos, 0); 934 buf + pos, bufsz - pos, 0);
935 pos += strlen(buf); 935 pos += strlen(buf + pos);
936 if (bufsz - pos > 0) 936 if (bufsz - pos > 0)
937 buf[pos++] = '\n'; 937 buf[pos++] = '\n';
938 } 938 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 3d2b93a61e62..e14c9952a935 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -410,7 +410,6 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
410 u16 *validblockaddr) 410 u16 *validblockaddr)
411{ 411{
412 u16 next_link_addr = 0, link_value = 0, valid_addr; 412 u16 next_link_addr = 0, link_value = 0, valid_addr;
413 int ret = 0;
414 int usedblocks = 0; 413 int usedblocks = 0;
415 414
416 /* set addressing mode to absolute to traverse the link list */ 415 /* set addressing mode to absolute to traverse the link list */
@@ -430,29 +429,29 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
430 * check for more block on the link list 429 * check for more block on the link list
431 */ 430 */
432 valid_addr = next_link_addr; 431 valid_addr = next_link_addr;
433 next_link_addr = link_value; 432 next_link_addr = link_value * sizeof(u16);
434 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", 433 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
435 usedblocks, next_link_addr); 434 usedblocks, next_link_addr);
436 if (iwl_read_otp_word(priv, next_link_addr, &link_value)) 435 if (iwl_read_otp_word(priv, next_link_addr, &link_value))
437 return -EINVAL; 436 return -EINVAL;
438 if (!link_value) { 437 if (!link_value) {
439 /* 438 /*
440 * reach the end of link list, 439 * reach the end of link list, return success and
441 * set address point to the starting address 440 * set address point to the starting address
442 * of the image 441 * of the image
443 */ 442 */
444 goto done; 443 *validblockaddr = valid_addr;
444 /* skip first 2 bytes (link list pointer) */
445 *validblockaddr += 2;
446 return 0;
445 } 447 }
446 /* more in the link list, continue */ 448 /* more in the link list, continue */
447 usedblocks++; 449 usedblocks++;
448 } while (usedblocks < priv->cfg->max_ll_items); 450 } while (usedblocks <= priv->cfg->max_ll_items);
449 /* OTP full, use last block */ 451
450 IWL_DEBUG_INFO(priv, "OTP is full, use last block\n"); 452 /* OTP has no valid blocks */
451done: 453 IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n");
452 *validblockaddr = valid_addr; 454 return -EINVAL;
453 /* skip first 2 bytes (link list pointer) */
454 *validblockaddr += 2;
455 return ret;
456} 455}
457 456
458/** 457/**
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 6b68db7b1b81..80b9e45d9b9c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -220,35 +220,35 @@ struct iwl_eeprom_enhanced_txpwr {
220 * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_) 220 * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_)
221 */ 221 */
222/* 2.4 GHz band: CCK */ 222/* 2.4 GHz band: CCK */
223#define EEPROM_LB_CCK_20_COMMON ((0xAA)\ 223#define EEPROM_LB_CCK_20_COMMON ((0xA8)\
224 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */ 224 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */
225/* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ 225/* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */
226#define EEPROM_LB_OFDM_COMMON ((0xB2)\ 226#define EEPROM_LB_OFDM_COMMON ((0xB0)\
227 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ 227 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
228/* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ 228/* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */
229#define EEPROM_HB_OFDM_COMMON ((0xCA)\ 229#define EEPROM_HB_OFDM_COMMON ((0xC8)\
230 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ 230 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
231/* 2.4GHz band channels: 231/* 2.4GHz band channels:
232 * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */ 232 * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */
233#define EEPROM_LB_OFDM_20_BAND ((0xE2)\ 233#define EEPROM_LB_OFDM_20_BAND ((0xE0)\
234 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */ 234 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */
235/* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */ 235/* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */
236#define EEPROM_LB_OFDM_HT40_BAND ((0x122)\ 236#define EEPROM_LB_OFDM_HT40_BAND ((0x120)\
237 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */ 237 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */
238/* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */ 238/* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */
239#define EEPROM_HB_OFDM_20_BAND ((0x14A)\ 239#define EEPROM_HB_OFDM_20_BAND ((0x148)\
240 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */ 240 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */
241/* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */ 241/* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */
242#define EEPROM_HB_OFDM_HT40_BAND ((0x17A)\ 242#define EEPROM_HB_OFDM_HT40_BAND ((0x178)\
243 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ 243 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
244/* 2.4 GHz band, channnel 13: Legacy, HT */ 244/* 2.4 GHz band, channnel 13: Legacy, HT */
245#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x192)\ 245#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x190)\
246 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ 246 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
247/* 5.2 GHz band, channnel 140: Legacy, HT */ 247/* 5.2 GHz band, channnel 140: Legacy, HT */
248#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A2)\ 248#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A0)\
249 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ 249 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
250/* 5.2 GHz band, HT40 channnels (132,+1) (44,+1) */ 250/* 5.2 GHz band, HT40 channnels (132,+1) (44,+1) */
251#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B2)\ 251#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B0)\
252 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ 252 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
253 253
254 254
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 532c8d6cd8da..a6856daf14cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -28,6 +28,7 @@
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/sched.h>
31#include <net/mac80211.h> 32#include <net/mac80211.h>
32 33
33#include "iwl-dev.h" /* FIXME: remove */ 34#include "iwl-dev.h" /* FIXME: remove */
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index b90adcb73b06..493626bcd3ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -250,12 +250,20 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
250 } 250 }
251 spin_unlock_irqrestore(&rxq->lock, flags); 251 spin_unlock_irqrestore(&rxq->lock, flags);
252 252
253 if (rxq->free_count > RX_LOW_WATERMARK)
254 priority |= __GFP_NOWARN;
253 /* Alloc a new receive buffer */ 255 /* Alloc a new receive buffer */
254 skb = alloc_skb(priv->hw_params.rx_buf_size + 256, 256 skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
255 priority); 257 priority);
256 258
257 if (!skb) { 259 if (!skb) {
258 IWL_CRIT(priv, "Can not allocate SKB buffers\n"); 260 if (net_ratelimit())
261 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
262 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
263 net_ratelimit())
264 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
265 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
266 rxq->free_count);
259 /* We don't reschedule replenish work here -- we will 267 /* We don't reschedule replenish work here -- we will
260 * call the restock method and if it still needs 268 * call the restock method and if it still needs
261 * more buffers it will schedule replenish */ 269 * more buffers it will schedule replenish */
@@ -1036,7 +1044,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1036 * as a bitmask. 1044 * as a bitmask.
1037 */ 1045 */
1038 rx_status.antenna = 1046 rx_status.antenna =
1039 le16_to_cpu(phy_res->phy_flags & RX_RES_PHY_FLAGS_ANTENNA_MSK) 1047 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1040 >> RX_RES_PHY_FLAGS_ANTENNA_POS; 1048 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1041 1049
1042 /* set the preamble flag if appropriate */ 1050 /* set the preamble flag if appropriate */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index a2b9ec82b965..c6633fec8216 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -520,7 +520,7 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
520 struct iwl_host_cmd cmd = { 520 struct iwl_host_cmd cmd = {
521 .id = REPLY_WEPKEY, 521 .id = REPLY_WEPKEY,
522 .data = wep_cmd, 522 .data = wep_cmd,
523 .flags = CMD_SYNC, 523 .flags = CMD_ASYNC,
524 }; 524 };
525 525
526 memset(wep_cmd, 0, cmd_size + 526 memset(wep_cmd, 0, cmd_size +
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index a7422e52d883..fb9bcfa6d947 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -28,6 +28,7 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/sched.h>
31#include <net/mac80211.h> 32#include <net/mac80211.h>
32#include "iwl-eeprom.h" 33#include "iwl-eeprom.h"
33#include "iwl-dev.h" 34#include "iwl-dev.h"
@@ -197,6 +198,12 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
197 pci_free_consistent(dev, priv->hw_params.tfd_size * 198 pci_free_consistent(dev, priv->hw_params.tfd_size *
198 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 199 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
199 200
201 /* deallocate arrays */
202 kfree(txq->cmd);
203 kfree(txq->meta);
204 txq->cmd = NULL;
205 txq->meta = NULL;
206
200 /* 0-fill queue descriptor structure */ 207 /* 0-fill queue descriptor structure */
201 memset(txq, 0, sizeof(*txq)); 208 memset(txq, 0, sizeof(*txq));
202} 209}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 090966837f3c..d00a80334095 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -33,6 +33,7 @@
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/sched.h>
36#include <linux/skbuff.h> 37#include <linux/skbuff.h>
37#include <linux/netdevice.h> 38#include <linux/netdevice.h>
38#include <linux/wireless.h> 39#include <linux/wireless.h>
@@ -1146,11 +1147,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1146 } 1147 }
1147 spin_unlock_irqrestore(&rxq->lock, flags); 1148 spin_unlock_irqrestore(&rxq->lock, flags);
1148 1149
1150 if (rxq->free_count > RX_LOW_WATERMARK)
1151 priority |= __GFP_NOWARN;
1149 /* Alloc a new receive buffer */ 1152 /* Alloc a new receive buffer */
1150 skb = alloc_skb(priv->hw_params.rx_buf_size, priority); 1153 skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
1151 if (!skb) { 1154 if (!skb) {
1152 if (net_ratelimit()) 1155 if (net_ratelimit())
1153 IWL_CRIT(priv, ": Can not allocate SKB buffers\n"); 1156 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1157 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
1158 net_ratelimit())
1159 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
1160 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
1161 rxq->free_count);
1154 /* We don't reschedule replenish work here -- we will 1162 /* We don't reschedule replenish work here -- we will
1155 * call the restock method and if it still needs 1163 * call the restock method and if it still needs
1156 * more buffers it will schedule replenish */ 1164 * more buffers it will schedule replenish */
@@ -1474,6 +1482,7 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
1474 tasklet_kill(&priv->irq_tasklet); 1482 tasklet_kill(&priv->irq_tasklet);
1475} 1483}
1476 1484
1485#ifdef CONFIG_IWLWIFI_DEBUG
1477static const char *desc_lookup(int i) 1486static const char *desc_lookup(int i)
1478{ 1487{
1479 switch (i) { 1488 switch (i) {
@@ -1497,7 +1506,7 @@ static const char *desc_lookup(int i)
1497#define ERROR_START_OFFSET (1 * sizeof(u32)) 1506#define ERROR_START_OFFSET (1 * sizeof(u32))
1498#define ERROR_ELEM_SIZE (7 * sizeof(u32)) 1507#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1499 1508
1500static void iwl3945_dump_nic_error_log(struct iwl_priv *priv) 1509void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1501{ 1510{
1502 u32 i; 1511 u32 i;
1503 u32 desc, time, count, base, data1; 1512 u32 desc, time, count, base, data1;
@@ -1591,7 +1600,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1591 } 1600 }
1592} 1601}
1593 1602
1594static void iwl3945_dump_nic_event_log(struct iwl_priv *priv) 1603void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1595{ 1604{
1596 u32 base; /* SRAM byte address of event log header */ 1605 u32 base; /* SRAM byte address of event log header */
1597 u32 capacity; /* event log capacity in # entries */ 1606 u32 capacity; /* event log capacity in # entries */
@@ -1633,6 +1642,16 @@ static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1633 iwl3945_print_event_log(priv, 0, next_entry, mode); 1642 iwl3945_print_event_log(priv, 0, next_entry, mode);
1634 1643
1635} 1644}
1645#else
1646void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1647{
1648}
1649
1650void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1651{
1652}
1653
1654#endif
1636 1655
1637static void iwl3945_irq_tasklet(struct iwl_priv *priv) 1656static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1638{ 1657{
@@ -3676,21 +3695,6 @@ static ssize_t dump_error_log(struct device *d,
3676 3695
3677static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); 3696static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
3678 3697
3679static ssize_t dump_event_log(struct device *d,
3680 struct device_attribute *attr,
3681 const char *buf, size_t count)
3682{
3683 struct iwl_priv *priv = dev_get_drvdata(d);
3684 char *p = (char *)buf;
3685
3686 if (p[0] == '1')
3687 iwl3945_dump_nic_event_log(priv);
3688
3689 return strnlen(buf, count);
3690}
3691
3692static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
3693
3694/***************************************************************************** 3698/*****************************************************************************
3695 * 3699 *
3696 * driver setup and tear down 3700 * driver setup and tear down
@@ -3735,7 +3739,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3735 &dev_attr_antenna.attr, 3739 &dev_attr_antenna.attr,
3736 &dev_attr_channels.attr, 3740 &dev_attr_channels.attr,
3737 &dev_attr_dump_errors.attr, 3741 &dev_attr_dump_errors.attr,
3738 &dev_attr_dump_events.attr,
3739 &dev_attr_flags.attr, 3742 &dev_attr_flags.attr,
3740 &dev_attr_filter_flags.attr, 3743 &dev_attr_filter_flags.attr,
3741#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 3744#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
@@ -4094,8 +4097,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4094 pci_set_drvdata(pdev, NULL); 4097 pci_set_drvdata(pdev, NULL);
4095 pci_disable_device(pdev); 4098 pci_disable_device(pdev);
4096 out_ieee80211_free_hw: 4099 out_ieee80211_free_hw:
4097 ieee80211_free_hw(priv->hw);
4098 iwl_free_traffic_mem(priv); 4100 iwl_free_traffic_mem(priv);
4101 ieee80211_free_hw(priv->hw);
4099 out: 4102 out:
4100 return err; 4103 return err;
4101} 4104}
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a56a2b0ac99a..f3c55658225b 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/sched.h>
26#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
27#include <linux/wireless.h> 28#include <linux/wireless.h>
28#include <linux/ieee80211.h> 29#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 23b52fa2605f..84158b6d35d8 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -40,6 +40,7 @@
40#include <linux/wireless.h> 40#include <linux/wireless.h>
41#include <linux/etherdevice.h> 41#include <linux/etherdevice.h>
42#include <linux/ieee80211.h> 42#include <linux/ieee80211.h>
43#include <linux/sched.h>
43 44
44#include "iwm.h" 45#include "iwm.h"
45#include "bus.h" 46#include "bus.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index d668e4756324..222eb2cf1b30 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/sched.h>
41#include <linux/ieee80211.h> 42#include <linux/ieee80211.h>
42#include <linux/wireless.h> 43#include <linux/wireless.h>
43 44
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 40dbcbc16593..771a301003c9 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/sched.h>
41#include <linux/etherdevice.h> 42#include <linux/etherdevice.h>
42#include <linux/wireless.h> 43#include <linux/wireless.h>
43#include <linux/ieee80211.h> 44#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 685098148e10..0a324dcd264c 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -6,6 +6,7 @@
6#include <net/iw_handler.h> 6#include <net/iw_handler.h>
7#include <net/lib80211.h> 7#include <net/lib80211.h>
8#include <linux/kfifo.h> 8#include <linux/kfifo.h>
9#include <linux/sched.h>
9#include "host.h" 10#include "host.h"
10#include "hostcmd.h" 11#include "hostcmd.h"
11#include "decl.h" 12#include "decl.h"
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index c42d3faa2660..23f684337fdd 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -3,6 +3,7 @@
3 * responses as well as events generated by firmware. 3 * responses as well as events generated by firmware.
4 */ 4 */
5#include <linux/delay.h> 5#include <linux/delay.h>
6#include <linux/sched.h>
6#include <linux/if_arp.h> 7#include <linux/if_arp.h>
7#include <linux/netdevice.h> 8#include <linux/netdevice.h>
8#include <asm/unaligned.h> 9#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 446e327180f8..5b3672c4d0cc 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -134,7 +134,7 @@ static void spu_transaction_finish(struct if_spi_card *card)
134static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len) 134static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
135{ 135{
136 int err = 0; 136 int err = 0;
137 u16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK); 137 __le16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
138 struct spi_message m; 138 struct spi_message m;
139 struct spi_transfer reg_trans; 139 struct spi_transfer reg_trans;
140 struct spi_transfer data_trans; 140 struct spi_transfer data_trans;
@@ -166,7 +166,7 @@ static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
166 166
167static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val) 167static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val)
168{ 168{
169 u16 buff; 169 __le16 buff;
170 170
171 buff = cpu_to_le16(val); 171 buff = cpu_to_le16(val);
172 return spu_write(card, reg, (u8 *)&buff, sizeof(u16)); 172 return spu_write(card, reg, (u8 *)&buff, sizeof(u16));
@@ -188,7 +188,7 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
188{ 188{
189 unsigned int delay; 189 unsigned int delay;
190 int err = 0; 190 int err = 0;
191 u16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK); 191 __le16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK);
192 struct spi_message m; 192 struct spi_message m;
193 struct spi_transfer reg_trans; 193 struct spi_transfer reg_trans;
194 struct spi_transfer dummy_trans; 194 struct spi_transfer dummy_trans;
@@ -235,7 +235,7 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
235/* Read 16 bits from an SPI register */ 235/* Read 16 bits from an SPI register */
236static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val) 236static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
237{ 237{
238 u16 buf; 238 __le16 buf;
239 int ret; 239 int ret;
240 240
241 ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); 241 ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
@@ -248,7 +248,7 @@ static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
248 * The low 16 bits are read first. */ 248 * The low 16 bits are read first. */
249static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val) 249static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
250{ 250{
251 u32 buf; 251 __le32 buf;
252 int err; 252 int err;
253 253
254 err = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); 254 err = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
@@ -1222,3 +1222,4 @@ MODULE_DESCRIPTION("Libertas SPI WLAN Driver");
1222MODULE_AUTHOR("Andrey Yurovsky <andrey@cozybit.com>, " 1222MODULE_AUTHOR("Andrey Yurovsky <andrey@cozybit.com>, "
1223 "Colin McCabe <colin@cozybit.com>"); 1223 "Colin McCabe <colin@cozybit.com>");
1224MODULE_LICENSE("GPL"); 1224MODULE_LICENSE("GPL");
1225MODULE_ALIAS("spi:libertas_spi");
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 4c018f7a0a8d..8c3766a6e8e7 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -3,6 +3,7 @@
3 */ 3 */
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
6#include <linux/sched.h>
6 7
7#include "hostcmd.h" 8#include "hostcmd.h"
8#include "radiotap.h" 9#include "radiotap.h"
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 896f532182f0..38cfd79e0590 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
631 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; 631 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
632 if (WARN_ON(!data->beacon_int)) 632 if (WARN_ON(!data->beacon_int))
633 data->beacon_int = 1; 633 data->beacon_int = 1;
634 if (data->started)
635 mod_timer(&data->beacon_timer,
636 jiffies + data->beacon_int);
634 } 637 }
635 638
636 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 639 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 05458d9249ce..afd26bf06649 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -731,3 +731,4 @@ module_exit(p54spi_exit);
731 731
732MODULE_LICENSE("GPL"); 732MODULE_LICENSE("GPL");
733MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); 733MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
734MODULE_ALIAS("spi:cx3110x");
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 4c97c6ad6f5d..bc08464d8323 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -19,6 +19,7 @@
19 * 19 *
20 */ 20 */
21 21
22#include <linux/capability.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/if_arp.h> 25#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index e26d7b3ceab5..2505be56ae39 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -23,6 +23,7 @@
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/ethtool.h> 24#include <linux/ethtool.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/sched.h>
26#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
27#include <linux/delay.h> 28#include <linux/delay.h>
28#include <linux/if_arp.h> 29#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index f7c677e2094d..69d2f882fd06 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -20,6 +20,7 @@
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/sched.h>
23 24
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/system.h> 26#include <asm/system.h>
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88cd58eb3b9f..1c88c2ea59aa 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2879,7 +2879,7 @@ static int write_essid(struct file *file, const char __user *buffer,
2879 unsigned long count, void *data) 2879 unsigned long count, void *data)
2880{ 2880{
2881 static char proc_essid[33]; 2881 static char proc_essid[33];
2882 int len = count; 2882 unsigned int len = count;
2883 2883
2884 if (len > 32) 2884 if (len > 32)
2885 len = 32; 2885 len = 32;
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a084077a1c61..9fe770f7d7bb 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1994,7 +1994,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1994 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size); 1994 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
1995 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, 1995 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
1996 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 1996 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
1997 (skbdesc->entry->entry_idx + 1) : 0xff); 1997 txdesc->key_idx : 0xff);
1998 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 1998 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
1999 skb->len - txdesc->l2pad); 1999 skb->len - txdesc->l2pad);
2000 rt2x00_set_field32(&word, TXWI_W1_PACKETID, 2000 rt2x00_set_field32(&word, TXWI_W1_PACKETID,
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7b3ee8c2eaef..68bc9bb1dbf9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -27,6 +27,7 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/poll.h> 29#include <linux/poll.h>
30#include <linux/sched.h>
30#include <linux/uaccess.h> 31#include <linux/uaccess.h>
31 32
32#include "rt2x00.h" 33#include "rt2x00.h"
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 5462cb5ad994..567f029a8cda 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -380,7 +380,7 @@ static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb,
380{ 380{
381} 381}
382 382
383static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, bool l2pad, 383static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
384 unsigned int header_length, 384 unsigned int header_length,
385 struct rxdone_entry_desc *rxdesc) 385 struct rxdone_entry_desc *rxdesc)
386{ 386{
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1cbd9b4a3efc..b8f5ee33445e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2381 /* Huawei-3Com */ 2381 /* Huawei-3Com */
2382 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, 2382 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) },
2383 /* Hercules */ 2383 /* Hercules */
2384 { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) },
2384 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, 2385 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) },
2385 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, 2386 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) },
2386 /* Linksys */ 2387 /* Linksys */
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 7b14d5bc63d6..88060e117541 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,5 +1,5 @@
1menuconfig WL12XX 1menuconfig WL12XX
2 boolean "TI wl12xx driver support" 2 tristate "TI wl12xx driver support"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable TI wl12xx driver support. The drivers make 5 This will enable TI wl12xx driver support. The drivers make
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 5809ef5b18f8..1103256ad989 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -1426,3 +1426,4 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
1426MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core"); 1426MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
1427MODULE_LICENSE("GPL"); 1427MODULE_LICENSE("GPL");
1428MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 1428MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
1429MODULE_ALIAS("spi:wl12xx");
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 38688847d568..23a6a6d4863b 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1070,7 +1070,7 @@ static int eject_installer(struct usb_interface *intf)
1070 1070
1071 /* Find bulk out endpoint */ 1071 /* Find bulk out endpoint */
1072 endpoint = &iface_desc->endpoint[1].desc; 1072 endpoint = &iface_desc->endpoint[1].desc;
1073 if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT && 1073 if (usb_endpoint_dir_out(endpoint) &&
1074 usb_endpoint_xfer_bulk(endpoint)) { 1074 usb_endpoint_xfer_bulk(endpoint)) {
1075 bulk_out_ep = endpoint->bEndpointAddress; 1075 bulk_out_ep = endpoint->bEndpointAddress;
1076 } else { 1076 } else {
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index dc22782633a5..83a044dbd1d7 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -134,18 +134,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
134 } 134 }
135 135
136 /* Enable the Rx interrupts for the first buffer */ 136 /* Enable the Rx interrupts for the first buffer */
137 reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET);
138 out_be32(drvdata->base_addr + XEL_RSR_OFFSET, 137 out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
139 reg_data | XEL_RSR_RECV_IE_MASK); 138 XEL_RSR_RECV_IE_MASK);
140 139
141 /* Enable the Rx interrupts for the second Buffer if 140 /* Enable the Rx interrupts for the second Buffer if
142 * configured in HW */ 141 * configured in HW */
143 if (drvdata->rx_ping_pong != 0) { 142 if (drvdata->rx_ping_pong != 0) {
144 reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
145 XEL_RSR_OFFSET);
146 out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + 143 out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
147 XEL_RSR_OFFSET, 144 XEL_RSR_OFFSET,
148 reg_data | XEL_RSR_RECV_IE_MASK); 145 XEL_RSR_RECV_IE_MASK);
149 } 146 }
150 147
151 /* Enable the Global Interrupt Enable */ 148 /* Enable the Global Interrupt Enable */
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index a0384b6f09b6..b42347333750 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -169,7 +169,6 @@ static void znet_tx_timeout (struct net_device *dev);
169static int znet_request_resources (struct net_device *dev) 169static int znet_request_resources (struct net_device *dev)
170{ 170{
171 struct znet_private *znet = netdev_priv(dev); 171 struct znet_private *znet = netdev_priv(dev);
172 unsigned long flags;
173 172
174 if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev)) 173 if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
175 goto failed; 174 goto failed;
@@ -187,13 +186,9 @@ static int znet_request_resources (struct net_device *dev)
187 free_sia: 186 free_sia:
188 release_region (znet->sia_base, znet->sia_size); 187 release_region (znet->sia_base, znet->sia_size);
189 free_tx_dma: 188 free_tx_dma:
190 flags = claim_dma_lock();
191 free_dma (znet->tx_dma); 189 free_dma (znet->tx_dma);
192 release_dma_lock (flags);
193 free_rx_dma: 190 free_rx_dma:
194 flags = claim_dma_lock();
195 free_dma (znet->rx_dma); 191 free_dma (znet->rx_dma);
196 release_dma_lock (flags);
197 free_irq: 192 free_irq:
198 free_irq (dev->irq, dev); 193 free_irq (dev->irq, dev);
199 failed: 194 failed:
@@ -203,14 +198,11 @@ static int znet_request_resources (struct net_device *dev)
203static void znet_release_resources (struct net_device *dev) 198static void znet_release_resources (struct net_device *dev)
204{ 199{
205 struct znet_private *znet = netdev_priv(dev); 200 struct znet_private *znet = netdev_priv(dev);
206 unsigned long flags;
207 201
208 release_region (znet->sia_base, znet->sia_size); 202 release_region (znet->sia_base, znet->sia_size);
209 release_region (dev->base_addr, znet->io_size); 203 release_region (dev->base_addr, znet->io_size);
210 flags = claim_dma_lock();
211 free_dma (znet->tx_dma); 204 free_dma (znet->tx_dma);
212 free_dma (znet->rx_dma); 205 free_dma (znet->rx_dma);
213 release_dma_lock (flags);
214 free_irq (dev->irq, dev); 206 free_irq (dev->irq, dev);
215} 207}
216 208